sys/kern/kern_lock.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/kdb.h>
37 #include <sys/ktr.h>
38 #include <sys/limits.h>
39 #include <sys/lock.h>
40 #include <sys/lock_profile.h>
41 #include <sys/lockmgr.h>
42 #include <sys/lockstat.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/sleepqueue.h>
46 #ifdef DEBUG_LOCKS
47 #include <sys/stack.h>
48 #endif
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51
52 #include <machine/cpu.h>
53
54 #ifdef DDB
55 #include <ddb/ddb.h>
56 #endif
57
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62
63 /*
64  * Hack. There should be prio_t or similar so that this is not necessary.
65  */
66 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
67     "prio flags won't fit in u_short pri in struct lock");
68
69 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
70     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
71
72 #define SQ_EXCLUSIVE_QUEUE      0
73 #define SQ_SHARED_QUEUE         1
74
75 #ifndef INVARIANTS
76 #define _lockmgr_assert(lk, what, file, line)
77 #endif
78
79 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
80 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
81
82 #ifndef DEBUG_LOCKS
83 #define STACK_PRINT(lk)
84 #define STACK_SAVE(lk)
85 #define STACK_ZERO(lk)
86 #else
87 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
88 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
89 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
90 #endif
91
92 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
93         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
94                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
95 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
96         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
97                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
98
99 #define GIANT_DECLARE                                                   \
100         int _i = 0;                                                     \
101         WITNESS_SAVE_DECL(Giant)
102 #define GIANT_RESTORE() do {                                            \
103         if (__predict_false(_i > 0)) {                                  \
104                 while (_i--)                                            \
105                         mtx_lock(&Giant);                               \
106                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
107         }                                                               \
108 } while (0)
109 #define GIANT_SAVE() do {                                               \
110         if (__predict_false(mtx_owned(&Giant))) {                       \
111                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
112                 while (mtx_owned(&Giant)) {                             \
113                         _i++;                                           \
114                         mtx_unlock(&Giant);                             \
115                 }                                                       \
116         }                                                               \
117 } while (0)
118
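/*
 * Editorial sketch (not part of the original file): the Giant save/restore
 * machinery above is meant to bracket a sleep, which is exactly how
 * sleeplk() below uses it:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(drop Giant, recursively, before sleeping)
 *	sleepq_wait(...);
 *	GIANT_RESTORE();	(reacquire Giant as many times as it was held)
 */
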
119 static bool __always_inline
120 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
121 {
122
123         if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
124             LK_SHARE)
125                 return (true);
126         if (fp || (!(x & LK_SHARE)))
127                 return (false);
128         if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
129             (curthread->td_pflags & TDP_DEADLKTREAT))
130                 return (true);
131         return (false);
132 }
133
134 #define LK_TRYOP(x)                                                     \
135         ((x) & LK_NOWAIT)
136
137 #define LK_CAN_WITNESS(x)                                               \
138         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
139 #define LK_TRYWIT(x)                                                    \
140         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
141
142 #define lockmgr_disowned(lk)                                            \
143         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
144
145 #define lockmgr_xlocked_v(v)                                            \
146         (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
147
148 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
149
150 static void     assert_lockmgr(const struct lock_object *lock, int how);
151 #ifdef DDB
152 static void     db_show_lockmgr(const struct lock_object *lock);
153 #endif
154 static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
155 #ifdef KDTRACE_HOOKS
156 static int      owner_lockmgr(const struct lock_object *lock,
157                     struct thread **owner);
158 #endif
159 static uintptr_t unlock_lockmgr(struct lock_object *lock);
160
161 struct lock_class lock_class_lockmgr = {
162         .lc_name = "lockmgr",
163         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
164         .lc_assert = assert_lockmgr,
165 #ifdef DDB
166         .lc_ddb_show = db_show_lockmgr,
167 #endif
168         .lc_lock = lock_lockmgr,
169         .lc_unlock = unlock_lockmgr,
170 #ifdef KDTRACE_HOOKS
171         .lc_owner = owner_lockmgr,
172 #endif
173 };
174
175 static __read_mostly bool lk_adaptive = true;
176 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
177 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
178     0, "");
179 #define lockmgr_delay  locks_delay
180
181 struct lockmgr_wait {
182         const char *iwmesg;
183         int ipri;
184         int itimo;
185 };
186
187 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
188     int flags, bool fp);
189 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
190
191 static void
192 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
193 {
194         struct lock_class *class;
195
196         if (flags & LK_INTERLOCK) {
197                 class = LOCK_CLASS(ilk);
198                 class->lc_unlock(ilk);
199         }
200
201         if (__predict_false(wakeup_swapper))
202                 kick_proc0();
203 }
204
205 static void
206 lockmgr_note_shared_acquire(struct lock *lk, int contested,
207     uint64_t waittime, const char *file, int line, int flags)
208 {
209
210         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
211             waittime, file, line, LOCKSTAT_READER);
212         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
213         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
214         TD_LOCKS_INC(curthread);
215         TD_SLOCKS_INC(curthread);
216         STACK_SAVE(lk);
217 }
218
219 static void
220 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
221 {
222
223         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
224         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
225         TD_LOCKS_DEC(curthread);
226         TD_SLOCKS_DEC(curthread);
227 }
228
229 static void
230 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
231     uint64_t waittime, const char *file, int line, int flags)
232 {
233
234         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
235             waittime, file, line, LOCKSTAT_WRITER);
236         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
237         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
238             line);
239         TD_LOCKS_INC(curthread);
240         STACK_SAVE(lk);
241 }
242
243 static void
244 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
245 {
246
247         if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
248                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
249                 TD_LOCKS_DEC(curthread);
250         }
251         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
252             line);
253 }
254
255 static __inline struct thread *
256 lockmgr_xholder(const struct lock *lk)
257 {
258         uintptr_t x;
259
260         x = lockmgr_read_value(lk);
261         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
262 }
263
264 /*
265  * Assumes the sleepqueue chain lock is held on entry and returns with it
266  * released.  Also assumes the generic interlock is sane and was checked by
267  * the caller.  If LK_INTERLOCK is specified the interlock is not reacquired
268  * after the sleep.
269  */
270 static __inline int
271 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
272     const char *wmesg, int pri, int timo, int queue)
273 {
274         GIANT_DECLARE;
275         struct lock_class *class;
276         int catch, error;
277
278         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
279         catch = pri & PCATCH;
280         pri &= PRIMASK;
281         error = 0;
282
283         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
284             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
285
286         if (flags & LK_INTERLOCK)
287                 class->lc_unlock(ilk);
288         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
289                 if (lk->lk_exslpfail < USHRT_MAX)
290                         lk->lk_exslpfail++;
291         }
292         GIANT_SAVE();
293         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
294             SLEEPQ_INTERRUPTIBLE : 0), queue);
295         if ((flags & LK_TIMELOCK) && timo)
296                 sleepq_set_timeout(&lk->lock_object, timo);
297
298         /*
299          * Pick the sleep variant matching the requested flags.
300          */
301         if ((flags & LK_TIMELOCK) && timo && catch)
302                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
303         else if ((flags & LK_TIMELOCK) && timo)
304                 error = sleepq_timedwait(&lk->lock_object, pri);
305         else if (catch)
306                 error = sleepq_wait_sig(&lk->lock_object, pri);
307         else
308                 sleepq_wait(&lk->lock_object, pri);
309         GIANT_RESTORE();
310         if ((flags & LK_SLEEPFAIL) && error == 0)
311                 error = ENOLCK;
312
313         return (error);
314 }
315
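/*
 * Editorial example (hypothetical caller, not from this file): LK_SLEEPFAIL
 * converts a sleep that would otherwise succeed into ENOLCK, telling the
 * caller to re-evaluate its state instead of assuming it now holds the lock:
 *
 *	error = lockmgr(lk, LK_EXCLUSIVE | LK_SLEEPFAIL, NULL);
 *	if (error == ENOLCK)
 *		goto restart;	(the lock state changed while we slept)
 */
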
316 static __inline int
317 wakeupshlk(struct lock *lk, const char *file, int line)
318 {
319         uintptr_t v, x, orig_x;
320         u_int realexslp;
321         int queue, wakeup_swapper;
322
323         wakeup_swapper = 0;
324         for (;;) {
325                 x = lockmgr_read_value(lk);
326                 if (lockmgr_sunlock_try(lk, &x))
327                         break;
328
329                 /*
330                  * We should have a sharer with waiters, so enter the hard
331                  * path in order to handle wakeups correctly.
332                  */
333                 sleepq_lock(&lk->lock_object);
334                 orig_x = lockmgr_read_value(lk);
335 retry_sleepq:
336                 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
337                 v = LK_UNLOCKED;
338
339                 /*
340                  * If the lock has exclusive waiters, give them preference in
341                  * order to avoid a deadlock with shared waiters racing for
342                  * the lock.
343                  * If interruptible sleeps left the exclusive queue empty,
344                  * avoid starvation of the threads sleeping on the shared
345                  * queue by giving them precedence and clearing the
346                  * exclusive waiters bit anyway.
347                  * Note that the lk_exslpfail count may overstate the real
348                  * number of waiters with the LK_SLEEPFAIL flag set, because
349                  * such sleeps may also be interruptible, so lk_exslpfail is
350                  * only an upper bound, including the edge cases.
351                  */
352                 realexslp = sleepq_sleepcnt(&lk->lock_object,
353                     SQ_EXCLUSIVE_QUEUE);
354                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
355                         if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
356                                 lk->lk_exslpfail = 0;
357                                 queue = SQ_EXCLUSIVE_QUEUE;
358                                 v |= (x & LK_SHARED_WAITERS);
359                         } else {
360                                 lk->lk_exslpfail = 0;
361                                 LOCK_LOG2(lk,
362                                     "%s: %p has only LK_SLEEPFAIL sleepers",
363                                     __func__, lk);
364                                 LOCK_LOG2(lk,
365                             "%s: %p waking up threads on the exclusive queue",
366                                     __func__, lk);
367                                 wakeup_swapper =
368                                     sleepq_broadcast(&lk->lock_object,
369                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
370                                 queue = SQ_SHARED_QUEUE;
371                         }
372                 } else {
373                         /*
374                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
375                          * and using interruptible sleeps/timeout may have
376                          * left spurious lk_exslpfail counts on, so clean
377                          * it up anyway.
378                          */
379                         lk->lk_exslpfail = 0;
380                         queue = SQ_SHARED_QUEUE;
381                 }
382
383                 if (lockmgr_sunlock_try(lk, &orig_x)) {
384                         sleepq_release(&lk->lock_object);
385                         break;
386                 }
387
388                 x |= LK_SHARERS_LOCK(1);
389                 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
390                         orig_x = x;
391                         goto retry_sleepq;
392                 }
393                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
394                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
395                     "exclusive");
396                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
397                     0, queue);
398                 sleepq_release(&lk->lock_object);
399                 break;
400         }
401
402         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
403         return (wakeup_swapper);
404 }
405
406 static void
407 assert_lockmgr(const struct lock_object *lock, int what)
408 {
409
410         panic("lockmgr locks do not support assertions");
411 }
412
413 static void
414 lock_lockmgr(struct lock_object *lock, uintptr_t how)
415 {
416
417         panic("lockmgr locks do not support sleep interlocking");
418 }
419
420 static uintptr_t
421 unlock_lockmgr(struct lock_object *lock)
422 {
423
424         panic("lockmgr locks do not support sleep interlocking");
425 }
426
427 #ifdef KDTRACE_HOOKS
428 static int
429 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
430 {
431
432         panic("lockmgr locks do not support owner inquiring");
433 }
434 #endif
435
436 void
437 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
438 {
439         int iflags;
440
441         MPASS((flags & ~LK_INIT_MASK) == 0);
442         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
443             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
444             &lk->lk_lock));
445
446         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
447         if (flags & LK_CANRECURSE)
448                 iflags |= LO_RECURSABLE;
449         if ((flags & LK_NODUP) == 0)
450                 iflags |= LO_DUPOK;
451         if (flags & LK_NOPROFILE)
452                 iflags |= LO_NOPROFILE;
453         if ((flags & LK_NOWITNESS) == 0)
454                 iflags |= LO_WITNESS;
455         if (flags & LK_QUIET)
456                 iflags |= LO_QUIET;
457         if (flags & LK_IS_VNODE)
458                 iflags |= LO_IS_VNODE;
459         if (flags & LK_NEW)
460                 iflags |= LO_NEW;
461         iflags |= flags & LK_NOSHARE;
462
463         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
464         lk->lk_lock = LK_UNLOCKED;
465         lk->lk_recurse = 0;
466         lk->lk_exslpfail = 0;
467         lk->lk_timo = timo;
468         lk->lk_pri = pri;
469         STACK_ZERO(lk);
470 }
471
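/*
 * Editorial example (hypothetical "foolk", not used in this file): a typical
 * consumer pairs lockinit() with lockdestroy() and takes the lock through
 * the lockmgr(9) interface:
 *
 *	struct lock foolk;
 *
 *	lockinit(&foolk, PVFS, "foolk", 0, LK_CANRECURSE);
 *	lockmgr(&foolk, LK_EXCLUSIVE, NULL);
 *	...exclusive section...
 *	lockmgr(&foolk, LK_RELEASE, NULL);
 *	lockdestroy(&foolk);
 */
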
472 /*
473  * XXX: Gross hacks to manipulate external lock flags after
474  * initialization.  Used for certain vnode and buf locks.
475  */
476 void
477 lockallowshare(struct lock *lk)
478 {
479
480         lockmgr_assert(lk, KA_XLOCKED);
481         lk->lock_object.lo_flags &= ~LK_NOSHARE;
482 }
483
484 void
485 lockdisableshare(struct lock *lk)
486 {
487
488         lockmgr_assert(lk, KA_XLOCKED);
489         lk->lock_object.lo_flags |= LK_NOSHARE;
490 }
491
492 void
493 lockallowrecurse(struct lock *lk)
494 {
495
496         lockmgr_assert(lk, KA_XLOCKED);
497         lk->lock_object.lo_flags |= LO_RECURSABLE;
498 }
499
500 void
501 lockdisablerecurse(struct lock *lk)
502 {
503
504         lockmgr_assert(lk, KA_XLOCKED);
505         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
506 }
507
508 void
509 lockdestroy(struct lock *lk)
510 {
511
512         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
513         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
514         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
515         lock_destroy(&lk->lock_object);
516 }
517
518 static bool __always_inline
519 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
520 {
521
522         /*
523          * If no other thread has an exclusive lock, or
524          * no exclusive waiter is present, bump the count of
525          * sharers.  Since we have to preserve the state of
526          * waiters, if we fail to acquire the shared lock
527          * loop back and retry.
528          */
529         while (LK_CAN_SHARE(*xp, flags, fp)) {
530                 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
531                     *xp + LK_ONE_SHARER)) {
532                         return (true);
533                 }
534         }
535         return (false);
536 }
537
538 static bool __always_inline
539 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
540 {
541
542         for (;;) {
543                 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
544                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
545                             *xp - LK_ONE_SHARER))
546                                 return (true);
547                         continue;
548                 }
549                 break;
550         }
551         return (false);
552 }
553
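/*
 * Editorial note (assuming the LK_SHARERS_LOCK()/LK_ONE_SHARER encoding from
 * sys/lockmgr.h): the count of shared holders lives in the upper bits of
 * lk_lock, so the fcmpset loops above take or drop one shared reference with
 * plain arithmetic on the lock word while the low waiter bits are carried
 * along unchanged, e.g.
 *
 *	LK_SHARERS_LOCK(2) + LK_ONE_SHARER == LK_SHARERS_LOCK(3)
 *	LK_SHARERS_LOCK(2) - LK_ONE_SHARER == LK_SHARERS_LOCK(1)
 */
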
554 static bool
555 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
556     int flags)
557 {
558         struct thread *owner;
559         uintptr_t x;
560
561         x = *xp;
562         MPASS(x != LK_UNLOCKED);
563         owner = (struct thread *)LK_HOLDER(x);
564         for (;;) {
565                 MPASS(owner != curthread);
566                 if (owner == (struct thread *)LK_KERNPROC)
567                         return (false);
568                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
569                         return (false);
570                 if (owner == NULL)
571                         return (false);
572                 if (!TD_IS_RUNNING(owner))
573                         return (false);
574                 if ((x & LK_ALL_WAITERS) != 0)
575                         return (false);
576                 lock_delay(lda);
577                 x = lockmgr_read_value(lk);
578                 if (LK_CAN_SHARE(x, flags, false)) {
579                         *xp = x;
580                         return (true);
581                 }
582                 owner = (struct thread *)LK_HOLDER(x);
583         }
584 }
585
586 static __noinline int
587 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
588     const char *file, int line, struct lockmgr_wait *lwa)
589 {
590         uintptr_t tid, x;
591         int error = 0;
592         const char *iwmesg;
593         int ipri, itimo;
594
595 #ifdef KDTRACE_HOOKS
596         uint64_t sleep_time = 0;
597 #endif
598 #ifdef LOCK_PROFILING
599         uint64_t waittime = 0;
600         int contested = 0;
601 #endif
602         struct lock_delay_arg lda;
603
604         if (SCHEDULER_STOPPED())
605                 goto out;
606
607         tid = (uintptr_t)curthread;
608
609         if (LK_CAN_WITNESS(flags))
610                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
611                     file, line, flags & LK_INTERLOCK ? ilk : NULL);
612         x = lockmgr_read_value(lk);
613         lock_delay_arg_init(&lda, &lockmgr_delay);
614         if (!lk_adaptive)
615                 flags &= ~LK_ADAPTIVE;
616         /*
617          * The lock may already be locked exclusive by curthread,
618          * avoid deadlock.
619          */
620         if (LK_HOLDER(x) == tid) {
621                 LOCK_LOG2(lk,
622                     "%s: %p already held in exclusive mode",
623                     __func__, lk);
624                 error = EDEADLK;
625                 goto out;
626         }
627
628         for (;;) {
629                 if (lockmgr_slock_try(lk, &x, flags, false))
630                         break;
631
632                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
633                     &contested, &waittime);
634
635                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
636                         if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
637                                 continue;
638                 }
639
640 #ifdef HWPMC_HOOKS
641                 PMC_SOFT_CALL( , , lock, failed);
642 #endif
643
644                 /*
645                  * If the caller does not want to sleep, just give up
646                  * and return.
647                  */
648                 if (LK_TRYOP(flags)) {
649                         LOCK_LOG2(lk, "%s: %p fails the try operation",
650                             __func__, lk);
651                         error = EBUSY;
652                         break;
653                 }
654
655                 /*
656                  * Acquire the sleepqueue chain lock because we
657                  * probably will need to manipulate the waiters flags.
658                  */
659                 sleepq_lock(&lk->lock_object);
660                 x = lockmgr_read_value(lk);
661 retry_sleepq:
662
663                 /*
664                  * If the lock can be acquired in shared mode, try
665                  * again.
666                  */
667                 if (LK_CAN_SHARE(x, flags, false)) {
668                         sleepq_release(&lk->lock_object);
669                         continue;
670                 }
671
672                 /*
673                  * Try to set the LK_SHARED_WAITERS flag.  If we fail,
674                  * loop back and retry.
675                  */
676                 if ((x & LK_SHARED_WAITERS) == 0) {
677                         if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
678                             x | LK_SHARED_WAITERS)) {
679                                 goto retry_sleepq;
680                         }
681                         LOCK_LOG2(lk, "%s: %p set shared waiters flag",
682                             __func__, lk);
683                 }
684
685                 if (lwa == NULL) {
686                         iwmesg = lk->lock_object.lo_name;
687                         ipri = lk->lk_pri;
688                         itimo = lk->lk_timo;
689                 } else {
690                         iwmesg = lwa->iwmesg;
691                         ipri = lwa->ipri;
692                         itimo = lwa->itimo;
693                 }
694
695                 /*
696                  * Since we have been unable to acquire the
697                  * shared lock and the shared waiters flag is set,
698                  * we will sleep.
699                  */
700 #ifdef KDTRACE_HOOKS
701                 sleep_time -= lockstat_nsecs(&lk->lock_object);
702 #endif
703                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
704                     SQ_SHARED_QUEUE);
705 #ifdef KDTRACE_HOOKS
706                 sleep_time += lockstat_nsecs(&lk->lock_object);
707 #endif
708                 flags &= ~LK_INTERLOCK;
709                 if (error) {
710                         LOCK_LOG3(lk,
711                             "%s: interrupted sleep for %p with %d",
712                             __func__, lk, error);
713                         break;
714                 }
715                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
716                     __func__, lk);
717                 x = lockmgr_read_value(lk);
718         }
719         if (error == 0) {
720 #ifdef KDTRACE_HOOKS
721                 if (sleep_time != 0)
722                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
723                             LOCKSTAT_READER, (x & LK_SHARE) == 0,
724                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
725 #endif
726 #ifdef LOCK_PROFILING
727                 lockmgr_note_shared_acquire(lk, contested, waittime,
728                     file, line, flags);
729 #else
730                 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
731                     flags);
732 #endif
733         }
734
735 out:
736         lockmgr_exit(flags, ilk, 0);
737         return (error);
738 }
739
740 static bool
741 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
742 {
743         struct thread *owner;
744         uintptr_t x;
745
746         x = *xp;
747         MPASS(x != LK_UNLOCKED);
748         owner = (struct thread *)LK_HOLDER(x);
749         for (;;) {
750                 MPASS(owner != curthread);
751                 if (owner == NULL)
752                         return (false);
753                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
754                         return (false);
755                 if (owner == (struct thread *)LK_KERNPROC)
756                         return (false);
757                 if (!TD_IS_RUNNING(owner))
758                         return (false);
759                 if ((x & LK_ALL_WAITERS) != 0)
760                         return (false);
761                 lock_delay(lda);
762                 x = lockmgr_read_value(lk);
763                 if (x == LK_UNLOCKED) {
764                         *xp = x;
765                         return (true);
766                 }
767                 owner = (struct thread *)LK_HOLDER(x);
768         }
769 }
770
771 static __noinline int
772 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
773     const char *file, int line, struct lockmgr_wait *lwa)
774 {
775         struct lock_class *class;
776         uintptr_t tid, x, v;
777         int error = 0;
778         const char *iwmesg;
779         int ipri, itimo;
780
781 #ifdef KDTRACE_HOOKS
782         uint64_t sleep_time = 0;
783 #endif
784 #ifdef LOCK_PROFILING
785         uint64_t waittime = 0;
786         int contested = 0;
787 #endif
788         struct lock_delay_arg lda;
789
790         if (SCHEDULER_STOPPED())
791                 goto out;
792
793         tid = (uintptr_t)curthread;
794
795         if (LK_CAN_WITNESS(flags))
796                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
797                     LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
798                     ilk : NULL);
799
800         /*
801          * If curthread already holds the lock and this one is
802          * allowed to recurse, simply recurse on it.
803          */
804         if (lockmgr_xlocked(lk)) {
805                 if ((flags & LK_CANRECURSE) == 0 &&
806                     (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
807                         /*
808                          * If the caller does not want us to panic, just
809                          * give up and return.
810                          */
811                         if (LK_TRYOP(flags)) {
812                                 LOCK_LOG2(lk,
813                                     "%s: %p fails the try operation",
814                                     __func__, lk);
815                                 error = EBUSY;
816                                 goto out;
817                         }
818                         if (flags & LK_INTERLOCK) {
819                                 class = LOCK_CLASS(ilk);
820                                 class->lc_unlock(ilk);
821                         }
822                         STACK_PRINT(lk);
823                         panic("%s: recursing on non recursive lockmgr %p "
824                             "@ %s:%d\n", __func__, lk, file, line);
825                 }
826                 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
827                 lk->lk_recurse++;
828                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
829                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
830                     lk->lk_recurse, file, line);
831                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
832                     LK_TRYWIT(flags), file, line);
833                 TD_LOCKS_INC(curthread);
834                 goto out;
835         }
836
837         x = LK_UNLOCKED;
838         lock_delay_arg_init(&lda, &lockmgr_delay);
839         if (!lk_adaptive)
840                 flags &= ~LK_ADAPTIVE;
841         for (;;) {
842                 if (x == LK_UNLOCKED) {
843                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
844                                 break;
845                         continue;
846                 }
847
848                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
849                     &contested, &waittime);
850
851                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
852                         if (lockmgr_xlock_adaptive(&lda, lk, &x))
853                                 continue;
854                 }
855 #ifdef HWPMC_HOOKS
856                 PMC_SOFT_CALL( , , lock, failed);
857 #endif
858
859                 /*
860                  * If the caller does not want to sleep, just give up
861                  * and return.
862                  */
863                 if (LK_TRYOP(flags)) {
864                         LOCK_LOG2(lk, "%s: %p fails the try operation",
865                             __func__, lk);
866                         error = EBUSY;
867                         break;
868                 }
869
870                 /*
871                  * Acquire the sleepqueue chain lock because we
872                  * probably will need to manipulate the waiters flags.
873                  */
874                 sleepq_lock(&lk->lock_object);
875                 x = lockmgr_read_value(lk);
876 retry_sleepq:
877
878                 /*
879                  * If the lock has been released while we spun on
880                  * the sleepqueue chain lock, just try again.
881                  */
882                 if (x == LK_UNLOCKED) {
883                         sleepq_release(&lk->lock_object);
884                         continue;
885                 }
886
887                 /*
888                  * The lock can be in the state where there is a
889                  * pending queue of waiters, but still no owner.
890                  * This happens when the lock is contested and an
891                  * owner is going to claim the lock.
892                  * If curthread is the one successfully acquiring it,
893                  * claim lock ownership and return, preserving waiters
894                  * flags.
895                  */
896                 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
897                 if ((x & ~v) == LK_UNLOCKED) {
898                         v &= ~LK_EXCLUSIVE_SPINNERS;
899                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
900                             tid | v)) {
901                                 sleepq_release(&lk->lock_object);
902                                 LOCK_LOG2(lk,
903                                     "%s: %p claimed by a new writer",
904                                     __func__, lk);
905                                 break;
906                         }
907                         goto retry_sleepq;
908                 }
909
910                 /*
911                  * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
912                  * fail, loop back and retry.
913                  */
914                 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
915                         if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
916                             x | LK_EXCLUSIVE_WAITERS)) {
917                                 goto retry_sleepq;
918                         }
919                         LOCK_LOG2(lk, "%s: %p set excl waiters flag",
920                             __func__, lk);
921                 }
922
923                 if (lwa == NULL) {
924                         iwmesg = lk->lock_object.lo_name;
925                         ipri = lk->lk_pri;
926                         itimo = lk->lk_timo;
927                 } else {
928                         iwmesg = lwa->iwmesg;
929                         ipri = lwa->ipri;
930                         itimo = lwa->itimo;
931                 }
932
933                 /*
934                  * Since we have been unable to acquire the
935                  * exclusive lock and the exclusive waiters flag
936                  * is set, we will sleep.
937                  */
938 #ifdef KDTRACE_HOOKS
939                 sleep_time -= lockstat_nsecs(&lk->lock_object);
940 #endif
941                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
942                     SQ_EXCLUSIVE_QUEUE);
943 #ifdef KDTRACE_HOOKS
944                 sleep_time += lockstat_nsecs(&lk->lock_object);
945 #endif
946                 flags &= ~LK_INTERLOCK;
947                 if (error) {
948                         LOCK_LOG3(lk,
949                             "%s: interrupted sleep for %p with %d",
950                             __func__, lk, error);
951                         break;
952                 }
953                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
954                     __func__, lk);
955                 x = lockmgr_read_value(lk);
956         }
957         if (error == 0) {
958 #ifdef KDTRACE_HOOKS
959                 if (sleep_time != 0)
960                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
961                             LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
962                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
963 #endif
964 #ifdef LOCK_PROFILING
965                 lockmgr_note_exclusive_acquire(lk, contested, waittime,
966                     file, line, flags);
967 #else
968                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
969                     flags);
970 #endif
971         }
972
973 out:
974         lockmgr_exit(flags, ilk, 0);
975         return (error);
976 }
977
978 static __noinline int
979 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
980     const char *file, int line, struct lockmgr_wait *lwa)
981 {
982         uintptr_t tid, v, setv;
983         int error = 0;
984         int op;
985
986         if (SCHEDULER_STOPPED())
987                 goto out;
988
989         tid = (uintptr_t)curthread;
990
991         _lockmgr_assert(lk, KA_SLOCKED, file, line);
992
993         op = flags & LK_TYPE_MASK;
994         v = lockmgr_read_value(lk);
995         for (;;) {
996                 if (LK_SHARERS(v) > 1) {
997                         if (op == LK_TRYUPGRADE) {
998                                 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
999                                     __func__, lk);
1000                                 error = EBUSY;
1001                                 goto out;
1002                         }
1003                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1004                             v - LK_ONE_SHARER)) {
1005                                 lockmgr_note_shared_release(lk, file, line);
1006                                 goto out_xlock;
1007                         }
1008                         continue;
1009                 }
1010                 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1011
1012                 setv = tid;
1013                 setv |= (v & LK_ALL_WAITERS);
1014
1015                 /*
1016                  * Try to switch from one shared lock to an exclusive one.
1017                  * We need to preserve waiters flags during the operation.
1018                  */
1019                 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1020                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1021                             line);
1022                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1023                             LK_TRYWIT(flags), file, line);
1024                         LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1025                         TD_SLOCKS_DEC(curthread);
1026                         goto out;
1027                 }
1028         }
1029
1030 out_xlock:
1031         error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1032         flags &= ~LK_INTERLOCK;
1033 out:
1034         lockmgr_exit(flags, ilk, 0);
1035         return (error);
1036 }
1037
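/*
 * Editorial example (hypothetical caller): if LK_TRYUPGRADE fails with EBUSY
 * the shared lock is still held, so a caller that must end up exclusive can
 * fall back to dropping and re-acquiring, re-validating afterwards:
 *
 *	if (lockmgr(lk, LK_TRYUPGRADE, NULL) != 0) {
 *		lockmgr(lk, LK_RELEASE, NULL);
 *		lockmgr(lk, LK_EXCLUSIVE, NULL);
 *		...re-check any state examined under the shared lock...
 *	}
 */
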
1038 int
1039 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1040     const char *file, int line)
1041 {
1042         struct lock_class *class;
1043         uintptr_t x, tid;
1044         u_int op;
1045         bool locked;
1046
1047         if (SCHEDULER_STOPPED())
1048                 return (0);
1049
1050         op = flags & LK_TYPE_MASK;
1051         locked = false;
1052         switch (op) {
1053         case LK_SHARED:
1054                 if (LK_CAN_WITNESS(flags))
1055                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1056                             file, line, flags & LK_INTERLOCK ? ilk : NULL);
1057                 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1058                         break;
1059                 x = lockmgr_read_value(lk);
1060                 if (lockmgr_slock_try(lk, &x, flags, true)) {
1061                         lockmgr_note_shared_acquire(lk, 0, 0,
1062                             file, line, flags);
1063                         locked = true;
1064                 } else {
1065                         return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1066                             NULL));
1067                 }
1068                 break;
1069         case LK_EXCLUSIVE:
1070                 if (LK_CAN_WITNESS(flags))
1071                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1072                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1073                             ilk : NULL);
1074                 tid = (uintptr_t)curthread;
1075                 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1076                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1077                         lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1078                             flags);
1079                         locked = true;
1080                 } else {
1081                         return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1082                             NULL));
1083                 }
1084                 break;
1085         case LK_UPGRADE:
1086         case LK_TRYUPGRADE:
1087                 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1088         default:
1089                 break;
1090         }
1091         if (__predict_true(locked)) {
1092                 if (__predict_false(flags & LK_INTERLOCK)) {
1093                         class = LOCK_CLASS(ilk);
1094                         class->lc_unlock(ilk);
1095                 }
1096                 return (0);
1097         } else {
1098                 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1099                     LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1100         }
1101 }
1102
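/*
 * Editorial sketch (hypothetical foo_mtx): with LK_INTERLOCK the caller's
 * interlock is released by lockmgr once it is safe to do so, whether the
 * acquisition takes the fast path above or has to sleep:
 *
 *	mtx_lock(&foo_mtx);
 *	...examine state protected by foo_mtx...
 *	lockmgr_lock_flags(lk, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &foo_mtx.lock_object, __FILE__, __LINE__);
 *
 * On return foo_mtx has been dropped on the caller's behalf.
 */
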
1103 static __noinline int
1104 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1105     const char *file, int line)
1106
1107 {
1108         int wakeup_swapper = 0;
1109
1110         if (SCHEDULER_STOPPED())
1111                 goto out;
1112
1113         wakeup_swapper = wakeupshlk(lk, file, line);
1114
1115 out:
1116         lockmgr_exit(flags, ilk, wakeup_swapper);
1117         return (0);
1118 }
1119
1120 static __noinline int
1121 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1122     const char *file, int line)
1123 {
1124         uintptr_t tid, v;
1125         int wakeup_swapper = 0;
1126         u_int realexslp;
1127         int queue;
1128
1129         if (SCHEDULER_STOPPED())
1130                 goto out;
1131
1132         tid = (uintptr_t)curthread;
1133
1134         /*
1135          * As a first option, treat the lock as if it has no
1136          * waiters.
1137          * Fix-up the tid var if the lock has been disowned.
1138          */
1139         if (LK_HOLDER(x) == LK_KERNPROC)
1140                 tid = LK_KERNPROC;
1141
1142         /*
1143          * The lock is held in exclusive mode.
1144          * If the lock is recursed also, then unrecurse it.
1145          */
1146         if (lockmgr_recursed_v(x)) {
1147                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1148                 lk->lk_recurse--;
1149                 if (lk->lk_recurse == 0)
1150                         atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1151                 goto out;
1152         }
1153         if (tid != LK_KERNPROC)
1154                 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1155                     LOCKSTAT_WRITER);
1156
1157         if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1158                 goto out;
1159
1160         sleepq_lock(&lk->lock_object);
1161         x = lockmgr_read_value(lk);
1162         v = LK_UNLOCKED;
1163
1164         /*
1165          * If the lock has exclusive waiters, give them
1166          * preference in order to avoid a deadlock with
1167          * shared waiters racing for the lock.
1168          * If interruptible sleeps left the exclusive
1169          * queue empty, avoid starvation of the threads
1170          * sleeping on the shared queue by giving them
1171          * precedence and clearing the exclusive waiters
1172          * bit anyway.
1173          * Note that the lk_exslpfail count may overstate
1174          * the real number of waiters with the LK_SLEEPFAIL
1175          * flag set, because such sleeps may also be
1176          * interruptible, so lk_exslpfail is only an
1177          * upper bound, including the edge cases.
1178          */
1179         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1180         realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1181         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1182                 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1183                         lk->lk_exslpfail = 0;
1184                         queue = SQ_EXCLUSIVE_QUEUE;
1185                         v |= (x & LK_SHARED_WAITERS);
1186                 } else {
1187                         lk->lk_exslpfail = 0;
1188                         LOCK_LOG2(lk,
1189                             "%s: %p has only LK_SLEEPFAIL sleepers",
1190                             __func__, lk);
1191                         LOCK_LOG2(lk,
1192                             "%s: %p waking up threads on the exclusive queue",
1193                             __func__, lk);
1194                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1195                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1196                         queue = SQ_SHARED_QUEUE;
1197                 }
1198         } else {
1199                 /*
1200                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1201                  * on and using interruptible sleeps/timeout
1202                  * may have left spurious lk_exslpfail counts
1203                  * on, so clean it up anyway.
1204                  */
1205                 lk->lk_exslpfail = 0;
1206                 queue = SQ_SHARED_QUEUE;
1207         }
1208
1209         LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1210             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1211             "exclusive");
1212         atomic_store_rel_ptr(&lk->lk_lock, v);
1213         wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1214         sleepq_release(&lk->lock_object);
1215
1216 out:
1217         lockmgr_exit(flags, ilk, wakeup_swapper);
1218         return (0);
1219 }
1220
1221 /*
1222  * Lightweight entry points for common operations.
1223  *
1224  * Functionality is similar to sx locks, in that none of the additional lockmgr
1225  * features are supported. To be clear, these are NOT supported:
1226  * 1. shared locking disablement
1227  * 2. returning with an error after sleep
1228  * 3. unlocking the interlock
1229  *
1230  * If in doubt, use lockmgr_lock_flags.
1231  */
1232 int
1233 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1234 {
1235         uintptr_t x;
1236
1237         MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1238         MPASS((flags & LK_INTERLOCK) == 0);
1239         MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1240
1241         if (LK_CAN_WITNESS(flags))
1242                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1243                     file, line, NULL);
1244         x = lockmgr_read_value(lk);
1245         if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1246                 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1247                 return (0);
1248         }
1249
1250         return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1251 }
1252
1253 int
1254 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1255 {
1256         uintptr_t tid;
1257
1258         MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1259         MPASS((flags & LK_INTERLOCK) == 0);
1260
1261         if (LK_CAN_WITNESS(flags))
1262                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1263                     LOP_EXCLUSIVE, file, line, NULL);
1264         tid = (uintptr_t)curthread;
1265         if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1266                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1267                     flags);
1268                 return (0);
1269         }
1270
1271         return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1272 }
1273
1274 int
1275 lockmgr_unlock(struct lock *lk)
1276 {
1277         uintptr_t x, tid;
1278         const char *file;
1279         int line;
1280
1281         file = __FILE__;
1282         line = __LINE__;
1283
1284         _lockmgr_assert(lk, KA_LOCKED, file, line);
1285         x = lockmgr_read_value(lk);
1286         if (__predict_true((x & LK_SHARE) != 0)) {
1287                 lockmgr_note_shared_release(lk, file, line);
1288                 if (lockmgr_sunlock_try(lk, &x)) {
1289                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1290                 } else {
1291                         return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1292                 }
1293         } else {
1294                 tid = (uintptr_t)curthread;
1295                 lockmgr_note_exclusive_release(lk, file, line);
1296                 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1297                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1298                 } else {
1299                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1300                 }
1301         }
1302         return (0);
1303 }
1304
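/*
 * Editorial sketch: the lightweight entry points above cover the common case
 * with no interlock and no custom sleep behaviour, e.g.
 *
 *	lockmgr_slock(lk, LK_SHARED, __FILE__, __LINE__);
 *	...read-only access...
 *	lockmgr_unlock(lk);
 *
 * Anything needing LK_INTERLOCK, LK_SLEEPFAIL or an error return after a
 * sleep must go through lockmgr_lock_flags() or __lockmgr_args() instead.
 */
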
1305 int
1306 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1307     const char *wmesg, int pri, int timo, const char *file, int line)
1308 {
1309         GIANT_DECLARE;
1310         struct lockmgr_wait lwa;
1311         struct lock_class *class;
1312         const char *iwmesg;
1313         uintptr_t tid, v, x;
1314         u_int op, realexslp;
1315         int error, ipri, itimo, queue, wakeup_swapper;
1316 #ifdef LOCK_PROFILING
1317         uint64_t waittime = 0;
1318         int contested = 0;
1319 #endif
1320
1321         if (SCHEDULER_STOPPED())
1322                 return (0);
1323
1324         error = 0;
1325         tid = (uintptr_t)curthread;
1326         op = (flags & LK_TYPE_MASK);
1327         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1328         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1329         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1330
1331         lwa.iwmesg = iwmesg;
1332         lwa.ipri = ipri;
1333         lwa.itimo = itimo;
1334
1335         MPASS((flags & ~LK_TOTAL_MASK) == 0);
1336         KASSERT((op & (op - 1)) == 0,
1337             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1338         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1339             (op != LK_DOWNGRADE && op != LK_RELEASE),
1340             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1341             __func__, file, line));
1342         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1343             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1344             __func__, file, line));
1345         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1346             ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1347             lk->lock_object.lo_name, file, line));
1348
1349         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1350
1351         if (lk->lock_object.lo_flags & LK_NOSHARE) {
1352                 switch (op) {
1353                 case LK_SHARED:
1354                         op = LK_EXCLUSIVE;
1355                         break;
1356                 case LK_UPGRADE:
1357                 case LK_TRYUPGRADE:
1358                 case LK_DOWNGRADE:
1359                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1360                             file, line);
1361                         if (flags & LK_INTERLOCK)
1362                                 class->lc_unlock(ilk);
1363                         return (0);
1364                 }
1365         }
1366
1367         wakeup_swapper = 0;
1368         switch (op) {
1369         case LK_SHARED:
1370                 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1371                 break;
1372         case LK_UPGRADE:
1373         case LK_TRYUPGRADE:
1374                 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1375                 break;
1376         case LK_EXCLUSIVE:
1377                 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1378                 break;
1379         case LK_DOWNGRADE:
1380                 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1381                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1382
1383                 /*
1384                  * Panic if the lock is recursed.
1385                  */
1386                 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1387                         if (flags & LK_INTERLOCK)
1388                                 class->lc_unlock(ilk);
1389                         panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1390                             __func__, iwmesg, file, line);
1391                 }
1392                 TD_SLOCKS_INC(curthread);
1393
1394                 /*
1395                  * In order to preserve waiters flags, just spin.
1396                  */
1397                 for (;;) {
1398                         x = lockmgr_read_value(lk);
1399                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1400                         x &= LK_ALL_WAITERS;
1401                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1402                             LK_SHARERS_LOCK(1) | x))
1403                                 break;
1404                         cpu_spinwait();
1405                 }
1406                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1407                 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1408                 break;
1409         case LK_RELEASE:
1410                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1411                 x = lockmgr_read_value(lk);
1412
1413                 if (__predict_true((x & LK_SHARE) != 0)) {
1414                         lockmgr_note_shared_release(lk, file, line);
1415                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1416                 } else {
1417                         lockmgr_note_exclusive_release(lk, file, line);
1418                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1419                 }
1420                 break;
1421         case LK_DRAIN:
1422                 if (LK_CAN_WITNESS(flags))
1423                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1424                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1425                             ilk : NULL);
1426
1427                 /*
1428                  * Trying to drain a lock we already own will result in a
1429                  * deadlock.
1430                  */
1431                 if (lockmgr_xlocked(lk)) {
1432                         if (flags & LK_INTERLOCK)
1433                                 class->lc_unlock(ilk);
1434                         panic("%s: draining %s with the lock held @ %s:%d\n",
1435                             __func__, iwmesg, file, line);
1436                 }
1437
1438                 for (;;) {
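                             /*
                              * Fast path: if the lock is completely free,
                              * take it exclusively and stop draining.
                              */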
1439                         if (lk->lk_lock == LK_UNLOCKED &&
1440                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1441                                 break;
1442
1443 #ifdef HWPMC_HOOKS
1444                         PMC_SOFT_CALL( , , lock, failed);
1445 #endif
1446                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1447                             &contested, &waittime);
1448
1449                         /*
1450                          * If the caller requested a non-sleeping (try)
1451                          * operation, just give up and return.
1452                          */
1453                         if (LK_TRYOP(flags)) {
1454                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1455                                     __func__, lk);
1456                                 error = EBUSY;
1457                                 break;
1458                         }
1459
1460                         /*
1461                          * Acquire the sleepqueue chain lock because we
1462                          * will probably need to manipulate the waiters flags.
1463                          */
1464                         sleepq_lock(&lk->lock_object);
1465                         x = lockmgr_read_value(lk);
1466
1467                         /*
1468                          * If the lock has been released while we spun on
1469                          * the sleepqueue chain lock, just try again.
1470                          */
1471                         if (x == LK_UNLOCKED) {
1472                                 sleepq_release(&lk->lock_object);
1473                                 continue;
1474                         }
1475
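                             /*
                              * If only waiter and spinner bits remain set, no
                              * thread holds the lock; decide which sleep
                              * queue to wake up.
                              */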
1476                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1477                         if ((x & ~v) == LK_UNLOCKED) {
1478                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1479
1480                                 /*
1481                                  * If interruptible sleeps left the exclusive
1482                                  * queue empty, avoid starving the threads
1483                                  * sleeping on the shared queue by giving
1484                                  * them precedence and clearing the exclusive
1485                                  * waiters bit anyway.
1486                                  * Please note that the lk_exslpfail count
1487                                  * may overstate the real number of waiters
1488                                  * with the LK_SLEEPFAIL flag on, because
1489                                  * such waiters may also be using
1490                                  * interruptible sleeps, so lk_exslpfail
1491                                  * should be treated as an upper bound,
1492                                  * including the edge cases.
1493                                  */
1494                                 if (v & LK_EXCLUSIVE_WAITERS) {
1495                                         queue = SQ_EXCLUSIVE_QUEUE;
1496                                         v &= ~LK_EXCLUSIVE_WAITERS;
1497                                 } else {
1498                                         /*
1499                                          * Exclusive waiters sleeping with
1500                                          * LK_SLEEPFAIL on and using
1501                                          * interruptible sleeps/timeouts may
1502                                          * have left spurious lk_exslpfail
1503                                          * counts on, so clean it up anyway.
1504                                          */
1505                                         MPASS(v & LK_SHARED_WAITERS);
1506                                         lk->lk_exslpfail = 0;
1507                                         queue = SQ_SHARED_QUEUE;
1508                                         v &= ~LK_SHARED_WAITERS;
1509                                 }
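                                     /*
                                      * If every sleeper on the exclusive queue
                                      * is an LK_SLEEPFAIL waiter, wake that
                                      * queue here and let the shared queue be
                                      * woken below instead.
                                      */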
1510                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1511                                         realexslp =
1512                                             sleepq_sleepcnt(&lk->lock_object,
1513                                             SQ_EXCLUSIVE_QUEUE);
1514                                         if (lk->lk_exslpfail >= realexslp) {
1515                                                 lk->lk_exslpfail = 0;
1516                                                 queue = SQ_SHARED_QUEUE;
1517                                                 v &= ~LK_SHARED_WAITERS;
1518                                                 if (realexslp != 0) {
1519                                                         LOCK_LOG2(lk,
1520                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1521                                                             __func__, lk);
1522                                                         LOCK_LOG2(lk,
1523                         "%s: %p waking up threads on the exclusive queue",
1524                                                             __func__, lk);
1525                                                         wakeup_swapper =
1526                                                             sleepq_broadcast(
1527                                                             &lk->lock_object,
1528                                                             SLEEPQ_LK, 0,
1529                                                             SQ_EXCLUSIVE_QUEUE);
1530                                                 }
1531                                         } else
1532                                                 lk->lk_exslpfail = 0;
1533                                 }
1534                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1535                                         sleepq_release(&lk->lock_object);
1536                                         continue;
1537                                 }
1538                                 LOCK_LOG3(lk,
1539                                 "%s: %p waking up all threads on the %s queue",
1540                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1541                                     "shared" : "exclusive");
1542                                 wakeup_swapper |= sleepq_broadcast(
1543                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1544
1545                                 /*
1546                                  * If shared waiters have been woken up, we
1547                                  * need to wait for one of them to acquire
1548                                  * the lock before setting the exclusive
1549                                  * waiters flag, in order to avoid a deadlock.
1550                                  */
1551                                 if (queue == SQ_SHARED_QUEUE) {
1552                                         for (v = lk->lk_lock;
1553                                             (v & LK_SHARE) && !LK_SHARERS(v);
1554                                             v = lk->lk_lock)
1555                                                 cpu_spinwait();
1556                                 }
1557                         }
1558
1559                         /*
1560                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1561                          * fail, loop back and retry.
1562                          */
1563                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1564                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1565                                     x | LK_EXCLUSIVE_WAITERS)) {
1566                                         sleepq_release(&lk->lock_object);
1567                                         continue;
1568                                 }
1569                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1570                                     __func__, lk);
1571                         }
1572
1573                         /*
1574                          * As long as we have been unable to acquire the
1575                          * exclusive lock and the exclusive waiters flag
1576                          * is set, we will sleep.
1577                          */
1578                         if (flags & LK_INTERLOCK) {
1579                                 class->lc_unlock(ilk);
1580                                 flags &= ~LK_INTERLOCK;
1581                         }
1582                         GIANT_SAVE();
1583                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1584                             SQ_EXCLUSIVE_QUEUE);
1585                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1586                         GIANT_RESTORE();
1587                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1588                             __func__, lk);
1589                 }
1590
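                     /*
                      * If the drain succeeded, do the usual bookkeeping for
                      * an exclusive acquisition.
                      */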
1591                 if (error == 0) {
1592                         lock_profile_obtain_lock_success(&lk->lock_object,
1593                             false, contested, waittime, file, line);
1594                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1595                             lk->lk_recurse, file, line);
1596                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1597                             LK_TRYWIT(flags), file, line);
1598                         TD_LOCKS_INC(curthread);
1599                         STACK_SAVE(lk);
1600                 }
1601                 break;
1602         default:
1603                 if (flags & LK_INTERLOCK)
1604                         class->lc_unlock(ilk);
1605                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1606         }
1607
1608         if (flags & LK_INTERLOCK)
1609                 class->lc_unlock(ilk);
1610         if (wakeup_swapper)
1611                 kick_proc0();
1612
1613         return (error);
1614 }
1615
1616 void
1617 _lockmgr_disown(struct lock *lk, const char *file, int line)
1618 {
1619         uintptr_t tid, x;
1620
1621         if (SCHEDULER_STOPPED())
1622                 return;
1623
1624         tid = (uintptr_t)curthread;
1625         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1626
1627         /*
1628          * Panic if the lock is recursed.
1629          */
1630         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1631                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1632                     __func__,  file, line);
1633
1634         /*
1635          * If the owner is already LK_KERNPROC, just skip the whole operation.
1636          */
1637         if (LK_HOLDER(lk->lk_lock) != tid)
1638                 return;
1639         lock_profile_release_lock(&lk->lock_object, false);
1640         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1641         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1642         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1643         TD_LOCKS_DEC(curthread);
1644         STACK_SAVE(lk);
1645
1646         /*
1647          * In order to preserve the waiters flags, just spin.
1648          */
1649         for (;;) {
1650                 x = lockmgr_read_value(lk);
1651                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1652                 x &= LK_ALL_WAITERS;
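                     /*
                      * Atomically hand ownership off to LK_KERNPROC, keeping
                      * any waiter bits intact.
                      */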
1653                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1654                     LK_KERNPROC | x))
1655                         return;
1656                 cpu_spinwait();
1657         }
1658 }
1659
1660 void
1661 lockmgr_printinfo(const struct lock *lk)
1662 {
1663         struct thread *td;
1664         uintptr_t x;
1665
1666         if (lk->lk_lock == LK_UNLOCKED)
1667                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1668         else if (lk->lk_lock & LK_SHARE)
1669                 printf("lock type %s: SHARED (count %ju)\n",
1670                     lk->lock_object.lo_name,
1671                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1672         else {
1673                 td = lockmgr_xholder(lk);
1674                 if (td == (struct thread *)LK_KERNPROC)
1675                         printf("lock type %s: EXCL by KERNPROC\n",
1676                             lk->lock_object.lo_name);
1677                 else
1678                         printf("lock type %s: EXCL by thread %p "
1679                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1680                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1681                             td->td_tid);
1682         }
1683
1684         x = lk->lk_lock;
1685         if (x & LK_EXCLUSIVE_WAITERS)
1686                 printf(" with exclusive waiters pending\n");
1687         if (x & LK_SHARED_WAITERS)
1688                 printf(" with shared waiters pending\n");
1689         if (x & LK_EXCLUSIVE_SPINNERS)
1690                 printf(" with exclusive spinners pending\n");
1691
1692         STACK_PRINT(lk);
1693 }
1694
1695 int
1696 lockstatus(const struct lock *lk)
1697 {
1698         uintptr_t v, x;
1699         int ret;
1700
1701         ret = LK_SHARED;
1702         x = lockmgr_read_value(lk);
1703         v = LK_HOLDER(x);
1704
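             /*
              * Classify the snapshot: exclusively held by this thread (or by
              * LK_KERNPROC), exclusively held by another thread, shared, or
              * unlocked.
              */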
1705         if ((x & LK_SHARE) == 0) {
1706                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1707                         ret = LK_EXCLUSIVE;
1708                 else
1709                         ret = LK_EXCLOTHER;
1710         } else if (x == LK_UNLOCKED)
1711                 ret = 0;
1712
1713         return (ret);
1714 }
1715
1716 #ifdef INVARIANT_SUPPORT
1717
1718 FEATURE(invariant_support,
1719     "Support for modules compiled with INVARIANTS option");
1720
1721 #ifndef INVARIANTS
1722 #undef  _lockmgr_assert
1723 #endif
1724
1725 void
1726 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1727 {
1728         int slocked = 0;
1729
1730         if (SCHEDULER_STOPPED())
1731                 return;
1732         switch (what) {
1733         case KA_SLOCKED:
1734         case KA_SLOCKED | KA_NOTRECURSED:
1735         case KA_SLOCKED | KA_RECURSED:
1736                 slocked = 1;
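                     /* FALLTHROUGH */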
1737         case KA_LOCKED:
1738         case KA_LOCKED | KA_NOTRECURSED:
1739         case KA_LOCKED | KA_RECURSED:
1740 #ifdef WITNESS
1741
1742                 /*
1743                  * We cannot trust WITNESS if the lock is held in exclusive
1744                  * mode and a call to lockmgr_disown() happened.
1745                  * Work around this by skipping the check if the lock is
1746                  * held in exclusive mode, even for the KA_LOCKED case.
1747                  */
1748                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1749                         witness_assert(&lk->lock_object, what, file, line);
1750                         break;
1751                 }
1752 #endif
1753                 if (lk->lk_lock == LK_UNLOCKED ||
1754                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1755                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1756                         panic("Lock %s not %slocked @ %s:%d\n",
1757                             lk->lock_object.lo_name, slocked ? "share" : "",
1758                             file, line);
1759
1760                 if ((lk->lk_lock & LK_SHARE) == 0) {
1761                         if (lockmgr_recursed(lk)) {
1762                                 if (what & KA_NOTRECURSED)
1763                                         panic("Lock %s recursed @ %s:%d\n",
1764                                             lk->lock_object.lo_name, file,
1765                                             line);
1766                         } else if (what & KA_RECURSED)
1767                                 panic("Lock %s not recursed @ %s:%d\n",
1768                                     lk->lock_object.lo_name, file, line);
1769                 }
1770                 break;
1771         case KA_XLOCKED:
1772         case KA_XLOCKED | KA_NOTRECURSED:
1773         case KA_XLOCKED | KA_RECURSED:
1774                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1775                         panic("Lock %s not exclusively locked @ %s:%d\n",
1776                             lk->lock_object.lo_name, file, line);
1777                 if (lockmgr_recursed(lk)) {
1778                         if (what & KA_NOTRECURSED)
1779                                 panic("Lock %s recursed @ %s:%d\n",
1780                                     lk->lock_object.lo_name, file, line);
1781                 } else if (what & KA_RECURSED)
1782                         panic("Lock %s not recursed @ %s:%d\n",
1783                             lk->lock_object.lo_name, file, line);
1784                 break;
1785         case KA_UNLOCKED:
1786                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1787                         panic("Lock %s exclusively locked @ %s:%d\n",
1788                             lk->lock_object.lo_name, file, line);
1789                 break;
1790         default:
1791                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1792                     line);
1793         }
1794 }
1795 #endif
1796
1797 #ifdef DDB
1798 int
1799 lockmgr_chain(struct thread *td, struct thread **ownerp)
1800 {
1801         const struct lock *lk;
1802
1803         lk = td->td_wchan;
1804
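             /* Bail out if the thread is not blocked on a lockmgr lock. */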
1805         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1806                 return (0);
1807         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1808         if (lk->lk_lock & LK_SHARE)
1809                 db_printf("SHARED (count %ju)\n",
1810                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1811         else
1812                 db_printf("EXCL\n");
1813         *ownerp = lockmgr_xholder(lk);
1814
1815         return (1);
1816 }
1817
1818 static void
1819 db_show_lockmgr(const struct lock_object *lock)
1820 {
1821         struct thread *td;
1822         const struct lock *lk;
1823
1824         lk = (const struct lock *)lock;
1825
1826         db_printf(" state: ");
1827         if (lk->lk_lock == LK_UNLOCKED)
1828                 db_printf("UNLOCKED\n");
1829         else if (lk->lk_lock & LK_SHARE)
1830                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1831         else {
1832                 td = lockmgr_xholder(lk);
1833                 if (td == (struct thread *)LK_KERNPROC)
1834                         db_printf("XLOCK: LK_KERNPROC\n");
1835                 else
1836                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1837                             td->td_tid, td->td_proc->p_pid,
1838                             td->td_proc->p_comm);
1839                 if (lockmgr_recursed(lk))
1840                         db_printf(" recursed: %d\n", lk->lk_recurse);
1841         }
1842         db_printf(" waiters: ");
1843         switch (lk->lk_lock & LK_ALL_WAITERS) {
1844         case LK_SHARED_WAITERS:
1845                 db_printf("shared\n");
1846                 break;
1847         case LK_EXCLUSIVE_WAITERS:
1848                 db_printf("exclusive\n");
1849                 break;
1850         case LK_ALL_WAITERS:
1851                 db_printf("shared and exclusive\n");
1852                 break;
1853         default:
1854                 db_printf("none\n");
1855         }
1856         db_printf(" spinners: ");
1857         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1858                 db_printf("exclusive\n");
1859         else
1860                 db_printf("none\n");
1861 }
1862 #endif