[FreeBSD/FreeBSD.git] sys/kern/kern_lock.c
1 /*-
2  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice(s), this list of conditions and the following disclaimer as
10  *    the first lines of this file unmodified other than the possible
11  *    addition of one or more copyright notices.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice(s), this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26  * DAMAGE.
27  */
28
29 #include "opt_adaptive_lockmgrs.h"
30 #include "opt_ddb.h"
31 #include "opt_kdtrace.h"
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/ktr.h>
38 #include <sys/lock.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/mutex.h>
42 #include <sys/proc.h>
43 #include <sys/sleepqueue.h>
44 #ifdef DEBUG_LOCKS
45 #include <sys/stack.h>
46 #endif
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
49
50 #include <machine/cpu.h>
51
52 #ifdef DDB
53 #include <ddb/ddb.h>
54 #endif
55
56 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
57     (LK_ADAPTIVE | LK_NOSHARE));
58 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
59     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
60
61 #define SQ_EXCLUSIVE_QUEUE      0
62 #define SQ_SHARED_QUEUE         1
63
64 #ifndef INVARIANTS
65 #define _lockmgr_assert(lk, what, file, line)
66 #define TD_LOCKS_INC(td)
67 #define TD_LOCKS_DEC(td)
68 #else
69 #define TD_LOCKS_INC(td)        ((td)->td_locks++)
70 #define TD_LOCKS_DEC(td)        ((td)->td_locks--)
71 #endif
72 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
73 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
74
75 #ifndef DEBUG_LOCKS
76 #define STACK_PRINT(lk)
77 #define STACK_SAVE(lk)
78 #define STACK_ZERO(lk)
79 #else
80 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
81 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
82 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
83 #endif
84
85 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
86         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
87                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
88 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
89         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
90                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
91
92 #define GIANT_DECLARE                                                   \
93         int _i = 0;                                                     \
94         WITNESS_SAVE_DECL(Giant)
95 #define GIANT_RESTORE() do {                                            \
96         if (_i > 0) {                                                   \
97                 while (_i--)                                            \
98                         mtx_lock(&Giant);                               \
99                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
100         }                                                               \
101 } while (0)
102 #define GIANT_SAVE() do {                                               \
103         if (mtx_owned(&Giant)) {                                        \
104                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
105                 while (mtx_owned(&Giant)) {                             \
106                         _i++;                                           \
107                         mtx_unlock(&Giant);                             \
108                 }                                                       \
109         }                                                               \
110 } while (0)
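/*
 * A minimal usage sketch (the function below is hypothetical, for
 * illustration only): GIANT_DECLARE, GIANT_SAVE() and GIANT_RESTORE()
 * bracket a blocking operation so that Giant, if owned, is released as
 * many times as it is recursed before blocking and reacquired the same
 * number of times afterwards, with the WITNESS state saved and restored
 * across the block.
 *
 *	static void
 *	example_block(struct lock *lk)
 *	{
 *		GIANT_DECLARE;
 *
 *		GIANT_SAVE();
 *		... block on the lock's sleepqueue here ...
 *		GIANT_RESTORE();
 *	}
 */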
111
112 #define LK_CAN_SHARE(x)                                                 \
113         (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||      \
114         ((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||                           \
115         curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
116 #define LK_TRYOP(x)                                                     \
117         ((x) & LK_NOWAIT)
118
119 #define LK_CAN_WITNESS(x)                                               \
120         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
121 #define LK_TRYWIT(x)                                                    \
122         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
123
124 #define LK_CAN_ADAPT(lk, f)                                             \
125         (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&             \
126         ((f) & LK_SLEEPFAIL) == 0)
127
128 #define lockmgr_disowned(lk)                                            \
129         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
130
131 #define lockmgr_xlocked(lk)                                             \
132         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
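/*
 * A rough reading of the two macros above (the exact bit layout lives in
 * <sys/lockmgr.h>, so treat this as a sketch): when LK_SHARE is set the
 * lk_lock word carries the count of shared holders, otherwise it carries
 * the owning thread pointer (or LK_KERNPROC for a disowned lock), with the
 * low bits recording waiters and spinners.  Masking with
 * ~(LK_FLAGMASK & ~LK_SHARE) strips the waiters/spinners bits while
 * keeping LK_SHARE, so the comparison only matches a lock that is held
 * exclusively by curthread (or disowned to LK_KERNPROC).
 */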
133
134 static void      assert_lockmgr(struct lock_object *lock, int how);
135 #ifdef DDB
136 static void      db_show_lockmgr(struct lock_object *lock);
137 #endif
138 static void      lock_lockmgr(struct lock_object *lock, int how);
139 #ifdef KDTRACE_HOOKS
140 static int       owner_lockmgr(struct lock_object *lock, struct thread **owner);
141 #endif
142 static int       unlock_lockmgr(struct lock_object *lock);
143
144 struct lock_class lock_class_lockmgr = {
145         .lc_name = "lockmgr",
146         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
147         .lc_assert = assert_lockmgr,
148 #ifdef DDB
149         .lc_ddb_show = db_show_lockmgr,
150 #endif
151         .lc_lock = lock_lockmgr,
152         .lc_unlock = unlock_lockmgr,
153 #ifdef KDTRACE_HOOKS
154         .lc_owner = owner_lockmgr,
155 #endif
156 };
157
158 #ifdef ADAPTIVE_LOCKMGRS
159 static u_int alk_retries = 10;
160 static u_int alk_loops = 10000;
161 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
162     "lockmgr debugging");
163 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
164 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
165 #endif
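/*
 * With "options ADAPTIVE_LOCKMGRS" in the kernel configuration, the two
 * knobs above surface as the debug.lockmgr.retries and debug.lockmgr.loops
 * sysctls and bound the adaptive spinning attempted in __lockmgr_args()
 * before a thread falls back to sleeping (see the spintries/alk_loops
 * usage below).
 */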
166
167 static __inline struct thread *
168 lockmgr_xholder(struct lock *lk)
169 {
170         uintptr_t x;
171
172         x = lk->lk_lock;
173         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
174 }
175
176 /*
177  * This function assumes the sleepq_lock is held and returns with it unheld.
178  * It also assumes the generic interlock is sane and previously checked.
179  * If LK_INTERLOCK is specified the interlock is not reacquired after the
180  * sleep.
181  */
182 static __inline int
183 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
184     const char *wmesg, int pri, int timo, int queue)
185 {
186         GIANT_DECLARE;
187         struct lock_class *class;
188         int catch, error;
189
190         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
191         catch = pri & PCATCH;
192         pri &= PRIMASK;
193         error = 0;
194
195         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
196             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
197
198         if (flags & LK_INTERLOCK)
199                 class->lc_unlock(ilk);
200         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
201                 lk->lk_exslpfail++;
202         GIANT_SAVE();
203         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
204             SLEEPQ_INTERRUPTIBLE : 0), queue);
205         if ((flags & LK_TIMELOCK) && timo)
206                 sleepq_set_timeout(&lk->lock_object, timo);
207
208         /*
209          * Decide how the actual sleep should be performed.
210          */
211         if ((flags & LK_TIMELOCK) && timo && catch)
212                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
213         else if ((flags & LK_TIMELOCK) && timo)
214                 error = sleepq_timedwait(&lk->lock_object, pri);
215         else if (catch)
216                 error = sleepq_wait_sig(&lk->lock_object, pri);
217         else
218                 sleepq_wait(&lk->lock_object, pri);
219         GIANT_RESTORE();
220         if ((flags & LK_SLEEPFAIL) && error == 0)
221                 error = ENOLCK;
222
223         return (error);
224 }
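/*
 * For reference, a sketch of the errors a sleeplk() caller can see; the
 * exact values come from the sleepqueue(9) primitives used above, so treat
 * them as an assumption rather than a contract:
 *
 *	0		normal wakeup, LK_SLEEPFAIL not requested
 *	EINTR/ERESTART	interruptible sleep (PCATCH) broken by a signal
 *	EWOULDBLOCK	LK_TIMELOCK timeout expired
 *	ENOLCK		LK_SLEEPFAIL requested and the sleep itself succeeded
 */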
225
226 static __inline int
227 wakeupshlk(struct lock *lk, const char *file, int line)
228 {
229         uintptr_t v, x;
230         u_int realexslp;
231         int queue, wakeup_swapper;
232
233         TD_LOCKS_DEC(curthread);
234         TD_SLOCKS_DEC(curthread);
235         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
236         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
237
238         wakeup_swapper = 0;
239         for (;;) {
240                 x = lk->lk_lock;
241
242                 /*
243                  * If there is more than one shared lock held, just drop one
244                  * and return.
245                  */
246                 if (LK_SHARERS(x) > 1) {
247                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
248                             x - LK_ONE_SHARER))
249                                 break;
250                         continue;
251                 }
252
253                 /*
254                  * If there are no waiters on the exclusive queue, drop the
255                  * lock quickly.
256                  */
257                 if ((x & LK_ALL_WAITERS) == 0) {
258                         MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
259                             LK_SHARERS_LOCK(1));
260                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
261                                 break;
262                         continue;
263                 }
264
265                 /*
266                  * We should have a sharer with waiters, so enter the hard
267                  * path in order to handle wakeups correctly.
268                  */
269                 sleepq_lock(&lk->lock_object);
270                 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
271                 v = LK_UNLOCKED;
272
273                 /*
274                  * If the lock has exclusive waiters, give them preference in
275                  * order to avoid a deadlock with shared runners-up.
276                  * If interruptible sleeps left the exclusive queue empty,
277                  * avoid starvation of the threads sleeping on the shared
278                  * queue by giving them precedence and clearing the
279                  * exclusive waiters bit anyway.
280                  * Please note that the lk_exslpfail count may overstate
281                  * the real number of waiters with the LK_SLEEPFAIL flag on,
282                  * because such sleeps may be interruptible as well, so
283                  * lk_exslpfail should be treated as an upper
284                  * bound, including the edge cases.
285                  */
286                 realexslp = sleepq_sleepcnt(&lk->lock_object,
287                     SQ_EXCLUSIVE_QUEUE);
288                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
289                         if (lk->lk_exslpfail < realexslp) {
290                                 lk->lk_exslpfail = 0;
291                                 queue = SQ_EXCLUSIVE_QUEUE;
292                                 v |= (x & LK_SHARED_WAITERS);
293                         } else {
294                                 lk->lk_exslpfail = 0;
295                                 LOCK_LOG2(lk,
296                                     "%s: %p has only LK_SLEEPFAIL sleepers",
297                                     __func__, lk);
298                                 LOCK_LOG2(lk,
299                             "%s: %p waking up threads on the exclusive queue",
300                                     __func__, lk);
301                                 wakeup_swapper =
302                                     sleepq_broadcast(&lk->lock_object,
303                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
304                                 queue = SQ_SHARED_QUEUE;
305                         }
306
307                 } else {
308
309                         /*
310                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
311                          * and using interruptible sleeps/timeout may have
312                          * left spurious lk_exslpfail counts on, so clean
313                          * them up anyway.
314                          */
315                         lk->lk_exslpfail = 0;
316                         queue = SQ_SHARED_QUEUE;
317                 }
318
319                 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
320                     v)) {
321                         sleepq_release(&lk->lock_object);
322                         continue;
323                 }
324                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
325                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
326                     "exclusive");
327                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
328                     0, queue);
329                 sleepq_release(&lk->lock_object);
330                 break;
331         }
332
333         lock_profile_release_lock(&lk->lock_object);
334         return (wakeup_swapper);
335 }
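/*
 * Note on the return value: wakeupshlk() only reports whether the swapper
 * needs to be kicked; callers accumulate the flag and invoke kick_proc0()
 * once every sleepqueue lock has been dropped, as done at the end of
 * __lockmgr_args() below.
 */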
336
337 static void
338 assert_lockmgr(struct lock_object *lock, int what)
339 {
340
341         panic("lockmgr locks do not support assertions");
342 }
343
344 static void
345 lock_lockmgr(struct lock_object *lock, int how)
346 {
347
348         panic("lockmgr locks do not support sleep interlocking");
349 }
350
351 static int
352 unlock_lockmgr(struct lock_object *lock)
353 {
354
355         panic("lockmgr locks do not support sleep interlocking");
356 }
357
358 #ifdef KDTRACE_HOOKS
359 static int
360 owner_lockmgr(struct lock_object *lock, struct thread **owner)
361 {
362
363         panic("lockmgr locks do not support owner inquiring");
364 }
365 #endif
366
367 void
368 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
369 {
370         int iflags;
371
372         MPASS((flags & ~LK_INIT_MASK) == 0);
373         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
374             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
375             &lk->lk_lock));
376
377         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
378         if (flags & LK_CANRECURSE)
379                 iflags |= LO_RECURSABLE;
380         if ((flags & LK_NODUP) == 0)
381                 iflags |= LO_DUPOK;
382         if (flags & LK_NOPROFILE)
383                 iflags |= LO_NOPROFILE;
384         if ((flags & LK_NOWITNESS) == 0)
385                 iflags |= LO_WITNESS;
386         if (flags & LK_QUIET)
387                 iflags |= LO_QUIET;
388         iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
389
390         lk->lk_lock = LK_UNLOCKED;
391         lk->lk_recurse = 0;
392         lk->lk_exslpfail = 0;
393         lk->lk_timo = timo;
394         lk->lk_pri = pri;
395         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
396         STACK_ZERO(lk);
397 }
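/*
 * The pri and timo values recorded above act as per-lock defaults: a later
 * call into __lockmgr_args() with LK_PRIO_DEFAULT or LK_TIMO_DEFAULT picks
 * up lk_pri and lk_timo instead (see the ipri/itimo setup there).
 */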
398
399 /*
400  * XXX: Gross hacks to manipulate external lock flags after
401  * initialization.  Used for certain vnode and buf locks.
402  */
403 void
404 lockallowshare(struct lock *lk)
405 {
406
407         lockmgr_assert(lk, KA_XLOCKED);
408         lk->lock_object.lo_flags &= ~LK_NOSHARE;
409 }
410
411 void
412 lockallowrecurse(struct lock *lk)
413 {
414
415         lockmgr_assert(lk, KA_XLOCKED);
416         lk->lock_object.lo_flags |= LO_RECURSABLE;
417 }
418
419 void
420 lockdisablerecurse(struct lock *lk)
421 {
422
423         lockmgr_assert(lk, KA_XLOCKED);
424         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
425 }
426
427 void
428 lockdestroy(struct lock *lk)
429 {
430
431         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
432         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
433         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
434         lock_destroy(&lk->lock_object);
435 }
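/*
 * A minimal lifecycle sketch for the routines above, assuming the usual
 * lockmgr() convenience wrapper from <sys/lockmgr.h>; the lock name and
 * priority below are purely illustrative:
 *
 *	struct lock examplelk;
 *
 *	lockinit(&examplelk, PVFS, "example", 0, LK_CANRECURSE);
 *	...
 *	lockmgr(&examplelk, LK_EXCLUSIVE, NULL);
 *	... exclusive section ...
 *	lockmgr(&examplelk, LK_RELEASE, NULL);
 *	...
 *	lockdestroy(&examplelk);
 */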
436
437 int
438 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
439     const char *wmesg, int pri, int timo, const char *file, int line)
440 {
441         GIANT_DECLARE;
442         struct lock_class *class;
443         const char *iwmesg;
444         uintptr_t tid, v, x;
445         u_int op, realexslp;
446         int error, ipri, itimo, queue, wakeup_swapper;
447 #ifdef LOCK_PROFILING
448         uint64_t waittime = 0;
449         int contested = 0;
450 #endif
451 #ifdef ADAPTIVE_LOCKMGRS
452         volatile struct thread *owner;
453         u_int i, spintries = 0;
454 #endif
455
456         error = 0;
457         tid = (uintptr_t)curthread;
458         op = (flags & LK_TYPE_MASK);
459         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
460         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
461         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
462
463         MPASS((flags & ~LK_TOTAL_MASK) == 0);
464         KASSERT((op & (op - 1)) == 0,
465             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
466         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
467             (op != LK_DOWNGRADE && op != LK_RELEASE),
468             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
469             __func__, file, line));
470         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
471             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
472             __func__, file, line));
473
474         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
475         if (panicstr != NULL) {
476                 if (flags & LK_INTERLOCK)
477                         class->lc_unlock(ilk);
478                 return (0);
479         }
480
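        /*
         * For LK_NOSHARE locks, a shared request is simply served as an
         * exclusive one, and upgrade/downgrade requests reduce to the
         * assertion below, since such a lock is always effectively
         * exclusive.
         */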
481         if (lk->lock_object.lo_flags & LK_NOSHARE) {
482                 switch (op) {
483                 case LK_SHARED:
484                         op = LK_EXCLUSIVE;
485                         break;
486                 case LK_UPGRADE:
487                 case LK_DOWNGRADE:
488                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
489                             file, line);
490                         return (0);
491                 }
492         }
493
494         wakeup_swapper = 0;
495         switch (op) {
496         case LK_SHARED:
497                 if (LK_CAN_WITNESS(flags))
498                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
499                             file, line, ilk);
500                 for (;;) {
501                         x = lk->lk_lock;
502
503                         /*
504                          * If no other thread has an exclusive lock, or
505                          * no exclusive waiter is present, bump the count of
506                          * sharers.  Since we have to preserve the state of
507                          * waiters, if we fail to acquire the shared lock
508                          * loop back and retry.
509                          */
510                         if (LK_CAN_SHARE(x)) {
511                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
512                                     x + LK_ONE_SHARER))
513                                         break;
514                                 continue;
515                         }
516                         lock_profile_obtain_lock_failed(&lk->lock_object,
517                             &contested, &waittime);
518
519                         /*
520                          * If the lock is already held by curthread in
521                          * exclusive mode, avoid a deadlock.
522                          */
523                         if (LK_HOLDER(x) == tid) {
524                                 LOCK_LOG2(lk,
525                                     "%s: %p already held in exclusive mode",
526                                     __func__, lk);
527                                 error = EDEADLK;
528                                 break;
529                         }
530
531                         /*
532                          * If the caller is not expected to sleep (LK_NOWAIT),
533                          * just give up and return.
534                          */
535                         if (LK_TRYOP(flags)) {
536                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
537                                     __func__, lk);
538                                 error = EBUSY;
539                                 break;
540                         }
541
542 #ifdef ADAPTIVE_LOCKMGRS
543                         /*
544                          * If the owner is running on another CPU, spin until
545                          * the owner stops running or the state of the lock
546                          * changes.  We must handle two states here because,
547                          * after a failed acquisition, the lock can be held
548                          * either in exclusive mode or in shared mode (for
549                          * the writer starvation avoidance technique).
550                          */
551                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
552                             LK_HOLDER(x) != LK_KERNPROC) {
553                                 owner = (struct thread *)LK_HOLDER(x);
554                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
555                                         CTR3(KTR_LOCK,
556                                             "%s: spinning on %p held by %p",
557                                             __func__, lk, owner);
558
559                                 /*
560                                  * If we are also holding an interlock, drop it
561                                  * in order to avoid a deadlock if the lockmgr
562                                  * owner is adaptively spinning on the
563                                  * interlock itself.
564                                  */
565                                 if (flags & LK_INTERLOCK) {
566                                         class->lc_unlock(ilk);
567                                         flags &= ~LK_INTERLOCK;
568                                 }
569                                 GIANT_SAVE();
570                                 while (LK_HOLDER(lk->lk_lock) ==
571                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
572                                         cpu_spinwait();
573                                 GIANT_RESTORE();
574                                 continue;
575                         } else if (LK_CAN_ADAPT(lk, flags) &&
576                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
577                             spintries < alk_retries) {
578                                 if (flags & LK_INTERLOCK) {
579                                         class->lc_unlock(ilk);
580                                         flags &= ~LK_INTERLOCK;
581                                 }
582                                 GIANT_SAVE();
583                                 spintries++;
584                                 for (i = 0; i < alk_loops; i++) {
585                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
586                                                 CTR4(KTR_LOCK,
587                                     "%s: shared spinning on %p with %u and %u",
588                                                     __func__, lk, spintries, i);
589                                         x = lk->lk_lock;
590                                         if ((x & LK_SHARE) == 0 ||
591                                             LK_CAN_SHARE(x) != 0)
592                                                 break;
593                                         cpu_spinwait();
594                                 }
595                                 GIANT_RESTORE();
596                                 if (i != alk_loops)
597                                         continue;
598                         }
599 #endif
600
601                         /*
602                          * Acquire the sleepqueue chain lock because we
603                          * probably will need to manipulate waiters flags.
604                          */
605                         sleepq_lock(&lk->lock_object);
606                         x = lk->lk_lock;
607
608                         /*
609                          * If the lock can be acquired in shared mode, try
610                          * again.
611                          */
612                         if (LK_CAN_SHARE(x)) {
613                                 sleepq_release(&lk->lock_object);
614                                 continue;
615                         }
616
617 #ifdef ADAPTIVE_LOCKMGRS
618                         /*
619                          * The current lock owner might have started executing
620                          * on another CPU (or the lock could have changed
621                          * owner) while we were waiting on the sleepqueue
622                          * chain lock.  If so, drop the sleepqueue lock and try
623                          * again.
624                          */
625                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
626                             LK_HOLDER(x) != LK_KERNPROC) {
627                                 owner = (struct thread *)LK_HOLDER(x);
628                                 if (TD_IS_RUNNING(owner)) {
629                                         sleepq_release(&lk->lock_object);
630                                         continue;
631                                 }
632                         }
633 #endif
634
635                         /*
636                          * Try to set the LK_SHARED_WAITERS flag.  If we fail,
637                          * loop back and retry.
638                          */
639                         if ((x & LK_SHARED_WAITERS) == 0) {
640                                 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
641                                     x | LK_SHARED_WAITERS)) {
642                                         sleepq_release(&lk->lock_object);
643                                         continue;
644                                 }
645                                 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
646                                     __func__, lk);
647                         }
648
649                         /*
650                          * Since we have been unable to acquire the
651                          * shared lock and the shared waiters flag is set,
652                          * we will sleep.
653                          */
654                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
655                             SQ_SHARED_QUEUE);
656                         flags &= ~LK_INTERLOCK;
657                         if (error) {
658                                 LOCK_LOG3(lk,
659                                     "%s: interrupted sleep for %p with %d",
660                                     __func__, lk, error);
661                                 break;
662                         }
663                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
664                             __func__, lk);
665                 }
666                 if (error == 0) {
667                         lock_profile_obtain_lock_success(&lk->lock_object,
668                             contested, waittime, file, line);
669                         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
670                             line);
671                         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
672                             line);
673                         TD_LOCKS_INC(curthread);
674                         TD_SLOCKS_INC(curthread);
675                         STACK_SAVE(lk);
676                 }
677                 break;
678         case LK_UPGRADE:
679                 _lockmgr_assert(lk, KA_SLOCKED, file, line);
680                 v = lk->lk_lock;
681                 x = v & LK_ALL_WAITERS;
682                 v &= LK_EXCLUSIVE_SPINNERS;
683
684                 /*
685                  * Try to switch from one shared lock to an exclusive one.
686                  * We need to preserve waiters flags during the operation.
687                  */
688                 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
689                     tid | x)) {
690                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
691                             line);
692                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
693                             LK_TRYWIT(flags), file, line);
694                         TD_SLOCKS_DEC(curthread);
695                         break;
696                 }
697
698                 /*
699                  * We have been unable to upgrade, so just
700                  * give up the shared lock.
701                  */
702                 wakeup_swapper |= wakeupshlk(lk, file, line);
703
704                 /* FALLTHROUGH */
705         case LK_EXCLUSIVE:
706                 if (LK_CAN_WITNESS(flags))
707                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
708                             LOP_EXCLUSIVE, file, line, ilk);
709
710                 /*
711                  * If curthread already holds the lock and this one is
712                  * allowed to recurse, simply recurse on it.
713                  */
714                 if (lockmgr_xlocked(lk)) {
715                         if ((flags & LK_CANRECURSE) == 0 &&
716                             (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
717
718                                 /*
719                                  * If this is a try operation, give up and
720                                  * return EBUSY instead of panicking.
721                                  */
722                                 if (LK_TRYOP(flags)) {
723                                         LOCK_LOG2(lk,
724                                             "%s: %p fails the try operation",
725                                             __func__, lk);
726                                         error = EBUSY;
727                                         break;
728                                 }
729                                 if (flags & LK_INTERLOCK)
730                                         class->lc_unlock(ilk);
731                 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
732                                     __func__, iwmesg, file, line);
733                         }
734                         lk->lk_recurse++;
735                         LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
736                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
737                             lk->lk_recurse, file, line);
738                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
739                             LK_TRYWIT(flags), file, line);
740                         TD_LOCKS_INC(curthread);
741                         break;
742                 }
743
744                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
745                     tid)) {
746                         lock_profile_obtain_lock_failed(&lk->lock_object,
747                             &contested, &waittime);
748
749                         /*
750                          * If the caller is not expected to sleep (LK_NOWAIT),
751                          * just give up and return.
752                          */
753                         if (LK_TRYOP(flags)) {
754                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
755                                     __func__, lk);
756                                 error = EBUSY;
757                                 break;
758                         }
759
760 #ifdef ADAPTIVE_LOCKMGRS
761                         /*
762                          * If the owner is running on another CPU, spin until
763                          * the owner stops running or the state of the lock
764                          * changes.
765                          */
766                         x = lk->lk_lock;
767                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
768                             LK_HOLDER(x) != LK_KERNPROC) {
769                                 owner = (struct thread *)LK_HOLDER(x);
770                                 if (LOCK_LOG_TEST(&lk->lock_object, 0))
771                                         CTR3(KTR_LOCK,
772                                             "%s: spinning on %p held by %p",
773                                             __func__, lk, owner);
774
775                                 /*
776                                  * If we are also holding an interlock, drop it
777                                  * in order to avoid a deadlock if the lockmgr
778                                  * owner is adaptively spinning on the
779                                  * interlock itself.
780                                  */
781                                 if (flags & LK_INTERLOCK) {
782                                         class->lc_unlock(ilk);
783                                         flags &= ~LK_INTERLOCK;
784                                 }
785                                 GIANT_SAVE();
786                                 while (LK_HOLDER(lk->lk_lock) ==
787                                     (uintptr_t)owner && TD_IS_RUNNING(owner))
788                                         cpu_spinwait();
789                                 GIANT_RESTORE();
790                                 continue;
791                         } else if (LK_CAN_ADAPT(lk, flags) &&
792                             (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
793                             spintries < alk_retries) {
794                                 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
795                                     !atomic_cmpset_ptr(&lk->lk_lock, x,
796                                     x | LK_EXCLUSIVE_SPINNERS))
797                                         continue;
798                                 if (flags & LK_INTERLOCK) {
799                                         class->lc_unlock(ilk);
800                                         flags &= ~LK_INTERLOCK;
801                                 }
802                                 GIANT_SAVE();
803                                 spintries++;
804                                 for (i = 0; i < alk_loops; i++) {
805                                         if (LOCK_LOG_TEST(&lk->lock_object, 0))
806                                                 CTR4(KTR_LOCK,
807                                     "%s: shared spinning on %p with %u and %u",
808                                                     __func__, lk, spintries, i);
809                                         if ((lk->lk_lock &
810                                             LK_EXCLUSIVE_SPINNERS) == 0)
811                                                 break;
812                                         cpu_spinwait();
813                                 }
814                                 GIANT_RESTORE();
815                                 if (i != alk_loops)
816                                         continue;
817                         }
818 #endif
819
820                         /*
821                          * Acquire the sleepqueue chain lock because we
822                          * probably will need to manipulate waiters flags.
823                          */
824                         sleepq_lock(&lk->lock_object);
825                         x = lk->lk_lock;
826
827                         /*
828                          * If the lock has been released while we spun on
829                          * the sleepqueue chain lock just try again.
830                          */
831                         if (x == LK_UNLOCKED) {
832                                 sleepq_release(&lk->lock_object);
833                                 continue;
834                         }
835
836 #ifdef ADAPTIVE_LOCKMGRS
837                         /*
838                          * The current lock owner might have started executing
839                          * on another CPU (or the lock could have changed
840                          * owner) while we were waiting on the sleepqueue
841                          * chain lock.  If so, drop the sleepqueue lock and try
842                          * again.
843                          */
844                         if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
845                             LK_HOLDER(x) != LK_KERNPROC) {
846                                 owner = (struct thread *)LK_HOLDER(x);
847                                 if (TD_IS_RUNNING(owner)) {
848                                         sleepq_release(&lk->lock_object);
849                                         continue;
850                                 }
851                         }
852 #endif
853
854                         /*
855                          * The lock can be in a state where a queue of
856                          * waiters is pending but there is still no owner.
857                          * This happens when the lock is contested and an
858                          * owner is about to claim it.
859                          * If curthread is the one that successfully acquires
860                          * it, claim lock ownership and return, preserving the
861                          * waiters flags.
862                          */
863                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
864                         if ((x & ~v) == LK_UNLOCKED) {
865                                 v &= ~LK_EXCLUSIVE_SPINNERS;
866                                 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
867                                     tid | v)) {
868                                         sleepq_release(&lk->lock_object);
869                                         LOCK_LOG2(lk,
870                                             "%s: %p claimed by a new writer",
871                                             __func__, lk);
872                                         break;
873                                 }
874                                 sleepq_release(&lk->lock_object);
875                                 continue;
876                         }
877
878                         /*
879                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
880                          * fail, loop back and retry.
881                          */
882                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
883                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
884                                     x | LK_EXCLUSIVE_WAITERS)) {
885                                         sleepq_release(&lk->lock_object);
886                                         continue;
887                                 }
888                                 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
889                                     __func__, lk);
890                         }
891
892                         /*
893                          * Since we have been unable to acquire the
894                          * exclusive lock and the exclusive waiters flag
895                          * is set, we will sleep.
896                          */
897                         error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
898                             SQ_EXCLUSIVE_QUEUE);
899                         flags &= ~LK_INTERLOCK;
900                         if (error) {
901                                 LOCK_LOG3(lk,
902                                     "%s: interrupted sleep for %p with %d",
903                                     __func__, lk, error);
904                                 break;
905                         }
906                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
907                             __func__, lk);
908                 }
909                 if (error == 0) {
910                         lock_profile_obtain_lock_success(&lk->lock_object,
911                             contested, waittime, file, line);
912                         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
913                             lk->lk_recurse, file, line);
914                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
915                             LK_TRYWIT(flags), file, line);
916                         TD_LOCKS_INC(curthread);
917                         STACK_SAVE(lk);
918                 }
919                 break;
920         case LK_DOWNGRADE:
921                 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
922                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
923                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
924                 TD_SLOCKS_INC(curthread);
925
926                 /*
927                  * In order to preserve waiters flags, just spin.
928                  */
929                 for (;;) {
930                         x = lk->lk_lock;
931                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
932                         x &= LK_ALL_WAITERS;
933                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
934                             LK_SHARERS_LOCK(1) | x))
935                                 break;
936                         cpu_spinwait();
937                 }
938                 break;
939         case LK_RELEASE:
940                 _lockmgr_assert(lk, KA_LOCKED, file, line);
941                 x = lk->lk_lock;
942
943                 if ((x & LK_SHARE) == 0) {
944
945                         /*
946                          * As a first option, treat the lock as if it has
947                          * no waiters.
948                          * Fix up the tid variable if the lock has been disowned.
949                          */
950                         if (LK_HOLDER(x) == LK_KERNPROC)
951                                 tid = LK_KERNPROC;
952                         else {
953                                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
954                                     file, line);
955                                 TD_LOCKS_DEC(curthread);
956                         }
957                         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
958                             lk->lk_recurse, file, line);
959
960                         /*
961                          * The lock is held in exclusive mode.
962                          * If the lock is recursed also, then unrecurse it.
963                          */
964                         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
965                                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
966                                     lk);
967                                 lk->lk_recurse--;
968                                 break;
969                         }
970                         if (tid != LK_KERNPROC)
971                                 lock_profile_release_lock(&lk->lock_object);
972
973                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
974                             LK_UNLOCKED))
975                                 break;
976
977                         sleepq_lock(&lk->lock_object);
978                         x = lk->lk_lock;
979                         v = LK_UNLOCKED;
980
981                         /*
982                          * If the lock has exclusive waiters, give them
983                          * preference in order to avoid a deadlock with
984                          * shared runners-up.
985                          * If interruptible sleeps left the exclusive queue
986                          * empty, avoid starvation of the threads sleeping
987                          * on the shared queue by giving them precedence
988                          * and clearing the exclusive waiters bit anyway.
989                          * Please note that the lk_exslpfail count may
990                          * overstate the real number of waiters with the
991                          * LK_SLEEPFAIL flag on because such sleeps may be
992                          * interruptible as well, so lk_exslpfail should be
993                          * treated as an upper bound, including the edge
994                          * cases.
995                          */
996                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
997                         realexslp = sleepq_sleepcnt(&lk->lock_object,
998                             SQ_EXCLUSIVE_QUEUE);
999                         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1000                                 if (lk->lk_exslpfail < realexslp) {
1001                                         lk->lk_exslpfail = 0;
1002                                         queue = SQ_EXCLUSIVE_QUEUE;
1003                                         v |= (x & LK_SHARED_WAITERS);
1004                                 } else {
1005                                         lk->lk_exslpfail = 0;
1006                                         LOCK_LOG2(lk,
1007                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1008                                             __func__, lk);
1009                                         LOCK_LOG2(lk,
1010                         "%s: %p waking up threads on the exclusive queue",
1011                                             __func__, lk);
1012                                         wakeup_swapper =
1013                                             sleepq_broadcast(&lk->lock_object,
1014                                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1015                                         queue = SQ_SHARED_QUEUE;
1016                                 }
1017                         } else {
1018
1019                                 /*
1020                                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1021                                  * on and using interruptible sleeps/timeout
1022                                  * may have left spurious lk_exslpfail counts
1023                                  * on, so clean them up anyway.
1024                                  */
1025                                 lk->lk_exslpfail = 0;
1026                                 queue = SQ_SHARED_QUEUE;
1027                         }
1028
1029                         LOCK_LOG3(lk,
1030                             "%s: %p waking up threads on the %s queue",
1031                             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1032                             "exclusive");
1033                         atomic_store_rel_ptr(&lk->lk_lock, v);
1034                         wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1035                             SLEEPQ_LK, 0, queue);
1036                         sleepq_release(&lk->lock_object);
1037                         break;
1038                 } else
1039                         wakeup_swapper = wakeupshlk(lk, file, line);
1040                 break;
1041         case LK_DRAIN:
1042                 if (LK_CAN_WITNESS(flags))
1043                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1044                             LOP_EXCLUSIVE, file, line, ilk);
1045
1046                 /*
1047                  * Trying to drain a lock we already own will result in a
1048                  * deadlock.
1049                  */
1050                 if (lockmgr_xlocked(lk)) {
1051                         if (flags & LK_INTERLOCK)
1052                                 class->lc_unlock(ilk);
1053                         panic("%s: draining %s with the lock held @ %s:%d\n",
1054                             __func__, iwmesg, file, line);
1055                 }
1056
1057                 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1058                         lock_profile_obtain_lock_failed(&lk->lock_object,
1059                             &contested, &waittime);
1060
1061                         /*
1062                          * If the caller is not expected to sleep (LK_NOWAIT),
1063                          * just give up and return.
1064                          */
1065                         if (LK_TRYOP(flags)) {
1066                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1067                                     __func__, lk);
1068                                 error = EBUSY;
1069                                 break;
1070                         }
1071
1072                         /*
1073                          * Acquire the sleepqueue chain lock because we
1074                          * probably will need to manipulate waiters flags.
1075                          */
1076                         sleepq_lock(&lk->lock_object);
1077                         x = lk->lk_lock;
1078
1079                         /*
1080                          * If the lock has been released while we spun on
1081                          * the sleepqueue chain lock just try again.
1082                          */
1083                         if (x == LK_UNLOCKED) {
1084                                 sleepq_release(&lk->lock_object);
1085                                 continue;
1086                         }
1087
1088                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1089                         if ((x & ~v) == LK_UNLOCKED) {
1090                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1091
1092                                 /*
1093                                  * If interruptible sleeps left the exclusive
1094                                  * queue empty, avoid starvation of the
1095                                  * threads sleeping on the shared queue by
1096                                  * giving them precedence and clearing the
1097                                  * exclusive waiters bit anyway.
1098                                  * Please note that the lk_exslpfail count
1099                                  * may overstate the real number of waiters
1100                                  * with the LK_SLEEPFAIL flag on because such
1101                                  * sleeps may be interruptible as well, so
1102                                  * lk_exslpfail should be treated as an upper
1103                                  * bound, including the edge
1104                                  * cases.
1105                                  */
1106                                 if (v & LK_EXCLUSIVE_WAITERS) {
1107                                         queue = SQ_EXCLUSIVE_QUEUE;
1108                                         v &= ~LK_EXCLUSIVE_WAITERS;
1109                                 } else {
1110
1111                                         /*
1112                                          * Exclusive waiters sleeping with
1113                                          * LK_SLEEPFAIL on and using
1114                                          * interruptible sleeps/timeout may
1115                                          * have left spurious lk_exslpfail
1116                                          * counts on, so clean them up anyway.
1117                                          */
1118                                         MPASS(v & LK_SHARED_WAITERS);
1119                                         lk->lk_exslpfail = 0;
1120                                         queue = SQ_SHARED_QUEUE;
1121                                         v &= ~LK_SHARED_WAITERS;
1122                                 }
1123                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1124                                         realexslp =
1125                                             sleepq_sleepcnt(&lk->lock_object,
1126                                             SQ_EXCLUSIVE_QUEUE);
1127                                         if (lk->lk_exslpfail >= realexslp) {
1128                                                 lk->lk_exslpfail = 0;
1129                                                 queue = SQ_SHARED_QUEUE;
1130                                                 v &= ~LK_SHARED_WAITERS;
1131                                                 if (realexslp != 0) {
1132                                                         LOCK_LOG2(lk,
1133                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1134                                                             __func__, lk);
1135                                                         LOCK_LOG2(lk,
1136                         "%s: %p waking up threads on the exclusive queue",
1137                                                             __func__, lk);
1138                                                         wakeup_swapper =
1139                                                             sleepq_broadcast(
1140                                                             &lk->lock_object,
1141                                                             SLEEPQ_LK, 0,
1142                                                             SQ_EXCLUSIVE_QUEUE);
1143                                                 }
1144                                         } else
1145                                                 lk->lk_exslpfail = 0;
1146                                 }
1147                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1148                                         sleepq_release(&lk->lock_object);
1149                                         continue;
1150                                 }
1151                                 LOCK_LOG3(lk,
1152                                 "%s: %p waking up all threads on the %s queue",
1153                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1154                                     "shared" : "exclusive");
1155                                 wakeup_swapper |= sleepq_broadcast(
1156                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1157
1158                                 /*
1159                                  * If shared waiters have been woken up we need
1160                                  * to wait for one of them to acquire the lock
1161                                  * before setting the exclusive waiters flag
1162                                  * in order to avoid a deadlock.
1163                                  */
1164                                 if (queue == SQ_SHARED_QUEUE) {
1165                                         for (v = lk->lk_lock;
1166                                             (v & LK_SHARE) && !LK_SHARERS(v);
1167                                             v = lk->lk_lock)
1168                                                 cpu_spinwait();
1169                                 }
1170                         }
1171
1172                         /*
1173                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1174                          * fail, loop back and retry.
1175                          */
1176                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1177                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1178                                     x | LK_EXCLUSIVE_WAITERS)) {
1179                                         sleepq_release(&lk->lock_object);
1180                                         continue;
1181                                 }
1182                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1183                                     __func__, lk);
1184                         }
1185
1186                         /*
1187                          * Since we have been unable to acquire the
1188                          * exclusive lock and the exclusive waiters flag
1189                          * is set, we will sleep.
1190                          */
1191                         if (flags & LK_INTERLOCK) {
1192                                 class->lc_unlock(ilk);
1193                                 flags &= ~LK_INTERLOCK;
1194                         }
1195                         GIANT_SAVE();
1196                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1197                             SQ_EXCLUSIVE_QUEUE);
1198                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1199                         GIANT_RESTORE();
1200                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1201                             __func__, lk);
1202                 }
1203
1204                 if (error == 0) {
1205                         lock_profile_obtain_lock_success(&lk->lock_object,
1206                             contested, waittime, file, line);
1207                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1208                             lk->lk_recurse, file, line);
1209                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1210                             LK_TRYWIT(flags), file, line);
1211                         TD_LOCKS_INC(curthread);
1212                         STACK_SAVE(lk);
1213                 }
1214                 break;
1215         default:
1216                 if (flags & LK_INTERLOCK)
1217                         class->lc_unlock(ilk);
1218                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1219         }
1220
1221         if (flags & LK_INTERLOCK)
1222                 class->lc_unlock(ilk);
1223         if (wakeup_swapper)
1224                 kick_proc0();
1225
1226         return (error);
1227 }
1228
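/*
 * Pass ownership of an exclusively held lock to LK_KERNPROC, preserving any
 * waiter flags.  After this the curthread accounting is dropped and the lock
 * may be released by a different thread.
 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *        lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *        ...hand the locked object off to another context...
 *        lockmgr_disown(&lk);
 */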
1229 void
1230 _lockmgr_disown(struct lock *lk, const char *file, int line)
1231 {
1232         uintptr_t tid, x;
1233
1234         tid = (uintptr_t)curthread;
1235         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1236
1237         /*
1238          * If the owner is already LK_KERNPROC, just skip the whole operation.
1239          */
1240         if (LK_HOLDER(lk->lk_lock) != tid)
1241                 return;
1242         lock_profile_release_lock(&lk->lock_object);
1243         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1244         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1245         TD_LOCKS_DEC(curthread);
1246         STACK_SAVE(lk);
1247
1248         /*
1249          * In order to preserve waiters flags, just spin.
1250          */
1251         for (;;) {
1252                 x = lk->lk_lock;
1253                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1254                 x &= LK_ALL_WAITERS;
1255                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1256                     LK_KERNPROC | x))
1257                         return;
1258                 cpu_spinwait();
1259         }
1260 }
1261
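/*
 * Print a human-readable summary of the lock state (share count or exclusive
 * owner, plus any pending waiters and spinners) to the console.
 */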
1262 void
1263 lockmgr_printinfo(struct lock *lk)
1264 {
1265         struct thread *td;
1266         uintptr_t x;
1267
1268         if (lk->lk_lock == LK_UNLOCKED)
1269                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1270         else if (lk->lk_lock & LK_SHARE)
1271                 printf("lock type %s: SHARED (count %ju)\n",
1272                     lk->lock_object.lo_name,
1273                     (uintmax_t)LK_SHARERS(lk->lk_lock));
        else {
                td = lockmgr_xholder(lk);
                if (td == (struct thread *)LK_KERNPROC)
                        printf("lock type %s: EXCL by LK_KERNPROC\n",
                            lk->lock_object.lo_name);
                else
                        printf("lock type %s: EXCL by thread %p (pid %d)\n",
                            lk->lock_object.lo_name, td, td->td_proc->p_pid);
        }
1279
1280         x = lk->lk_lock;
1281         if (x & LK_EXCLUSIVE_WAITERS)
1282                 printf(" with exclusive waiters pending\n");
1283         if (x & LK_SHARED_WAITERS)
1284                 printf(" with shared waiters pending\n");
1285         if (x & LK_EXCLUSIVE_SPINNERS)
1286                 printf(" with exclusive spinners pending\n");
1287
1288         STACK_PRINT(lk);
1289 }
1290
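/*
 * Report the lock state relative to the calling thread:
 *
 *        LK_EXCLUSIVE    held exclusively by curthread (or disowned to
 *                        LK_KERNPROC)
 *        LK_EXCLOTHER    held exclusively by some other thread
 *        LK_SHARED       held in shared mode
 *        0               unlocked
 */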
1291 int
1292 lockstatus(struct lock *lk)
1293 {
1294         uintptr_t v, x;
1295         int ret;
1296
1297         ret = LK_SHARED;
1298         x = lk->lk_lock;
1299         v = LK_HOLDER(x);
1300
1301         if ((x & LK_SHARE) == 0) {
1302                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1303                         ret = LK_EXCLUSIVE;
1304                 else
1305                         ret = LK_EXCLOTHER;
1306         } else if (x == LK_UNLOCKED)
1307                 ret = 0;
1308
1309         return (ret);
1310 }
1311
1312 #ifdef INVARIANT_SUPPORT
1313
1314 FEATURE(invariant_support,
1315     "Support for modules compiled with INVARIANTS option");
1316
1317 #ifndef INVARIANTS
1318 #undef  _lockmgr_assert
1319 #endif
1320
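/*
 * Back-end for the lockmgr_assert() macro: panic unless the lock state
 * matches the KA_* flags in "what", optionally combined with KA_RECURSED or
 * KA_NOTRECURSED.  The checks are skipped after a panic so that the debugger
 * can still make progress.  A typical (hypothetical) consumer-side use:
 *
 *        lockmgr_assert(&lk, KA_XLOCKED);
 */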
1321 void
1322 _lockmgr_assert(struct lock *lk, int what, const char *file, int line)
1323 {
1324         int slocked = 0;
1325
1326         if (panicstr != NULL)
1327                 return;
1328         switch (what) {
1329         case KA_SLOCKED:
1330         case KA_SLOCKED | KA_NOTRECURSED:
1331         case KA_SLOCKED | KA_RECURSED:
1332                 slocked = 1;
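                /* FALLTHROUGH */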
1333         case KA_LOCKED:
1334         case KA_LOCKED | KA_NOTRECURSED:
1335         case KA_LOCKED | KA_RECURSED:
1336 #ifdef WITNESS
1337
1338                 /*
1339                  * We cannot trust WITNESS if the lock is held in exclusive
1340                  * mode and a call to lockmgr_disown() happened.
1341                  * Work around this by skipping the check if the lock is
1342                  * held in exclusive mode, even for the KA_LOCKED case.
1343                  */
1344                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1345                         witness_assert(&lk->lock_object, what, file, line);
1346                         break;
1347                 }
1348 #endif
1349                 if (lk->lk_lock == LK_UNLOCKED ||
1350                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1351                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1352                         panic("Lock %s not %slocked @ %s:%d\n",
1353                             lk->lock_object.lo_name, slocked ? "share" : "",
1354                             file, line);
1355
1356                 if ((lk->lk_lock & LK_SHARE) == 0) {
1357                         if (lockmgr_recursed(lk)) {
1358                                 if (what & KA_NOTRECURSED)
1359                                         panic("Lock %s recursed @ %s:%d\n",
1360                                             lk->lock_object.lo_name, file,
1361                                             line);
1362                         } else if (what & KA_RECURSED)
1363                                 panic("Lock %s not recursed @ %s:%d\n",
1364                                     lk->lock_object.lo_name, file, line);
1365                 }
1366                 break;
1367         case KA_XLOCKED:
1368         case KA_XLOCKED | KA_NOTRECURSED:
1369         case KA_XLOCKED | KA_RECURSED:
1370                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1371                         panic("Lock %s not exclusively locked @ %s:%d\n",
1372                             lk->lock_object.lo_name, file, line);
1373                 if (lockmgr_recursed(lk)) {
1374                         if (what & KA_NOTRECURSED)
1375                                 panic("Lock %s recursed @ %s:%d\n",
1376                                     lk->lock_object.lo_name, file, line);
1377                 } else if (what & KA_RECURSED)
1378                         panic("Lock %s not recursed @ %s:%d\n",
1379                             lk->lock_object.lo_name, file, line);
1380                 break;
1381         case KA_UNLOCKED:
1382                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1383                         panic("Lock %s exclusively locked @ %s:%d\n",
1384                             lk->lock_object.lo_name, file, line);
1385                 break;
1386         default:
1387                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1388                     line);
1389         }
1390 }
1391 #endif
1392
1393 #ifdef DDB
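/*
 * If the thread is asleep on a lockmgr lock, describe the lock and return 1
 * with *ownerp set to the exclusive holder (NULL when the lock is shared);
 * return 0 if the wait channel is not a lockmgr lock.  Only built into DDB
 * kernels, where it lets the debugger follow a sleeping thread to the lock
 * owner.
 */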
1394 int
1395 lockmgr_chain(struct thread *td, struct thread **ownerp)
1396 {
1397         struct lock *lk;
1398
1399         lk = td->td_wchan;
1400
1401         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1402                 return (0);
1403         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1404         if (lk->lk_lock & LK_SHARE)
1405                 db_printf("SHARED (count %ju)\n",
1406                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1407         else
1408                 db_printf("EXCL\n");
1409         *ownerp = lockmgr_xholder(lk);
1410
1411         return (1);
1412 }
1413
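/*
 * Pretty-print the full lock state (owner, recursion, waiters and spinners)
 * for DDB's "show lock" output; registered as the lockmgr lock class'
 * lc_ddb_show method.
 */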
1414 static void
1415 db_show_lockmgr(struct lock_object *lock)
1416 {
1417         struct thread *td;
1418         struct lock *lk;
1419
1420         lk = (struct lock *)lock;
1421
1422         db_printf(" state: ");
1423         if (lk->lk_lock == LK_UNLOCKED)
1424                 db_printf("UNLOCKED\n");
1425         else if (lk->lk_lock & LK_SHARE)
1426                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1427         else {
1428                 td = lockmgr_xholder(lk);
1429                 if (td == (struct thread *)LK_KERNPROC)
1430                         db_printf("XLOCK: LK_KERNPROC\n");
1431                 else
1432                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1433                             td->td_tid, td->td_proc->p_pid,
1434                             td->td_proc->p_comm);
1435                 if (lockmgr_recursed(lk))
1436                         db_printf(" recursed: %d\n", lk->lk_recurse);
1437         }
1438         db_printf(" waiters: ");
1439         switch (lk->lk_lock & LK_ALL_WAITERS) {
1440         case LK_SHARED_WAITERS:
1441                 db_printf("shared\n");
1442                 break;
1443         case LK_EXCLUSIVE_WAITERS:
1444                 db_printf("exclusive\n");
1445                 break;
1446         case LK_ALL_WAITERS:
1447                 db_printf("shared and exclusive\n");
1448                 break;
1449         default:
1450                 db_printf("none\n");
1451         }
1452         db_printf(" spinners: ");
1453         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1454                 db_printf("exclusive\n");
1455         else
1456                 db_printf("none\n");
1457 }
1458 #endif