1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/lockstat.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sleepqueue.h>
47 #ifdef DEBUG_LOCKS
48 #include <sys/stack.h>
49 #endif
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52
53 #include <machine/cpu.h>
54
55 #ifdef DDB
56 #include <ddb/ddb.h>
57 #endif
58
59 #ifdef HWPMC_HOOKS
60 #include <sys/pmckern.h>
61 PMC_SOFT_DECLARE( , , lock, failed);
62 #endif
63
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
66
67 #define SQ_EXCLUSIVE_QUEUE      0
68 #define SQ_SHARED_QUEUE         1
69
70 #ifndef INVARIANTS
71 #define _lockmgr_assert(lk, what, file, line)
72 #endif
73
74 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
75 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
76
77 #ifndef DEBUG_LOCKS
78 #define STACK_PRINT(lk)
79 #define STACK_SAVE(lk)
80 #define STACK_ZERO(lk)
81 #else
82 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
83 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
84 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
85 #endif
86
87 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
88         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
89                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
90 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
91         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
92                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93
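/*
 * Giant handling for sleeping: a thread must not hold Giant while it sleeps
 * in sleeplk().  GIANT_SAVE() records how many times Giant is recursively
 * held and drops it completely; GIANT_RESTORE() reacquires it the same
 * number of times once the sleep is over.
 */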
94 #define GIANT_DECLARE                                                   \
95         int _i = 0;                                                     \
96         WITNESS_SAVE_DECL(Giant)
97 #define GIANT_RESTORE() do {                                            \
98         if (__predict_false(_i > 0)) {                                  \
99                 while (_i--)                                            \
100                         mtx_lock(&Giant);                               \
101                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
102         }                                                               \
103 } while (0)
104 #define GIANT_SAVE() do {                                               \
105         if (__predict_false(mtx_owned(&Giant))) {                       \
106                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
107                 while (mtx_owned(&Giant)) {                             \
108                         _i++;                                           \
109                         mtx_unlock(&Giant);                             \
110                 }                                                       \
111         }                                                               \
112 } while (0)
113
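/*
 * Decide whether a shared lock request can be granted immediately: true if
 * the lock is already shared and has no exclusive waiters or spinners, or
 * if the requesting thread may jump ahead of exclusive waiters for deadlock
 * avoidance (it already holds shared locks and LK_NODDLKTREAT is not set,
 * or TDP_DEADLKTREAT is set).  The fast path (fp == true) never jumps the
 * queue.
 */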
114 static bool __always_inline
115 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
116 {
117
118         if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
119             LK_SHARE)
120                 return (true);
121         if (fp || (!(x & LK_SHARE)))
122                 return (false);
123         if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
124             (curthread->td_pflags & TDP_DEADLKTREAT))
125                 return (true);
126         return (false);
127 }
128
129 #define LK_TRYOP(x)                                                     \
130         ((x) & LK_NOWAIT)
131
132 #define LK_CAN_WITNESS(x)                                               \
133         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
134 #define LK_TRYWIT(x)                                                    \
135         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
136
137 #define lockmgr_disowned(lk)                                            \
138         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
139
140 #define lockmgr_xlocked_v(v)                                            \
141         (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
142
143 #define lockmgr_xlocked(lk) lockmgr_xlocked_v((lk)->lk_lock)
144
145 static void     assert_lockmgr(const struct lock_object *lock, int how);
146 #ifdef DDB
147 static void     db_show_lockmgr(const struct lock_object *lock);
148 #endif
149 static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
150 #ifdef KDTRACE_HOOKS
151 static int      owner_lockmgr(const struct lock_object *lock,
152                     struct thread **owner);
153 #endif
154 static uintptr_t unlock_lockmgr(struct lock_object *lock);
155
156 struct lock_class lock_class_lockmgr = {
157         .lc_name = "lockmgr",
158         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
159         .lc_assert = assert_lockmgr,
160 #ifdef DDB
161         .lc_ddb_show = db_show_lockmgr,
162 #endif
163         .lc_lock = lock_lockmgr,
164         .lc_unlock = unlock_lockmgr,
165 #ifdef KDTRACE_HOOKS
166         .lc_owner = owner_lockmgr,
167 #endif
168 };
169
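/*
 * Sleep parameters (wait message, priority and timeout) handed down to the
 * slow-path routines when the caller overrides the defaults recorded at
 * lockinit() time.
 */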
170 struct lockmgr_wait {
171         const char *iwmesg;
172         int ipri;
173         int itimo;
174 };
175
176 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
177     int flags, bool fp);
178 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
179
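/*
 * Common exit path for the lock/unlock routines: drop the interlock if the
 * caller passed LK_INTERLOCK and kick proc0 if a swapped-out thread was
 * awoken.
 */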
180 static void
181 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
182 {
183         struct lock_class *class;
184
185         if (flags & LK_INTERLOCK) {
186                 class = LOCK_CLASS(ilk);
187                 class->lc_unlock(ilk);
188         }
189
190         if (__predict_false(wakeup_swapper))
191                 kick_proc0();
192 }
193
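/*
 * Bookkeeping helpers called on acquire/release: record lockstat and lock
 * profiling events, log the operation via KTR, notify WITNESS and maintain
 * the per-thread lock counters.
 */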
194 static void
195 lockmgr_note_shared_acquire(struct lock *lk, int contested,
196     uint64_t waittime, const char *file, int line, int flags)
197 {
198
199         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
200             waittime, file, line, LOCKSTAT_READER);
201         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
202         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
203         TD_LOCKS_INC(curthread);
204         TD_SLOCKS_INC(curthread);
205         STACK_SAVE(lk);
206 }
207
208 static void
209 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
210 {
211
212         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
213         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
214         TD_LOCKS_DEC(curthread);
215         TD_SLOCKS_DEC(curthread);
216 }
217
218 static void
219 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
220     uint64_t waittime, const char *file, int line, int flags)
221 {
222
223         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
224             waittime, file, line, LOCKSTAT_WRITER);
225         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
226         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
227             line);
228         TD_LOCKS_INC(curthread);
229         STACK_SAVE(lk);
230 }
231
232 static void
233 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
234 {
235
236         if (LK_HOLDER(lk->lk_lock) != LK_KERNPROC) {
237                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
238                 TD_LOCKS_DEC(curthread);
239         }
240         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
241             line);
242 }
243
244 static __inline struct thread *
245 lockmgr_xholder(const struct lock *lk)
246 {
247         uintptr_t x;
248
249         x = lk->lk_lock;
250         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
251 }
252
253 /*
254  * It assumes the sleepq chain lock is held and returns with it released.
255  * It also assumes the generic interlock is sane and previously checked.
256  * If LK_INTERLOCK is specified the interlock is not reacquired after the
257  * sleep.
258  */
259 static __inline int
260 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
261     const char *wmesg, int pri, int timo, int queue)
262 {
263         GIANT_DECLARE;
264         struct lock_class *class;
265         int catch, error;
266
267         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
268         catch = pri & PCATCH;
269         pri &= PRIMASK;
270         error = 0;
271
272         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
273             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
274
275         if (flags & LK_INTERLOCK)
276                 class->lc_unlock(ilk);
277         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
278                 lk->lk_exslpfail++;
279         GIANT_SAVE();
280         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
281             SLEEPQ_INTERRUPTIBLE : 0), queue);
282         if ((flags & LK_TIMELOCK) && timo)
283                 sleepq_set_timeout(&lk->lock_object, timo);
284
285         /*
286          * Choose the appropriate sleep primitive and go to sleep.
287          */
288         if ((flags & LK_TIMELOCK) && timo && catch)
289                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
290         else if ((flags & LK_TIMELOCK) && timo)
291                 error = sleepq_timedwait(&lk->lock_object, pri);
292         else if (catch)
293                 error = sleepq_wait_sig(&lk->lock_object, pri);
294         else
295                 sleepq_wait(&lk->lock_object, pri);
296         GIANT_RESTORE();
297         if ((flags & LK_SLEEPFAIL) && error == 0)
298                 error = ENOLCK;
299
300         return (error);
301 }
302
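/*
 * Release a shared lock.  The common case just drops one sharer; when the
 * last sharer leaves with waiters present, the sleepqueue lock is taken,
 * the queue to awaken is chosen (exclusive waiters are preferred) and a
 * broadcast is issued.  Returns nonzero if the swapper must be awoken.
 */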
303 static __inline int
304 wakeupshlk(struct lock *lk, const char *file, int line)
305 {
306         uintptr_t v, x, orig_x;
307         u_int realexslp;
308         int queue, wakeup_swapper;
309
310         wakeup_swapper = 0;
311         for (;;) {
312                 x = lk->lk_lock;
313                 if (lockmgr_sunlock_try(lk, &x))
314                         break;
315
316                 /*
317                  * We should have a sharer with waiters, so enter the hard
318                  * path in order to handle wakeups correctly.
319                  */
320                 sleepq_lock(&lk->lock_object);
321                 orig_x = lk->lk_lock;
322 retry_sleepq:
323                 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
324                 v = LK_UNLOCKED;
325
326                 /*
327                  * If the lock has exclusive waiters, give them preference in
328                  * order to avoid deadlock with shared runners-up.
329                  * If interruptible sleeps left the exclusive queue empty,
330                  * avoid starvation of the threads sleeping on the shared
331                  * queue by giving them precedence and cleaning up the
332                  * exclusive waiters bit anyway.
333                  * Please note that the lk_exslpfail count may overstate
334                  * the real number of waiters with the LK_SLEEPFAIL flag on
335                  * because they may be used in conjunction with interruptible
336                  * sleeps, so lk_exslpfail should be considered an upper
337                  * bound, including the edge cases.
338                  */
339                 realexslp = sleepq_sleepcnt(&lk->lock_object,
340                     SQ_EXCLUSIVE_QUEUE);
341                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
342                         if (lk->lk_exslpfail < realexslp) {
343                                 lk->lk_exslpfail = 0;
344                                 queue = SQ_EXCLUSIVE_QUEUE;
345                                 v |= (x & LK_SHARED_WAITERS);
346                         } else {
347                                 lk->lk_exslpfail = 0;
348                                 LOCK_LOG2(lk,
349                                     "%s: %p has only LK_SLEEPFAIL sleepers",
350                                     __func__, lk);
351                                 LOCK_LOG2(lk,
352                             "%s: %p waking up threads on the exclusive queue",
353                                     __func__, lk);
354                                 wakeup_swapper =
355                                     sleepq_broadcast(&lk->lock_object,
356                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
357                                 queue = SQ_SHARED_QUEUE;
358                         }
359
360                 } else {
361
362                         /*
363                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
364                          * and using interruptible sleeps/timeout may have
365                          * left spurious lk_exslpfail counts on, so clean
366                          * it up anyway.
367                          */
368                         lk->lk_exslpfail = 0;
369                         queue = SQ_SHARED_QUEUE;
370                 }
371
372                 if (lockmgr_sunlock_try(lk, &orig_x)) {
373                         sleepq_release(&lk->lock_object);
374                         break;
375                 }
376
377                 x |= LK_SHARERS_LOCK(1);
378                 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
379                         orig_x = x;
380                         goto retry_sleepq;
381                 }
382                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
383                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
384                     "exclusive");
385                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
386                     0, queue);
387                 sleepq_release(&lk->lock_object);
388                 break;
389         }
390
391         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
392         return (wakeup_swapper);
393 }
394
395 static void
396 assert_lockmgr(const struct lock_object *lock, int what)
397 {
398
399         panic("lockmgr locks do not support assertions");
400 }
401
402 static void
403 lock_lockmgr(struct lock_object *lock, uintptr_t how)
404 {
405
406         panic("lockmgr locks do not support sleep interlocking");
407 }
408
409 static uintptr_t
410 unlock_lockmgr(struct lock_object *lock)
411 {
412
413         panic("lockmgr locks do not support sleep interlocking");
414 }
415
416 #ifdef KDTRACE_HOOKS
417 static int
418 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
419 {
420
421         panic("lockmgr locks do not support owner inquiring");
422 }
423 #endif
424
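/*
 * Initialize a lockmgr lock: translate the LK_* initialization flags into
 * the corresponding LO_* lock object flags, register the lock with the
 * lockmgr lock class and record the default sleep priority and timeout.
 */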
425 void
426 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
427 {
428         int iflags;
429
430         MPASS((flags & ~LK_INIT_MASK) == 0);
431         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
432             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
433             &lk->lk_lock));
434
435         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
436         if (flags & LK_CANRECURSE)
437                 iflags |= LO_RECURSABLE;
438         if ((flags & LK_NODUP) == 0)
439                 iflags |= LO_DUPOK;
440         if (flags & LK_NOPROFILE)
441                 iflags |= LO_NOPROFILE;
442         if ((flags & LK_NOWITNESS) == 0)
443                 iflags |= LO_WITNESS;
444         if (flags & LK_QUIET)
445                 iflags |= LO_QUIET;
446         if (flags & LK_IS_VNODE)
447                 iflags |= LO_IS_VNODE;
448         if (flags & LK_NEW)
449                 iflags |= LO_NEW;
450         iflags |= flags & LK_NOSHARE;
451
452         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
453         lk->lk_lock = LK_UNLOCKED;
454         lk->lk_recurse = 0;
455         lk->lk_exslpfail = 0;
456         lk->lk_timo = timo;
457         lk->lk_pri = pri;
458         STACK_ZERO(lk);
459 }
460
461 /*
462  * XXX: Gross hacks to manipulate external lock flags after
463  * initialization.  Used for certain vnode and buf locks.
464  */
465 void
466 lockallowshare(struct lock *lk)
467 {
468
469         lockmgr_assert(lk, KA_XLOCKED);
470         lk->lock_object.lo_flags &= ~LK_NOSHARE;
471 }
472
473 void
474 lockdisableshare(struct lock *lk)
475 {
476
477         lockmgr_assert(lk, KA_XLOCKED);
478         lk->lock_object.lo_flags |= LK_NOSHARE;
479 }
480
481 void
482 lockallowrecurse(struct lock *lk)
483 {
484
485         lockmgr_assert(lk, KA_XLOCKED);
486         lk->lock_object.lo_flags |= LO_RECURSABLE;
487 }
488
489 void
490 lockdisablerecurse(struct lock *lk)
491 {
492
493         lockmgr_assert(lk, KA_XLOCKED);
494         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
495 }
496
497 void
498 lockdestroy(struct lock *lk)
499 {
500
501         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
502         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
503         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
504         lock_destroy(&lk->lock_object);
505 }
506
507 static bool __always_inline
508 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
509 {
510
511         /*
512          * If no other thread has an exclusive lock, or
513          * no exclusive waiter is present, bump the count of
514          * sharers.  Since we have to preserve the state of
515          * waiters, if we fail to acquire the shared lock
516          * loop back and retry.
517          */
518         *xp = lk->lk_lock;
519         while (LK_CAN_SHARE(*xp, flags, fp)) {
520                 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
521                     *xp + LK_ONE_SHARER)) {
522                         return (true);
523                 }
524         }
525         return (false);
526 }
527
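/*
 * Fast-path shared release: drop one sharer as long as other sharers remain
 * or there are no waiters to wake up.  Returns false when the caller must
 * take the slow path and handle the wakeup.
 */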
528 static bool __always_inline
529 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
530 {
531
532         for (;;) {
533                 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
534                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
535                             *xp - LK_ONE_SHARER))
536                                 return (true);
537                         continue;
538                 }
539                 break;
540         }
541         return (false);
542 }
543
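/*
 * Slow path for shared acquisition: loop trying to share the lock, fail
 * with EDEADLK if curthread already owns it exclusively, honor LK_NOWAIT,
 * and otherwise set the shared waiters flag and sleep on the shared queue.
 */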
544 static __noinline int
545 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
546     const char *file, int line, struct lockmgr_wait *lwa)
547 {
548         uintptr_t tid, x;
549         int error = 0;
550         const char *iwmesg;
551         int ipri, itimo;
552
553 #ifdef KDTRACE_HOOKS
554         uint64_t sleep_time = 0;
555 #endif
556 #ifdef LOCK_PROFILING
557         uint64_t waittime = 0;
558         int contested = 0;
559 #endif
560
561         if (KERNEL_PANICKED())
562                 goto out;
563
564         tid = (uintptr_t)curthread;
565
566         if (LK_CAN_WITNESS(flags))
567                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
568                     file, line, flags & LK_INTERLOCK ? ilk : NULL);
569         for (;;) {
570                 if (lockmgr_slock_try(lk, &x, flags, false))
571                         break;
572 #ifdef HWPMC_HOOKS
573                 PMC_SOFT_CALL( , , lock, failed);
574 #endif
575                 lock_profile_obtain_lock_failed(&lk->lock_object,
576                     &contested, &waittime);
577
578                 /*
579                  * If the lock is already held by curthread in
580                  * exclusive mode, avoid a deadlock.
581                  */
582                 if (LK_HOLDER(x) == tid) {
583                         LOCK_LOG2(lk,
584                             "%s: %p already held in exclusive mode",
585                             __func__, lk);
586                         error = EDEADLK;
587                         break;
588                 }
589
590                 /*
591                  * If the operation must not sleep, just give up
592                  * and return.
593                  */
594                 if (LK_TRYOP(flags)) {
595                         LOCK_LOG2(lk, "%s: %p fails the try operation",
596                             __func__, lk);
597                         error = EBUSY;
598                         break;
599                 }
600
601                 /*
602                  * Acquire the sleepqueue chain lock because we
603                  * probably will need to manipulate waiters flags.
604                  */
605                 sleepq_lock(&lk->lock_object);
606                 x = lk->lk_lock;
607 retry_sleepq:
608
609                 /*
610                  * if the lock can be acquired in shared mode, try
611                  * again.
612                  */
613                 if (LK_CAN_SHARE(x, flags, false)) {
614                         sleepq_release(&lk->lock_object);
615                         continue;
616                 }
617
618                 /*
619                  * Try to set the LK_SHARED_WAITERS flag.  If we fail,
620                  * loop back and retry.
621                  */
622                 if ((x & LK_SHARED_WAITERS) == 0) {
623                         if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
624                             x | LK_SHARED_WAITERS)) {
625                                 goto retry_sleepq;
626                         }
627                         LOCK_LOG2(lk, "%s: %p set shared waiters flag",
628                             __func__, lk);
629                 }
630
631                 if (lwa == NULL) {
632                         iwmesg = lk->lock_object.lo_name;
633                         ipri = lk->lk_pri;
634                         itimo = lk->lk_timo;
635                 } else {
636                         iwmesg = lwa->iwmesg;
637                         ipri = lwa->ipri;
638                         itimo = lwa->itimo;
639                 }
640
641                 /*
642                  * Since we have been unable to acquire the
643                  * shared lock and the shared waiters flag is set,
644                  * we will sleep.
645                  */
646 #ifdef KDTRACE_HOOKS
647                 sleep_time -= lockstat_nsecs(&lk->lock_object);
648 #endif
649                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
650                     SQ_SHARED_QUEUE);
651 #ifdef KDTRACE_HOOKS
652                 sleep_time += lockstat_nsecs(&lk->lock_object);
653 #endif
654                 flags &= ~LK_INTERLOCK;
655                 if (error) {
656                         LOCK_LOG3(lk,
657                             "%s: interrupted sleep for %p with %d",
658                             __func__, lk, error);
659                         break;
660                 }
661                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
662                     __func__, lk);
663         }
664         if (error == 0) {
665 #ifdef KDTRACE_HOOKS
666                 if (sleep_time != 0)
667                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
668                             LOCKSTAT_READER, (x & LK_SHARE) == 0,
669                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
670 #endif
671 #ifdef LOCK_PROFILING
672                 lockmgr_note_shared_acquire(lk, contested, waittime,
673                     file, line, flags);
674 #else
675                 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
676                     flags);
677 #endif
678         }
679
680 out:
681         lockmgr_exit(flags, ilk, 0);
682         return (error);
683 }
684
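/*
 * Slow path for exclusive acquisition: recurse if allowed, otherwise loop
 * trying to claim the lock, honor LK_NOWAIT, and if needed set the
 * exclusive waiters flag and sleep on the exclusive queue.
 */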
685 static __noinline int
686 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
687     const char *file, int line, struct lockmgr_wait *lwa)
688 {
689         struct lock_class *class;
690         uintptr_t tid, x, v;
691         int error = 0;
692         const char *iwmesg;
693         int ipri, itimo;
694
695 #ifdef KDTRACE_HOOKS
696         uint64_t sleep_time = 0;
697 #endif
698 #ifdef LOCK_PROFILING
699         uint64_t waittime = 0;
700         int contested = 0;
701 #endif
702
703         if (KERNEL_PANICKED())
704                 goto out;
705
706         tid = (uintptr_t)curthread;
707
708         if (LK_CAN_WITNESS(flags))
709                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
710                     LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
711                     ilk : NULL);
712
713         /*
714          * If curthread already holds the lock and this one is
715          * allowed to recurse, simply recurse on it.
716          */
717         if (lockmgr_xlocked(lk)) {
718                 if ((flags & LK_CANRECURSE) == 0 &&
719                     (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
720                         /*
721                          * If this is a try operation, just give up
722                          * and return instead of panicking.
723                          */
724                         if (LK_TRYOP(flags)) {
725                                 LOCK_LOG2(lk,
726                                     "%s: %p fails the try operation",
727                                     __func__, lk);
728                                 error = EBUSY;
729                                 goto out;
730                         }
731                         if (flags & LK_INTERLOCK) {
732                                 class = LOCK_CLASS(ilk);
733                                 class->lc_unlock(ilk);
734                         }
735                         panic("%s: recursing on non recursive lockmgr %p "
736                             "@ %s:%d\n", __func__, lk, file, line);
737                 }
738                 lk->lk_recurse++;
739                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
740                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
741                     lk->lk_recurse, file, line);
742                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
743                     LK_TRYWIT(flags), file, line);
744                 TD_LOCKS_INC(curthread);
745                 goto out;
746         }
747
748         for (;;) {
749                 if (lk->lk_lock == LK_UNLOCKED &&
750                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
751                         break;
752 #ifdef HWPMC_HOOKS
753                 PMC_SOFT_CALL( , , lock, failed);
754 #endif
755                 lock_profile_obtain_lock_failed(&lk->lock_object,
756                     &contested, &waittime);
757
758                 /*
759                  * If the operation must not sleep, just give up
760                  * and return.
761                  */
762                 if (LK_TRYOP(flags)) {
763                         LOCK_LOG2(lk, "%s: %p fails the try operation",
764                             __func__, lk);
765                         error = EBUSY;
766                         break;
767                 }
768
769                 /*
770                  * Acquire the sleepqueue chain lock because we
771                  * probably will need to manipulate waiters flags.
772                  */
773                 sleepq_lock(&lk->lock_object);
774                 x = lk->lk_lock;
775 retry_sleepq:
776
777                 /*
778                  * if the lock has been released while we spun on
779                  * the sleepqueue chain lock just try again.
780                  */
781                 if (x == LK_UNLOCKED) {
782                         sleepq_release(&lk->lock_object);
783                         continue;
784                 }
785
786                 /*
787                  * The lock can be in the state where there is a
788                  * pending queue of waiters, but still no owner.
789                  * This happens when the lock is contested and an
790                  * owner is going to claim the lock.
791                  * If curthread is the one that successfully acquires it,
792                  * claim lock ownership and return, preserving waiters
793                  * flags.
794                  */
795                 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
796                 if ((x & ~v) == LK_UNLOCKED) {
797                         v &= ~LK_EXCLUSIVE_SPINNERS;
798                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
799                             tid | v)) {
800                                 sleepq_release(&lk->lock_object);
801                                 LOCK_LOG2(lk,
802                                     "%s: %p claimed by a new writer",
803                                     __func__, lk);
804                                 break;
805                         }
806                         goto retry_sleepq;
807                 }
808
809                 /*
810                  * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
811                  * fail, loop back and retry.
812                  */
813                 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
814                         if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
815                             x | LK_EXCLUSIVE_WAITERS)) {
816                                 goto retry_sleepq;
817                         }
818                         LOCK_LOG2(lk, "%s: %p set excl waiters flag",
819                             __func__, lk);
820                 }
821
822                 if (lwa == NULL) {
823                         iwmesg = lk->lock_object.lo_name;
824                         ipri = lk->lk_pri;
825                         itimo = lk->lk_timo;
826                 } else {
827                         iwmesg = lwa->iwmesg;
828                         ipri = lwa->ipri;
829                         itimo = lwa->itimo;
830                 }
831
832                 /*
833                  * Since we have been unable to acquire the
834                  * exclusive lock and the exclusive waiters flag
835                  * is set, we will sleep.
836                  */
837 #ifdef KDTRACE_HOOKS
838                 sleep_time -= lockstat_nsecs(&lk->lock_object);
839 #endif
840                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
841                     SQ_EXCLUSIVE_QUEUE);
842 #ifdef KDTRACE_HOOKS
843                 sleep_time += lockstat_nsecs(&lk->lock_object);
844 #endif
845                 flags &= ~LK_INTERLOCK;
846                 if (error) {
847                         LOCK_LOG3(lk,
848                             "%s: interrupted sleep for %p with %d",
849                             __func__, lk, error);
850                         break;
851                 }
852                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
853                     __func__, lk);
854         }
855         if (error == 0) {
856 #ifdef KDTRACE_HOOKS
857                 if (sleep_time != 0)
858                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
859                             LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
860                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
861 #endif
862 #ifdef LOCK_PROFILING
863                 lockmgr_note_exclusive_acquire(lk, contested, waittime,
864                     file, line, flags);
865 #else
866                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
867                     flags);
868 #endif
869         }
870
871 out:
872         lockmgr_exit(flags, ilk, 0);
873         return (error);
874 }
875
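/*
 * Upgrade a shared lock to an exclusive one.  The single atomic swap only
 * succeeds if curthread is the last sharer; otherwise LK_TRYUPGRADE fails
 * with EBUSY, while LK_UPGRADE releases the shared lock and falls back to
 * the exclusive slow path.
 */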
876 static __noinline int
877 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
878     const char *file, int line, struct lockmgr_wait *lwa)
879 {
880         uintptr_t tid, x, v;
881         int error = 0;
882         int wakeup_swapper = 0;
883         int op;
884
885         if (KERNEL_PANICKED())
886                 goto out;
887
888         tid = (uintptr_t)curthread;
889
890         _lockmgr_assert(lk, KA_SLOCKED, file, line);
891         v = lk->lk_lock;
892         x = v & LK_ALL_WAITERS;
893         v &= LK_EXCLUSIVE_SPINNERS;
894
895         /*
896          * Try to switch from one shared lock to an exclusive one.
897          * We need to preserve waiters flags during the operation.
898          */
899         if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
900             tid | x)) {
901                 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
902                     line);
903                 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
904                     LK_TRYWIT(flags), file, line);
905                 LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
906                 TD_SLOCKS_DEC(curthread);
907                 goto out;
908         }
909
910         op = flags & LK_TYPE_MASK;
911
912         /*
913          * In LK_TRYUPGRADE mode, do not drop the lock,
914          * returning EBUSY instead.
915          */
916         if (op == LK_TRYUPGRADE) {
917                 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
918                     __func__, lk);
919                 error = EBUSY;
920                 goto out;
921         }
922
923         /*
924          * We have been unable to succeed in upgrading, so just
925          * give up the shared lock.
926          */
927         lockmgr_note_shared_release(lk, file, line);
928         wakeup_swapper |= wakeupshlk(lk, file, line);
929         error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
930         flags &= ~LK_INTERLOCK;
931 out:
932         lockmgr_exit(flags, ilk, wakeup_swapper);
933         return (error);
934 }
935
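/*
 * Fast-path lock entry point: attempt the uncontended shared or exclusive
 * acquisition inline.  Contended cases go to the slow-path routines,
 * upgrades go to lockmgr_upgrade() and any other operation falls back to
 * the full __lockmgr_args() machinery.
 */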
936 int
937 lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
938     const char *file, int line)
939 {
940         struct lock_class *class;
941         uintptr_t x, tid;
942         u_int op;
943         bool locked;
944
945         if (KERNEL_PANICKED())
946                 return (0);
947
948         op = flags & LK_TYPE_MASK;
949         locked = false;
950         switch (op) {
951         case LK_SHARED:
952                 if (LK_CAN_WITNESS(flags))
953                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
954                             file, line, flags & LK_INTERLOCK ? ilk : NULL);
955                 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
956                         break;
957                 if (lockmgr_slock_try(lk, &x, flags, true)) {
958                         lockmgr_note_shared_acquire(lk, 0, 0,
959                             file, line, flags);
960                         locked = true;
961                 } else {
962                         return (lockmgr_slock_hard(lk, flags, ilk, file, line,
963                             NULL));
964                 }
965                 break;
966         case LK_EXCLUSIVE:
967                 if (LK_CAN_WITNESS(flags))
968                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
969                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
970                             ilk : NULL);
971                 tid = (uintptr_t)curthread;
972                 if (lk->lk_lock == LK_UNLOCKED &&
973                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
974                         lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
975                             flags);
976                         locked = true;
977                 } else {
978                         return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
979                             NULL));
980                 }
981                 break;
982         case LK_UPGRADE:
983         case LK_TRYUPGRADE:
984                 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
985         default:
986                 break;
987         }
988         if (__predict_true(locked)) {
989                 if (__predict_false(flags & LK_INTERLOCK)) {
990                         class = LOCK_CLASS(ilk);
991                         class->lc_unlock(ilk);
992                 }
993                 return (0);
994         } else {
995                 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
996                     LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
997         }
998 }
999
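/*
 * Slow path for shared release: let wakeupshlk() handle the wakeup logic,
 * then run the common exit path.
 */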
1000 static __noinline int
1001 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1002     const char *file, int line)
1003
1004 {
1005         int wakeup_swapper = 0;
1006
1007         if (KERNEL_PANICKED())
1008                 goto out;
1009
1010         wakeup_swapper = wakeupshlk(lk, file, line);
1011
1012 out:
1013         lockmgr_exit(flags, ilk, wakeup_swapper);
1014         return (0);
1015 }
1016
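/*
 * Slow path for exclusive release: handle disowned and recursed locks,
 * then drop the lock and wake up the appropriate sleepqueue, preferring
 * exclusive waiters over shared ones.
 */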
1017 static __noinline int
1018 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1019     const char *file, int line)
1020 {
1021         uintptr_t tid, v;
1022         int wakeup_swapper = 0;
1023         u_int realexslp;
1024         int queue;
1025
1026         if (KERNEL_PANICKED())
1027                 goto out;
1028
1029         tid = (uintptr_t)curthread;
1030
1031         /*
1032          * As a first attempt, treat the lock as if it has no
1033          * waiters.
1034          * Fix up the tid var if the lock has been disowned.
1035          */
1036         if (LK_HOLDER(x) == LK_KERNPROC)
1037                 tid = LK_KERNPROC;
1038
1039         /*
1040          * The lock is held in exclusive mode.
1041          * If the lock is recursed also, then unrecurse it.
1042          */
1043         if (lockmgr_xlocked_v(x) && lockmgr_recursed(lk)) {
1044                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1045                 lk->lk_recurse--;
1046                 goto out;
1047         }
1048         if (tid != LK_KERNPROC)
1049                 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1050                     LOCKSTAT_WRITER);
1051
1052         if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1053                 goto out;
1054
1055         sleepq_lock(&lk->lock_object);
1056         x = lk->lk_lock;
1057         v = LK_UNLOCKED;
1058
1059         /*
1060          * If the lock has exclusive waiters, give them
1061          * preference in order to avoid deadlock with
1062          * shared runners-up.
1063          * If interruptible sleeps left the exclusive queue
1064          * empty, avoid starvation of the threads sleeping
1065          * on the shared queue by giving them precedence
1066          * and cleaning up the exclusive waiters bit anyway.
1067          * Please note that the lk_exslpfail count may
1068          * overstate the real number of waiters with the
1069          * LK_SLEEPFAIL flag on because they may be used in
1070          * conjunction with interruptible sleeps, so
1071          * lk_exslpfail should be considered an upper
1072          * bound, including the edge cases.
1073          */
1074         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1075         realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1076         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1077                 if (lk->lk_exslpfail < realexslp) {
1078                         lk->lk_exslpfail = 0;
1079                         queue = SQ_EXCLUSIVE_QUEUE;
1080                         v |= (x & LK_SHARED_WAITERS);
1081                 } else {
1082                         lk->lk_exslpfail = 0;
1083                         LOCK_LOG2(lk,
1084                             "%s: %p has only LK_SLEEPFAIL sleepers",
1085                             __func__, lk);
1086                         LOCK_LOG2(lk,
1087                             "%s: %p waking up threads on the exclusive queue",
1088                             __func__, lk);
1089                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1090                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1091                         queue = SQ_SHARED_QUEUE;
1092                 }
1093         } else {
1094
1095                 /*
1096                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1097                  * on and using interruptible sleeps/timeout
1098                  * may have left spurious lk_exslpfail counts
1099                  * on, so clean it up anyway.
1100                  */
1101                 lk->lk_exslpfail = 0;
1102                 queue = SQ_SHARED_QUEUE;
1103         }
1104
1105         LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1106             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1107             "exclusive");
1108         atomic_store_rel_ptr(&lk->lk_lock, v);
1109         wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1110         sleepq_release(&lk->lock_object);
1111
1112 out:
1113         lockmgr_exit(flags, ilk, wakeup_swapper);
1114         return (0);
1115 }
1116
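/*
 * Fast-path unlock entry point: release an uncontended shared or exclusive
 * lock inline and defer to the hard-path routines when waiters must be
 * awoken or the lock is recursed or disowned.
 */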
1117 int
1118 lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
1119 {
1120         struct lock_class *class;
1121         uintptr_t x, tid;
1122         const char *file;
1123         int line;
1124
1125         if (KERNEL_PANICKED())
1126                 return (0);
1127
1128         file = __FILE__;
1129         line = __LINE__;
1130
1131         _lockmgr_assert(lk, KA_LOCKED, file, line);
1132         x = lk->lk_lock;
1133         if (__predict_true(x & LK_SHARE) != 0) {
1134                 lockmgr_note_shared_release(lk, file, line);
1135                 if (lockmgr_sunlock_try(lk, &x)) {
1136                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1137                 } else {
1138                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1139                 }
1140         } else {
1141                 tid = (uintptr_t)curthread;
1142                 lockmgr_note_exclusive_release(lk, file, line);
1143                 if (!lockmgr_recursed(lk) &&
1144                     atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1145                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1146                 } else {
1147                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1148                 }
1149         }
1150         if (__predict_false(flags & LK_INTERLOCK)) {
1151                 class = LOCK_CLASS(ilk);
1152                 class->lc_unlock(ilk);
1153         }
1154         return (0);
1155 }
1156
1157 /*
1158  * Lightweight entry points for common operations.
1159  *
1160  * Functionality is similar to sx locks, in that none of the additional lockmgr
1161  * features are supported. To be clear, these are NOT supported:
1162  * 1. shared locking disablement
1163  * 2. returning with an error after sleep
1164  * 3. unlocking the interlock
1165  *
1166  * If in doubt, use lockmgr_*_fast_path.
1167  */
1168 int
1169 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1170 {
1171         uintptr_t x;
1172
1173         MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1174         MPASS((flags & LK_INTERLOCK) == 0);
1175         MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1176
1177         if (LK_CAN_WITNESS(flags))
1178                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1179                     file, line, NULL);
1180         if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1181                 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1182                 return (0);
1183         }
1184
1185         return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL));
1186 }
1187
1188 int
1189 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1190 {
1191         uintptr_t tid;
1192
1193         MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1194         MPASS((flags & LK_INTERLOCK) == 0);
1195
1196         if (LK_CAN_WITNESS(flags))
1197                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1198                     LOP_EXCLUSIVE, file, line, NULL);
1199         tid = (uintptr_t)curthread;
1200         if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1201                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1202                     flags);
1203                 return (0);
1204         }
1205
1206         return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL));
1207 }
1208
1209 int
1210 lockmgr_unlock(struct lock *lk)
1211 {
1212         uintptr_t x, tid;
1213         const char *file;
1214         int line;
1215
1216         file = __FILE__;
1217         line = __LINE__;
1218
1219         _lockmgr_assert(lk, KA_LOCKED, file, line);
1220         x = lk->lk_lock;
1221         if (__predict_true(x & LK_SHARE) != 0) {
1222                 lockmgr_note_shared_release(lk, file, line);
1223                 if (lockmgr_sunlock_try(lk, &x)) {
1224                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1225                 } else {
1226                         return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1227                 }
1228         } else {
1229                 tid = (uintptr_t)curthread;
1230                 lockmgr_note_exclusive_release(lk, file, line);
1231                 if (!lockmgr_recursed(lk) &&
1232                     atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1233                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1234                 } else {
1235                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1236                 }
1237         }
1238         return (0);
1239 }
1240
1241 int
1242 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1243     const char *wmesg, int pri, int timo, const char *file, int line)
1244 {
1245         GIANT_DECLARE;
1246         struct lockmgr_wait lwa;
1247         struct lock_class *class;
1248         const char *iwmesg;
1249         uintptr_t tid, v, x;
1250         u_int op, realexslp;
1251         int error, ipri, itimo, queue, wakeup_swapper;
1252 #ifdef LOCK_PROFILING
1253         uint64_t waittime = 0;
1254         int contested = 0;
1255 #endif
1256
1257         if (KERNEL_PANICKED())
1258                 return (0);
1259
1260         error = 0;
1261         tid = (uintptr_t)curthread;
1262         op = (flags & LK_TYPE_MASK);
1263         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1264         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1265         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1266
1267         lwa.iwmesg = iwmesg;
1268         lwa.ipri = ipri;
1269         lwa.itimo = itimo;
1270
1271         MPASS((flags & ~LK_TOTAL_MASK) == 0);
1272         KASSERT((op & (op - 1)) == 0,
1273             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1274         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1275             (op != LK_DOWNGRADE && op != LK_RELEASE),
1276             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1277             __func__, file, line));
1278         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1279             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1280             __func__, file, line));
1281         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1282             ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1283             lk->lock_object.lo_name, file, line));
1284
1285         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1286
1287         if (lk->lock_object.lo_flags & LK_NOSHARE) {
1288                 switch (op) {
1289                 case LK_SHARED:
1290                         op = LK_EXCLUSIVE;
1291                         break;
1292                 case LK_UPGRADE:
1293                 case LK_TRYUPGRADE:
1294                 case LK_DOWNGRADE:
1295                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1296                             file, line);
1297                         if (flags & LK_INTERLOCK)
1298                                 class->lc_unlock(ilk);
1299                         return (0);
1300                 }
1301         }
1302
1303         wakeup_swapper = 0;
1304         switch (op) {
1305         case LK_SHARED:
1306                 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1307                 break;
1308         case LK_UPGRADE:
1309         case LK_TRYUPGRADE:
1310                 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1311                 break;
1312         case LK_EXCLUSIVE:
1313                 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1314                 break;
1315         case LK_DOWNGRADE:
1316                 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1317                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1318
1319                 /*
1320                  * Panic if the lock is recursed.
1321                  */
1322                 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1323                         if (flags & LK_INTERLOCK)
1324                                 class->lc_unlock(ilk);
1325                         panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1326                             __func__, iwmesg, file, line);
1327                 }
1328                 TD_SLOCKS_INC(curthread);
1329
1330                 /*
1331                  * In order to preserve waiters flags, just spin.
1332                  */
1333                 for (;;) {
1334                         x = lk->lk_lock;
1335                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1336                         x &= LK_ALL_WAITERS;
1337                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1338                             LK_SHARERS_LOCK(1) | x))
1339                                 break;
1340                         cpu_spinwait();
1341                 }
1342                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1343                 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1344                 break;
1345         case LK_RELEASE:
1346                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1347                 x = lk->lk_lock;
1348
1349                 if (__predict_true(x & LK_SHARE) != 0) {
1350                         lockmgr_note_shared_release(lk, file, line);
1351                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1352                 } else {
1353                         lockmgr_note_exclusive_release(lk, file, line);
1354                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1355                 }
1356                 break;
1357         case LK_DRAIN:
1358                 if (LK_CAN_WITNESS(flags))
1359                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1360                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1361                             ilk : NULL);
1362
1363                 /*
1364                  * Trying to drain a lock we already own will result in a
1365                  * deadlock.
1366                  */
1367                 if (lockmgr_xlocked(lk)) {
1368                         if (flags & LK_INTERLOCK)
1369                                 class->lc_unlock(ilk);
1370                         panic("%s: draining %s with the lock held @ %s:%d\n",
1371                             __func__, iwmesg, file, line);
1372                 }
1373
1374                 for (;;) {
1375                         if (lk->lk_lock == LK_UNLOCKED &&
1376                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1377                                 break;
1378
1379 #ifdef HWPMC_HOOKS
1380                         PMC_SOFT_CALL( , , lock, failed);
1381 #endif
1382                         lock_profile_obtain_lock_failed(&lk->lock_object,
1383                             &contested, &waittime);
1384
1385                         /*
1386                          * If the operation must not sleep, just give up
1387                          * and return.
1388                          */
1389                         if (LK_TRYOP(flags)) {
1390                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1391                                     __func__, lk);
1392                                 error = EBUSY;
1393                                 break;
1394                         }
1395
1396                         /*
1397                          * Acquire the sleepqueue chain lock because we
1398                          * probably will need to manipulate waiters flags.
1399                          */
1400                         sleepq_lock(&lk->lock_object);
1401                         x = lk->lk_lock;
1402
1403                         /*
1404                          * if the lock has been released while we spun on
1405                          * the sleepqueue chain lock just try again.
1406                          */
1407                         if (x == LK_UNLOCKED) {
1408                                 sleepq_release(&lk->lock_object);
1409                                 continue;
1410                         }
1411
1412                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1413                         if ((x & ~v) == LK_UNLOCKED) {
1414                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1415
1416                                 /*
1417                                  * If interruptible sleeps left the exclusive
1418                                  * queue empty, avoid starvation of the
1419                                  * threads sleeping on the shared queue by
1420                                  * giving them precedence and cleaning up the
1421                                  * exclusive waiters bit anyway.
1422                                  * Please note that the lk_exslpfail count may
1423                                  * overstate the real number of waiters with
1424                                  * the LK_SLEEPFAIL flag on because they may
1425                                  * be used in conjunction with interruptible
1426                                  * sleeps, so lk_exslpfail should be considered
1427                                  * an upper bound, including the edge
1428                                  * cases.
1429                                  */
1430                                 if (v & LK_EXCLUSIVE_WAITERS) {
1431                                         queue = SQ_EXCLUSIVE_QUEUE;
1432                                         v &= ~LK_EXCLUSIVE_WAITERS;
1433                                 } else {
1434
1435                                         /*
1436                                          * Exclusive waiters sleeping with
1437                                          * LK_SLEEPFAIL on and using
1438                                          * interruptible sleeps/timeouts may
1439                                          * have left spurious lk_exslpfail
1440                                          * counts on, so clean them up anyway.
1441                                          */
1442                                         MPASS(v & LK_SHARED_WAITERS);
1443                                         lk->lk_exslpfail = 0;
1444                                         queue = SQ_SHARED_QUEUE;
1445                                         v &= ~LK_SHARED_WAITERS;
1446                                 }
1447                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1448                                         realexslp =
1449                                             sleepq_sleepcnt(&lk->lock_object,
1450                                             SQ_EXCLUSIVE_QUEUE);
1451                                         if (lk->lk_exslpfail >= realexslp) {
1452                                                 lk->lk_exslpfail = 0;
1453                                                 queue = SQ_SHARED_QUEUE;
1454                                                 v &= ~LK_SHARED_WAITERS;
1455                                                 if (realexslp != 0) {
1456                                                         LOCK_LOG2(lk,
1457                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1458                                                             __func__, lk);
1459                                                         LOCK_LOG2(lk,
1460                         "%s: %p waking up threads on the exclusive queue",
1461                                                             __func__, lk);
1462                                                         wakeup_swapper =
1463                                                             sleepq_broadcast(
1464                                                             &lk->lock_object,
1465                                                             SLEEPQ_LK, 0,
1466                                                             SQ_EXCLUSIVE_QUEUE);
1467                                                 }
1468                                         } else
1469                                                 lk->lk_exslpfail = 0;
1470                                 }
1471                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1472                                         sleepq_release(&lk->lock_object);
1473                                         continue;
1474                                 }
1475                                 LOCK_LOG3(lk,
1476                                 "%s: %p waking up all threads on the %s queue",
1477                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1478                                     "shared" : "exclusive");
1479                                 wakeup_swapper |= sleepq_broadcast(
1480                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1481
1482                                 /*
1483                                  * If shared waiters have been woken up, we
1484                                  * need to wait for one of them to acquire the
1485                                  * lock before setting the exclusive waiters
1486                                  * flag, in order to avoid a deadlock.
1487                                  */
1488                                 if (queue == SQ_SHARED_QUEUE) {
1489                                         for (v = lk->lk_lock;
1490                                             (v & LK_SHARE) && !LK_SHARERS(v);
1491                                             v = lk->lk_lock)
1492                                                 cpu_spinwait();
1493                                 }
1494                         }
1495
1496                         /*
1497                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1498                          * fail, loop back and retry.
1499                          */
1500                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1501                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1502                                     x | LK_EXCLUSIVE_WAITERS)) {
1503                                         sleepq_release(&lk->lock_object);
1504                                         continue;
1505                                 }
1506                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1507                                     __func__, lk);
1508                         }
1509
1510                         /*
1511                          * Since we have been unable to acquire the
1512                          * exclusive lock and the exclusive waiters flag
1513                          * is set, we will sleep.
1514                          */
1515                         if (flags & LK_INTERLOCK) {
1516                                 class->lc_unlock(ilk);
1517                                 flags &= ~LK_INTERLOCK;
1518                         }
1519                         GIANT_SAVE();
1520                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1521                             SQ_EXCLUSIVE_QUEUE);
1522                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1523                         GIANT_RESTORE();
1524                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1525                             __func__, lk);
1526                 }
1527
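                /*
                 * If the drain succeeded, record it for lock profiling,
                 * WITNESS and the per-thread lock count, and save the
                 * acquisition stack.
                 */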
1528                 if (error == 0) {
1529                         lock_profile_obtain_lock_success(&lk->lock_object,
1530                             contested, waittime, file, line);
1531                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1532                             lk->lk_recurse, file, line);
1533                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1534                             LK_TRYWIT(flags), file, line);
1535                         TD_LOCKS_INC(curthread);
1536                         STACK_SAVE(lk);
1537                 }
1538                 break;
1539         default:
1540                 if (flags & LK_INTERLOCK)
1541                         class->lc_unlock(ilk);
1542                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1543         }
1544
1545         if (flags & LK_INTERLOCK)
1546                 class->lc_unlock(ilk);
1547         if (wakeup_swapper)
1548                 kick_proc0();
1549
1550         return (error);
1551 }
1552
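/*
 * Pass ownership of an exclusively held lock to LK_KERNPROC, so that the
 * lock can later be released by a thread other than the one that acquired
 * it, while preserving any waiters flags.
 */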
1553 void
1554 _lockmgr_disown(struct lock *lk, const char *file, int line)
1555 {
1556         uintptr_t tid, x;
1557
1558         if (SCHEDULER_STOPPED())
1559                 return;
1560
1561         tid = (uintptr_t)curthread;
1562         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1563
1564         /*
1565          * Panic if the lock is recursed.
1566          */
1567         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1568                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1569                     __func__,  file, line);
1570
1571         /*
1572          * If the owner is already LK_KERNPROC, just skip the whole operation.
1573          */
1574         if (LK_HOLDER(lk->lk_lock) != tid)
1575                 return;
1576         lock_profile_release_lock(&lk->lock_object);
1577         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1578         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1579         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1580         TD_LOCKS_DEC(curthread);
1581         STACK_SAVE(lk);
1582
1583         /*
1584          * In order to preserve waiters flags, just spin.
1585          */
1586         for (;;) {
1587                 x = lk->lk_lock;
1588                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1589                 x &= LK_ALL_WAITERS;
1590                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1591                     LK_KERNPROC | x))
1592                         return;
1593                 cpu_spinwait();
1594         }
1595 }
1596
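/*
 * Print diagnostic information about the given lock: the current owner or
 * the number of sharers, any pending waiters or spinners and, if DEBUG_LOCKS
 * is enabled, the saved stack trace.
 */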
1597 void
1598 lockmgr_printinfo(const struct lock *lk)
1599 {
1600         struct thread *td;
1601         uintptr_t x;
1602
1603         if (lk->lk_lock == LK_UNLOCKED)
1604                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1605         else if (lk->lk_lock & LK_SHARE)
1606                 printf("lock type %s: SHARED (count %ju)\n",
1607                     lk->lock_object.lo_name,
1608                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1609         else {
1610                 td = lockmgr_xholder(lk);
1611                 if (td == (struct thread *)LK_KERNPROC)
1612                         printf("lock type %s: EXCL by KERNPROC\n",
1613                             lk->lock_object.lo_name);
1614                 else
1615                         printf("lock type %s: EXCL by thread %p "
1616                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1617                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1618                             td->td_tid);
1619         }
1620
1621         x = lk->lk_lock;
1622         if (x & LK_EXCLUSIVE_WAITERS)
1623                 printf(" with exclusive waiters pending\n");
1624         if (x & LK_SHARED_WAITERS)
1625                 printf(" with shared waiters pending\n");
1626         if (x & LK_EXCLUSIVE_SPINNERS)
1627                 printf(" with exclusive spinners pending\n");
1628
1629         STACK_PRINT(lk);
1630 }
1631
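/*
 * Return the status of the lock with respect to the calling thread:
 * LK_EXCLUSIVE if it is held exclusively by curthread (or has been disowned
 * to LK_KERNPROC), LK_EXCLOTHER if it is held exclusively by another thread,
 * LK_SHARED if it is held in shared mode, or 0 if it is unlocked.
 */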
1632 int
1633 lockstatus(const struct lock *lk)
1634 {
1635         uintptr_t v, x;
1636         int ret;
1637
1638         ret = LK_SHARED;
1639         x = lk->lk_lock;
1640         v = LK_HOLDER(x);
1641
1642         if ((x & LK_SHARE) == 0) {
1643                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1644                         ret = LK_EXCLUSIVE;
1645                 else
1646                         ret = LK_EXCLOTHER;
1647         } else if (x == LK_UNLOCKED)
1648                 ret = 0;
1649
1650         return (ret);
1651 }
1652
1653 #ifdef INVARIANT_SUPPORT
1654
1655 FEATURE(invariant_support,
1656     "Support for modules compiled with INVARIANTS option");
1657
1658 #ifndef INVARIANTS
1659 #undef  _lockmgr_assert
1660 #endif
1661
1662 void
1663 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1664 {
1665         int slocked = 0;
1666
1667         if (KERNEL_PANICKED())
1668                 return;
1669         switch (what) {
1670         case KA_SLOCKED:
1671         case KA_SLOCKED | KA_NOTRECURSED:
1672         case KA_SLOCKED | KA_RECURSED:
1673                 slocked = 1;
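                /* FALLTHROUGH */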
1674         case KA_LOCKED:
1675         case KA_LOCKED | KA_NOTRECURSED:
1676         case KA_LOCKED | KA_RECURSED:
1677 #ifdef WITNESS
1678
1679                 /*
1680                  * We cannot trust WITNESS if the lock is held in exclusive
1681                  * mode and a call to lockmgr_disown() happened.
1682                  * Work around this by skipping the check if the lock is held in
1683                  * exclusive mode even for the KA_LOCKED case.
1684                  */
1685                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1686                         witness_assert(&lk->lock_object, what, file, line);
1687                         break;
1688                 }
1689 #endif
1690                 if (lk->lk_lock == LK_UNLOCKED ||
1691                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1692                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1693                         panic("Lock %s not %slocked @ %s:%d\n",
1694                             lk->lock_object.lo_name, slocked ? "share" : "",
1695                             file, line);
1696
1697                 if ((lk->lk_lock & LK_SHARE) == 0) {
1698                         if (lockmgr_recursed(lk)) {
1699                                 if (what & KA_NOTRECURSED)
1700                                         panic("Lock %s recursed @ %s:%d\n",
1701                                             lk->lock_object.lo_name, file,
1702                                             line);
1703                         } else if (what & KA_RECURSED)
1704                                 panic("Lock %s not recursed @ %s:%d\n",
1705                                     lk->lock_object.lo_name, file, line);
1706                 }
1707                 break;
1708         case KA_XLOCKED:
1709         case KA_XLOCKED | KA_NOTRECURSED:
1710         case KA_XLOCKED | KA_RECURSED:
1711                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1712                         panic("Lock %s not exclusively locked @ %s:%d\n",
1713                             lk->lock_object.lo_name, file, line);
1714                 if (lockmgr_recursed(lk)) {
1715                         if (what & KA_NOTRECURSED)
1716                                 panic("Lock %s recursed @ %s:%d\n",
1717                                     lk->lock_object.lo_name, file, line);
1718                 } else if (what & KA_RECURSED)
1719                         panic("Lock %s not recursed @ %s:%d\n",
1720                             lk->lock_object.lo_name, file, line);
1721                 break;
1722         case KA_UNLOCKED:
1723                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1724                         panic("Lock %s exclusively locked @ %s:%d\n",
1725                             lk->lock_object.lo_name, file, line);
1726                 break;
1727         default:
1728                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1729                     line);
1730         }
1731 }
1732 #endif
1733
1734 #ifdef DDB
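/*
 * If the given thread is blocked on a lockmgr lock, describe the lock for
 * DDB's lock chain display, store the exclusive owner (if any) in *ownerp
 * and return 1; return 0 if the thread is not blocked on a lockmgr lock.
 */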
1735 int
1736 lockmgr_chain(struct thread *td, struct thread **ownerp)
1737 {
1738         const struct lock *lk;
1739
1740         lk = td->td_wchan;
1741
1742         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1743                 return (0);
1744         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1745         if (lk->lk_lock & LK_SHARE)
1746                 db_printf("SHARED (count %ju)\n",
1747                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1748         else
1749                 db_printf("EXCL\n");
1750         *ownerp = lockmgr_xholder(lk);
1751
1752         return (1);
1753 }
1754
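/*
 * Dump the state of a lockmgr lock for DDB: unlocked, shared with the
 * sharer count, or exclusive with the owner and recursion depth, plus any
 * pending waiters or spinners.
 */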
1755 static void
1756 db_show_lockmgr(const struct lock_object *lock)
1757 {
1758         struct thread *td;
1759         const struct lock *lk;
1760
1761         lk = (const struct lock *)lock;
1762
1763         db_printf(" state: ");
1764         if (lk->lk_lock == LK_UNLOCKED)
1765                 db_printf("UNLOCKED\n");
1766         else if (lk->lk_lock & LK_SHARE)
1767                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1768         else {
1769                 td = lockmgr_xholder(lk);
1770                 if (td == (struct thread *)LK_KERNPROC)
1771                         db_printf("XLOCK: LK_KERNPROC\n");
1772                 else
1773                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1774                             td->td_tid, td->td_proc->p_pid,
1775                             td->td_proc->p_comm);
1776                 if (lockmgr_recursed(lk))
1777                         db_printf(" recursed: %d\n", lk->lk_recurse);
1778         }
1779         db_printf(" waiters: ");
1780         switch (lk->lk_lock & LK_ALL_WAITERS) {
1781         case LK_SHARED_WAITERS:
1782                 db_printf("shared\n");
1783                 break;
1784         case LK_EXCLUSIVE_WAITERS:
1785                 db_printf("exclusive\n");
1786                 break;
1787         case LK_ALL_WAITERS:
1788                 db_printf("shared and exclusive\n");
1789                 break;
1790         default:
1791                 db_printf("none\n");
1792         }
1793         db_printf(" spinners: ");
1794         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1795                 db_printf("exclusive\n");
1796         else
1797                 db_printf("none\n");
1798 }
1799 #endif