/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
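
/*
 * A minimal usage sketch (illustrative only; "foo_lock" and "foo_data"
 * are hypothetical names, not part of this file):
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo");
 *	sx_slock(&foo_lock);	(read foo_data; sharers may coexist)
 *	sx_sunlock(&foo_lock);
 *	sx_xlock(&foo_lock);	(modify foo_data exclusively)
 *	sx_xunlock(&foo_lock);
 *	sx_destroy(&foo_lock);
 */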

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE() do {                                               \
        if (mtx_owned(&Giant)) {                                        \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recurse              lock_object.lo_data
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

static void     assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_sx(const struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_sx,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static u_int asx_retries = 10;
static u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

        sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_slock(sx);
        else
                sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (0);
        } else {
                sx_sunlock(sx);
                return (1);
        }
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
        const struct sx *sx = (const struct sx *)lock;
        uintptr_t x = sx->sx_lock;

        *owner = (struct thread *)SX_OWNER(x);
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            (*owner != NULL));
}
#endif

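/*
 * SYSINIT callback used by the SX_SYSINIT() macros to initialize an sx
 * lock from its sx_args description during boot.
 */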
void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

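/*
 * Initialize an sx lock, translating the SX_* option flags into the
 * corresponding lock_object LO_* flags.
 */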
void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,
            &sx->sx_lock));

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;

        flags |= opts & SX_NOADAPTIVE;
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

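/*
 * Acquire a shared lock.  Returns 0 on success, or an error if
 * SX_INTERRUPTIBLE was passed and the sleep was interrupted by a signal.
 */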
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        if (SCHEDULER_STOPPED())
                return (0);
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
        error = __sx_slock(sx, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                curthread->td_locks++;
        }

        return (error);
}

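/*
 * Try to acquire a shared lock without blocking.  Returns 1 on success
 * and 0 if the lock is exclusively held.
 */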
int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));

        for (;;) {
                x = sx->sx_lock;
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE,
                            sx, 0, 0, file, line);
                        curthread->td_locks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

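/*
 * Acquire an exclusive lock.  Returns 0 on success, or an error if
 * SX_INTERRUPTIBLE was passed and the sleep was interrupted by a signal.
 */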
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        if (SCHEDULER_STOPPED())
                return (0);
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        error = __sx_xlock(sx, curthread, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                curthread->td_locks++;
        }

        return (error);
}

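/*
 * Try to acquire an exclusive lock without blocking.  Succeeds if the
 * lock is unlocked, or if the lock is recursable and curthread already
 * owns it.  Returns 1 on success and 0 on failure.
 */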
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        if (sx_xlocked(sx) &&
            (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
                    (uintptr_t)curthread);
        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                if (!sx_recursed(sx))
                        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
                            sx, 0, 0, file, line);
                curthread->td_locks++;
        }

        return (rval);
}

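/*
 * Release a shared lock.
 */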
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
        __sx_sunlock(sx, file, line);
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
        curthread->td_locks--;
}

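/*
 * Release an exclusive lock.
 */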
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
        if (!sx_recursed(sx))
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
        __sx_xunlock(sx, curthread, file, line);
        curthread->td_locks--;
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
        success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
            (uintptr_t)curthread | x);
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success) {
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
        }
        return (success);
}

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
                return;
        }

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_SHARED_QUEUE);
        sleepq_release(&sx->lock_object);

        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

        if (wakeup_swapper)
                kick_proc0();
}
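
/*
 * Illustrative upgrade/downgrade pattern (a sketch, not code used by
 * this file; "foo_lock" is a hypothetical lock):
 *
 *	sx_slock(&foo_lock);
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		(re-validate any state observed under the slock)
 *	}
 *	(do exclusive work)
 *	sx_downgrade(&foo_lock);
 */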

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        u_int i, spintries = 0;
#endif
        uintptr_t x;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#ifdef  KDTRACE_HOOKS
        uintptr_t state;
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

        /* If we already hold an exclusive lock, then recurse. */
        if (sx_xlocked(sx)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&sx->lock_object);
        state = sx->sx_lock;
#endif
        while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                    &waittime);
#ifdef ADAPTIVE_SX
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                x = sx->sx_lock;
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        if ((x & SX_LOCK_SHARED) == 0) {
                                x = SX_OWNER(x);
                                owner = (struct thread *)x;
                                if (TD_IS_RUNNING(owner)) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                                    __func__, sx, owner);
                                        KTR_STATE1(KTR_SCHED, "thread",
                                            sched_tdname(curthread), "spinning",
                                            "lockname:\"%s\"",
                                            sx->lock_object.lo_name);
                                        GIANT_SAVE();
                                        while (SX_OWNER(sx->sx_lock) == x &&
                                            TD_IS_RUNNING(owner)) {
                                                cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                                spin_cnt++;
#endif
                                        }
                                        KTR_STATE0(KTR_SCHED, "thread",
                                            sched_tdname(curthread), "running");
                                        continue;
                                }
                        } else if (SX_SHARERS(x) && spintries < asx_retries) {
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
                                spintries++;
                                for (i = 0; i < asx_loops; i++) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR4(KTR_LOCK,
                                    "%s: shared spinning on %p with %u and %u",
                                                    __func__, sx, spintries, i);
                                        x = sx->sx_lock;
                                        if ((x & SX_LOCK_SHARED) == 0 ||
                                            SX_SHARERS(x) == 0)
                                                break;
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                if (i != asx_loops)
                                        continue;
                        }
                }
#endif

                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                            SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                                    __func__, sx);
                                break;
                        }
                        sleepq_release(&sx->lock_object);
                        continue;
                }

                /*
                 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
                 * then loop back and retry.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(LS_SX_XLOCK_BLOCK, sx, sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(LS_SX_XLOCK_SPIN, sx, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
        if (!error)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
                    contested, waittime, file, line);
        GIANT_RESTORE();
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
        uintptr_t x;
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

        /* If the lock is recursed, then unrecurse one level. */
        if (sx_xlocked(sx) && sx_recursed(sx)) {
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }
        MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
            SX_LOCK_EXCLUSIVE_WAITERS));
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_LOCK_UNLOCKED;

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         * If interruptible sleeps left the shared queue empty, avoid
         * starvation of the threads sleeping on the exclusive queue by
         * giving them precedence and clearing the shared waiters bit anyway.
         */
        if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
            sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
                queue = SQ_SHARED_QUEUE;
                x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
        } else
                queue = SQ_EXCLUSIVE_QUEUE;

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
        atomic_store_rel_ptr(&sx->sx_lock, x);
        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
            queue);
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t x;
        int error = 0;
#ifdef KDTRACE_HOOKS
        uintptr_t state;
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#ifdef KDTRACE_HOOKS
        state = sx->sx_lock;
        all_time -= lockstat_nsecs(&sx->lock_object);
#endif

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
                x = sx->sx_lock;

                /*
                 * If no other thread has an exclusive lock then try to bump up
                 * the count of sharers.  Since we have to preserve the state
                 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
                 * shared lock loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        MPASS(!(x & SX_LOCK_SHARED_WAITERS));
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
                            x + SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            sx, (void *)x,
                                            (void *)(x + SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                    &waittime);

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        x = SX_OWNER(x);
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                        cpu_spinwait();
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
                        }
                }
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * The lock could have been released while we spun.
                 * In this case loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(LS_SX_SLOCK_BLOCK, sx, sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(LS_SX_SLOCK_SPIN, sx, all_time - sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
        if (error == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
                    contested, waittime, file, line);
        GIANT_RESTORE();
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        for (;;) {
                x = sx->sx_lock;

                /*
                 * We should never have waiting sharers while at least
                 * one thread holds a shared lock.
                 */
                KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
                    ("%s: waiting sharers", __func__));

                /*
                 * See if there is more than one shared lock held.  If
                 * so, just drop one and return.
                 */
                if (SX_SHARERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
                            x - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)x,
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }

                /*
                 * If there aren't any waiters for an exclusive lock,
                 * then try to drop it quickly.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(x == SX_SHARERS_LOCK(1));
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock,
                            SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, sx);
                                break;
                        }
                        continue;
                }

                /*
                 * At this point, there should just be one sharer with
                 * exclusive waiters.
                 */
                MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

                sleepq_lock(&sx->lock_object);

                /*
                 * The wakeup semantic here is quite simple: just wake up
                 * all the exclusive waiters.  Note that the state of the
                 * lock could have changed, so if the atomic update fails,
                 * loop back and retry.
                 */
                if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_EXCLUSIVE_QUEUE);
                sleepq_release(&sx->lock_object);
                if (wakeup_swapper)
                        kick_proc0();
                break;
        }
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (panicstr != NULL)
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
        struct thread *td;
        const struct sx *sx;

        sx = (const struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch(sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif