/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
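
/*
 * Basic usage (a minimal sketch; "foo_lock" and the foo_data it
 * protects are hypothetical, not part of this file):
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		read foo_data under the shared lock
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		modify foo_data under the exclusive lock
 *	sx_xunlock(&foo_lock);
 */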

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define ADAPTIVE_SX
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE(work) do {                                           \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                work++;                                                 \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)

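/*
 * A sketch of the pattern the GIANT_* macros implement in the
 * hard-path routines below (not a public interface):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);		drop Giant before spinning/sleeping
 *	sleepq_wait(...);
 *	...
 *	GIANT_RESTORE();		reacquire Giant on the way out
 */
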
/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

static void     assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_sx(const struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_sx,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries;
static __read_frequently u_int asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

        lock_delay_default_init(&sx_delay);
        asx_retries = 10;
        asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

        sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_slock(sx);
        else
                sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (0);
        } else {
                sx_sunlock(sx);
                return (1);
        }
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
        const struct sx *sx;
        uintptr_t x;

        sx = (const struct sx *)lock;
        x = sx->sx_lock;
        *owner = NULL;
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,
            &sx->sx_lock));

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;
        if (opts & SX_NEW)
                flags |= LO_NEW;

        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
}

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

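/*
 * Example initialization and teardown (a sketch; "foo_lock" is
 * hypothetical, not part of this file):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init_flags(&foo_lock, "foo lock", SX_RECURSE);
 *	...
 *	sx_destroy(&foo_lock);
 *
 * SX_RECURSE is only needed when a thread may re-acquire the lock
 * exclusively while already owning it; plain sx_init() passes no flags.
 */
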
int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));

        x = sx->sx_lock;
        for (;;) {
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_READER);
                        TD_LOCKS_INC(curthread);
                        curthread->td_sx_slocks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        uintptr_t tid, x;
        int error = 0;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        tid = (uintptr_t)curthread;
        x = SX_LOCK_UNLOCKED;
        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
        else
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    0, 0, file, line, LOCKSTAT_WRITER);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_INC(curthread);
        }

        return (error);
}

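/*
 * _sx_xlock() honors SX_INTERRUPTIBLE and returns an errno value if an
 * interruptible sleep is aborted by a signal.  A sketch of a caller
 * using the interruptible variant ("foo_lock" is hypothetical):
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);
 *	...
 *	sx_xunlock(&foo_lock);
 */
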
int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t tid, x;
        int rval;
        bool recursed;

        td = curthread;
        tid = (uintptr_t)td;
        if (SCHEDULER_STOPPED_TD(td))
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
            ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        rval = 1;
        recursed = false;
        x = SX_LOCK_UNLOCKED;
        for (;;) {
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                        break;
                if (x == SX_LOCK_UNLOCKED)
                        continue;
                if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
                        sx->sx_recurse++;
                        atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                        recursed = true;
                        break;
                }
                rval = 0;
                break;
        }

        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                if (!recursed)
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_WRITER);
                TD_LOCKS_INC(curthread);
        }

        return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
#if LOCK_DEBUG > 0
        _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
        __sx_xunlock(sx, curthread, file, line);
#endif
        TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        uintptr_t waiters;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        success = 0;
        x = SX_READ_VALUE(sx);
        for (;;) {
                if (SX_SHARERS(x) > 1)
                        break;
                waiters = (x & SX_LOCK_WAITERS);
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
                    (uintptr_t)curthread | waiters)) {
                        success = 1;
                        break;
                }
        }
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_sx_slocks--;
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(sx__upgrade, sx);
        }
        return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

        return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS)))
                goto out;

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_SHARED_QUEUE);
        sleepq_release(&sx->lock_object);

        if (wakeup_swapper)
                kick_proc0();

out:
        curthread->td_sx_slocks++;
        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

        sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

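/*
 * Upgrade/downgrade usage sketch ("foo_lock" and "need_to_modify" are
 * hypothetical, not part of this file).  Take the shared lock for the
 * common read case and upgrade only when a write turns out to be
 * needed, falling back to a full exclusive acquire (and re-checking
 * the protected state) if the non-blocking upgrade fails:
 *
 *	sx_slock(&foo_lock);
 *	if (need_to_modify) {
 *		if (!sx_try_upgrade(&foo_lock)) {
 *			sx_sunlock(&foo_lock);
 *			sx_xlock(&foo_lock);
 *			...re-check need_to_modify...
 *		}
 *		...modify...
 *		sx_downgrade(&foo_lock);
 *	}
 *	...read...
 *	sx_sunlock(&foo_lock);
 */
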
#ifdef  ADAPTIVE_SX
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

        if (x & SX_LOCK_WRITE_SPINNER)
                return;
        if (*in_critical) {
                critical_exit();
                *in_critical = false;
                (*extra_work)--;
        }
}
#else
#define sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
        uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        u_int i, n, spintries = 0;
        enum { READERS, WRITER } sleep_reason = READERS;
        bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef  KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state = 0;
        int doing_lockprof = 0;
#endif
        int extra_work = 0;

        tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                while (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                goto out_lockstat;
                }
                extra_work = 1;
                doing_lockprof = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        doing_lockprof = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init(&lda, NULL);
#endif

        if (__predict_false(x == SX_LOCK_UNLOCKED))
                x = SX_READ_VALUE(sx);

        /* If we already hold an exclusive lock, then recurse. */
        if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        for (;;) {
                if (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                break;
                        continue;
                }
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
                if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                break;
                        continue;
                }

                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                if ((x & SX_LOCK_SHARED) == 0) {
                        sx_drop_critical(x, &in_critical, &extra_work);
                        sleep_reason = WRITER;
                        owner = lv_sx_owner(x);
                        if (!TD_IS_RUNNING(owner))
                                goto sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, sx, owner);
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        do {
                                lock_delay(&lda);
                                x = SX_READ_VALUE(sx);
                                owner = lv_sx_owner(x);
                        } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        continue;
                } else if (SX_SHARERS(x) > 0) {
                        sleep_reason = READERS;
                        if (spintries == asx_retries)
                                goto sleepq;
                        if (!(x & SX_LOCK_WRITE_SPINNER)) {
                                if (!in_critical) {
                                        critical_enter();
                                        in_critical = true;
                                        extra_work++;
                                }
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    x | SX_LOCK_WRITE_SPINNER)) {
                                        critical_exit();
                                        in_critical = false;
                                        extra_work--;
                                        continue;
                                }
                        }
                        spintries++;
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        n = SX_SHARERS(x);
                        for (i = 0; i < asx_loops; i += n) {
                                lock_delay_spin(n);
                                x = SX_READ_VALUE(sx);
                                if (!(x & SX_LOCK_WRITE_SPINNER))
                                        break;
                                if (!(x & SX_LOCK_SHARED))
                                        break;
                                n = SX_SHARERS(x);
                                if (n == 0)
                                        break;
                        }
#ifdef KDTRACE_HOOKS
                        lda.spin_cnt += i;
#endif
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        if (i < asx_loops)
                                continue;
                }
sleepq:
#endif
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        sx_drop_critical(x, &in_critical, &extra_work);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                sx_drop_critical(x, &in_critical,
                                    &extra_work);
                                continue;
                        }
                } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
                        sleepq_release(&sx->lock_object);
                        sx_drop_critical(x, &in_critical, &extra_work);
                        continue;
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS,
                 * since other exclusive waiters may still remain.  If
                 * we fail, restart the loop.
                 */
                setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
                if ((x & ~setx) == SX_LOCK_SHARED) {
                        setx &= ~SX_LOCK_WRITE_SPINNER;
                        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
                                goto retry_sleepq;
                        sleepq_release(&sx->lock_object);
                        CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                            __func__, sx);
                        break;
                }

#ifdef ADAPTIVE_SX
                /*
                 * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
                 * It is an invariant that when the bit is set, there is
                 * a writer ready to grab the lock. Thus clear the bit since
                 * we are going to sleep.
                 */
                if (in_critical) {
                        if ((x & SX_LOCK_WRITE_SPINNER) ||
                            !(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                                setx = x & ~SX_LOCK_WRITE_SPINNER;
                                setx |= SX_LOCK_EXCLUSIVE_WAITERS;
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    setx)) {
                                        goto retry_sleepq;
                                }
                        }
                        critical_exit();
                        in_critical = false;
                } else {
#endif
                        /*
                         * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If
                         * we fail, then loop back and retry.
                         */
                        if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                        goto retry_sleepq;
                                }
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                            __func__, sx);
                        }
#ifdef ADAPTIVE_SX
                }
#endif

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
        if (__predict_true(!extra_work))
                return (error);
#ifdef ADAPTIVE_SX
        if (in_critical)
                critical_exit();
#endif
        GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!doing_lockprof))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (!error)
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_WRITER);
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t tid, setx;
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        tid = (uintptr_t)curthread;

        if (__predict_false(x == tid))
                x = SX_READ_VALUE(sx);

        MPASS(!(x & SX_LOCK_SHARED));

        if (__predict_false(x & SX_LOCK_RECURSED)) {
                /* The lock is recursed, unrecurse one level. */
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
        if (x == tid &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
                return;

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

        /*
         * The wakeup algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         * If interruptible sleeps left the shared queue empty, avoid
         * starving the threads sleeping on the exclusive queue by
         * giving them precedence and clearing the shared waiters bit
         * anyway.
         */
        setx = SX_LOCK_UNLOCKED;
        queue = SQ_SHARED_QUEUE;
        if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
            sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
                queue = SQ_EXCLUSIVE_QUEUE;
                setx |= (x & SX_LOCK_SHARED_WAITERS);
        }
        atomic_store_rel_ptr(&sx->sx_lock, setx);

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");

        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
            queue);
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
}

static bool __always_inline
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

        if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
                        == SX_LOCK_SHARED)
                return (true);
        if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
                return (true);
        return (false);
}

static bool __always_inline
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

        /*
         * If no other thread has an exclusive lock then try to bump up
         * the count of sharers.  Since we have to preserve the state
         * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
         * shared lock loop back and retry.
         */
        while (__sx_can_read(td, *xp, fp)) {
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
                    *xp + SX_ONE_SHARER)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
                                    __func__, sx, (void *)*xp,
                                    (void *)(*xp + SX_ONE_SHARER));
                        td->td_sx_slocks++;
                        return (true);
                }
        }
        return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
        struct thread *td;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state = 0;
#endif
        int extra_work = 0;

        td = curthread;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
                        goto out_lockstat;
                extra_work = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
                if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
                        break;
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((x & SX_LOCK_SHARED) == 0) {
                        owner = lv_sx_owner(x);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                do {
                                        lock_delay(&lda);
                                        x = SX_READ_VALUE(sx);
                                        owner = lv_sx_owner(x);
                                } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
                        }
                } else {
                        if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
                                MPASS(!__sx_can_read(td, x, false));
                                lock_delay_spin(2);
                                x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (spintries < asx_retries) {
                                KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                                    "spinning", "lockname:\"%s\"",
                                    sx->lock_object.lo_name);
                                n = SX_SHARERS(x);
                                for (i = 0; i < asx_loops; i += n) {
                                        lock_delay_spin(n);
                                        x = SX_READ_VALUE(sx);
                                        if (!(x & SX_LOCK_SHARED))
                                                break;
                                        n = SX_SHARERS(x);
                                        if (n == 0)
                                                break;
                                        if (__sx_can_read(td, x, false))
                                                break;
                                }
#ifdef KDTRACE_HOOKS
                                lda.spin_cnt += i;
#endif
                                KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                                    "running");
                                if (i < asx_loops)
                                        continue;
                        }
                }
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:
                if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
                    __sx_can_read(td, x, false)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                x = SX_READ_VALUE(sx);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                            x | SX_LOCK_SHARED_WAITERS))
                                goto retry_sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!extra_work))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (error == 0) {
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_READER);
        }
        GIANT_RESTORE();
        return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t x;
        int error;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

        error = 0;
        td = curthread;
        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
            !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
                error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
                    file, line);
        if (error == 0) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                TD_LOCKS_INC(curthread);
        }
        return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

        return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

        for (;;) {
                if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
                            *xp - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)*xp,
                                            (void *)(*xp - SX_ONE_SHARER));
                                td->td_sx_slocks--;
                                return (true);
                        }
                        continue;
                }
                break;
        }
        return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
        int wakeup_swapper = 0;
        uintptr_t setx, queue;

        if (SCHEDULER_STOPPED())
                return;

        if (_sx_sunlock_try(sx, td, &x))
                goto out_lockstat;

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        for (;;) {
                if (_sx_sunlock_try(sx, td, &x))
                        break;

                /*
                 * The wakeup semantics here are quite simple:
                 * just wake up all the exclusive waiters.
                 * Note that the state of the lock could have changed,
                 * so if the update fails, loop back and retry.
                 */
                setx = SX_LOCK_UNLOCKED;
                queue = SQ_SHARED_QUEUE;
                if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
                        setx |= (x & SX_LOCK_SHARED_WAITERS);
                        queue = SQ_EXCLUSIVE_QUEUE;
                }
                setx |= (x & SX_LOCK_WRITE_SPINNER);
                if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
                        continue;
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, queue);
                td->td_sx_slocks--;
                break;
        }
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
out_lockstat:
        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t x;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

        td = curthread;
        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
            !_sx_sunlock_try(sx, td, &x)))
                _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_release_lock(&sx->lock_object);

        TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */

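/*
 * Example assertions (a sketch; "foo_lock" is hypothetical).  These
 * only have effect in kernels built with INVARIANTS:
 *
 *	sx_assert(&foo_lock, SA_XLOCKED);	we hold the exclusive lock
 *	sx_assert(&foo_lock, SA_LOCKED);	held shared or exclusive
 *	sx_assert(&foo_lock, SA_UNLOCKED);	we do not hold it exclusively
 */
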
#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
        struct thread *td;
        const struct sx *sx;

        sx = (const struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch (sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        const struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif