/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
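
/*
 * Example (a sketch of the sx(9) API implemented below; "data_lock" is a
 * hypothetical lock, not one defined in this file):
 *
 *	static struct sx data_lock;
 *
 *	sx_init(&data_lock, "data lock");
 *	sx_slock(&data_lock);		(any number of readers may enter)
 *	... read shared data ...
 *	sx_sunlock(&data_lock);
 *	sx_xlock(&data_lock);		(a writer excludes everyone else)
 *	... modify shared data ...
 *	sx_xunlock(&data_lock);
 *	sx_destroy(&data_lock);
 */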

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define	ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
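
/*
 * Note on the lock word (see sys/sx.h for the authoritative definitions):
 * sx_lock encodes the entire lock state.  When held exclusively it holds
 * the owning thread pointer; when held shared it holds the sharer count
 * shifted above a handful of low flag bits (SX_LOCK_SHARED,
 * SX_LOCK_SHARED_WAITERS, SX_LOCK_EXCLUSIVE_WAITERS, SX_LOCK_RECURSED).
 * Blocked threads sleep on one of the two queues above depending on which
 * kind of lock they are waiting for.
 */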

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

#define	GIANT_SAVE(work) do {						\
	if (mtx_owned(&Giant)) {					\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	GIANT_RESTORE() do {						\
	if (_giantcnt > 0) {						\
		mtx_assert(&Giant, MA_NOTOWNED);			\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)

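/*
 * Giant is a sleep mutex that may be held recursively, which is why
 * GIANT_SAVE() counts how many times it drops the lock and GIANT_RESTORE()
 * reacquires it that many times.  A sketch of the pattern these macros
 * support (hypothetical caller, not code from this file):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);		(fully drop Giant before sleeping)
 *	sleepq_wait(...);
 *	...
 *	GIANT_RESTORE();		(reacquire to the saved depth)
 */
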
/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define	sx_recursed(sx)		((sx)->sx_recurse != 0)

static void	assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_sx(const struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_sx,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries = 10;
static __read_frequently u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
#endif

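/*
 * The knobs above surface as sysctls under debug.sx and may be tuned at
 * run time, e.g. (example values only):
 *
 *	sysctl debug.sx.retries=10
 *	sysctl debug.sx.loops=10000
 *
 * "retries" bounds how many rounds of spinning a writer will attempt while
 * the lock is held shared, "loops" bounds each round, and delay_base/
 * delay_max shape the back-off used while spinning on a write owner.
 */
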
void
assert_sx(const struct lock_object *lock, int what)
{

	sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_slock(sx);
	else
		sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (0);
	} else {
		sx_sunlock(sx);
		return (1);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
	const struct sx *sx;
	uintptr_t x;

	sx = (const struct sx *)lock;
	x = sx->sx_lock;
	*owner = NULL;
	return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
	    ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
	int flags;

	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
	    SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
	    &sx->sx_lock));

	flags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (opts & SX_DUPOK)
		flags |= LO_DUPOK;
	if (opts & SX_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & SX_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & SX_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & SX_QUIET)
		flags |= LO_QUIET;
	if (opts & SX_NEW)
		flags |= LO_NEW;

	flags |= opts & SX_NOADAPTIVE;
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
	sx->sx_lock = SX_LOCK_UNLOCKED;
	sx->sx_recurse = 0;
}

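/*
 * Usage sketch: sx_sysinit() backs the SX_SYSINIT() macro in sys/sx.h,
 * which initializes a lock during boot.  A hypothetical example:
 *
 *	static struct sx conf_lock;
 *	SX_SYSINIT(conf_lock_init, &conf_lock, "conf lock");
 *
 * or, at run time, with exclusive recursion allowed:
 *
 *	sx_init_flags(&conf_lock, "conf lock", SX_RECURSE);
 */
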
void
sx_destroy(struct sx *sx)
{

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	x = sx->sx_lock;
	for (;;) {
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	rval = 1;
	recursed = false;
	x = SX_LOCK_UNLOCKED;
	for (;;) {
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
			break;
		if (x == SX_LOCK_UNLOCKED)
			continue;
		if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
			sx->sx_recurse++;
			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
			/*
			 * Mark the acquire as recursive so that lockstat
			 * is only charged for the initial acquisition.
			 */
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
#if LOCK_DEBUG > 0
	_sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
	__sx_xunlock(sx, curthread, file, line);
#endif
	TD_LOCKS_DEC(curthread);
}

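/*
 * Usage sketch for the upgrade path below (hypothetical caller): since the
 * upgrade can fail, callers typically fall back to dropping the shared lock
 * and taking the exclusive lock outright, revalidating any state examined
 * under the shared lock:
 *
 *	sx_slock(&data_lock);
 *	if (needs_update && !sx_try_upgrade(&data_lock)) {
 *		sx_sunlock(&data_lock);
 *		sx_xlock(&data_lock);
 *		(recheck: the state may have changed while unlocked)
 *	}
 */
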
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS)))
		goto out;

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	if (wakeup_swapper)
		kick_proc0();

out:
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

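/*
 * Usage sketch (hypothetical caller): downgrading lets a writer finish an
 * update and keep reading without a window in which another writer could
 * slip in:
 *
 *	sx_xlock(&data_lock);
 *	... initialize or update the data ...
 *	sx_downgrade(&data_lock);
 *	... keep reading; other readers may now enter too ...
 *	sx_sunlock(&data_lock);
 */
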
void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

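/*
 * Note on the atomic_fcmpset loop pattern used throughout the hard paths
 * below: atomic_fcmpset_*(p, &x, new) attempts the transition *p: x -> new
 * and, on failure, refreshes x with the value it actually found, so the
 * canonical retry loop avoids an extra load per iteration (sketch):
 *
 *	x = SX_READ_VALUE(sx);
 *	for (;;) {
 *		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
 *			break;
 *		(x now holds the current lock word; decide how to proceed)
 *	}
 */
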
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

	if (__predict_false(x == SX_LOCK_UNLOCKED))
		x = SX_READ_VALUE(sx);

	/* If we already hold an exclusive lock, then recurse. */
	if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
		    sx->lock_object.lo_name, file, line));
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				owner = lv_sx_owner(x);
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE(extra_work);
					do {
						lock_delay(&lda);
						x = SX_READ_VALUE(sx);
						owner = lv_sx_owner(x);
					} while (owner != NULL &&
						    TD_IS_RUNNING(owner));
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				spintries++;
				for (i = 0; i < asx_loops; i += n) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					n = SX_SHARERS(x);
					lock_delay_spin(n);
					x = SX_READ_VALUE(sx);
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the sleep queue
		 * chain lock.  If so, drop the sleep queue lock and try
		 * again.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * If an exclusive lock was released with both shared
		 * and exclusive waiters and a shared waiter hasn't
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
		 * fail, then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (!error)
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_WRITER);
	GIANT_RESTORE();
	return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by giving
	 * them precedence and clearing the shared waiters bit anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_EXCLUSIVE_QUEUE;
	if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
	while (*xp & SX_LOCK_SHARED) {
		MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
		    *xp + SX_ONE_SHARER)) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
				    __func__, sx, (void *)*xp,
				    (void *)(*xp + SX_ONE_SHARER));
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				x = SX_READ_VALUE(sx);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&sx->lock_object);
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
#endif
	if (error == 0) {
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

	for (;;) {
		/*
		 * We should never have waiting sharers while at least one
		 * thread holds a shared lock: a would-be reader can always
		 * join an already-shared lock instead of sleeping.
		 */
		KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(*xp) > 1) {
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
			    *xp - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)*xp,
					    (void *)(*xp - SX_ONE_SHARER));
				return (true);
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(*xp == SX_SHARERS_LOCK(1));
			*xp = SX_SHARERS_LOCK(1);
			if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
			    xp, SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				return (true);
			}
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper;
	uintptr_t setx;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, &x))
		goto out_lockstat;

	/*
	 * At this point, there should just be one sharer with
	 * exclusive waiters.
	 */
	MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
		MPASS(!(x & SX_LOCK_SHARED_WAITERS));
		/*
		 * Wake up semantic here is quite simple:
		 * Just wake up all the exclusive waiters.
		 * Note that the state of the lock could have changed,
		 * so if it fails loop back and retry.
		 */
		setx = x - SX_ONE_SHARER;
		setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, &x)))
		_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

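/*
 * Usage sketch (hypothetical caller): code that depends on a particular
 * lock state can document and enforce it with sx_assert(9), e.g.:
 *
 *	sx_assert(&data_lock, SA_XLOCKED);	(we must be the writer here)
 *	sx_assert(&data_lock, SA_LOCKED);	(any kind of hold will do)
 */
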
/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
	int slocked = 0;
#endif

	if (panicstr != NULL)
		return;
	switch (what) {
	case SA_SLOCKED:
	case SA_SLOCKED | SA_NOTRECURSED:
	case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
		slocked = 1;
		/* FALLTHROUGH */
#endif
	case SA_LOCKED:
	case SA_LOCKED | SA_NOTRECURSED:
	case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If some other thread has an exclusive lock or we
		 * have one and are asserting a shared lock, fail.
		 * Also, if no one has a lock at all, fail.
		 */
		if (sx->sx_lock == SX_LOCK_UNLOCKED ||
		    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
		    sx_xholder(sx) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, slocked ? "share " : "",
			    file, line);

		if (!(sx->sx_lock & SX_LOCK_SHARED)) {
			if (sx_recursed(sx)) {
				if (what & SA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    sx->lock_object.lo_name, file,
					    line);
			} else if (what & SA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		}
#endif
		break;
	case SA_XLOCKED:
	case SA_XLOCKED | SA_NOTRECURSED:
	case SA_XLOCKED | SA_RECURSED:
		if (sx_xholder(sx) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		if (sx_recursed(sx)) {
			if (what & SA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    sx->lock_object.lo_name, file, line);
		} else if (what & SA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		break;
	case SA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * If we hold an exclusive lock, fail.  We can't
		 * reliably check whether we hold a shared lock or
		 * not.
		 */
		if (sx_xholder(sx) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
	struct thread *td;
	const struct sx *sx;

	sx = (const struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_lock == SX_LOCK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (sx->sx_lock == SX_LOCK_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
	else {
		td = sx_xholder(sx);
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (sx_recursed(sx))
			db_printf(" recursed: %d\n", sx->sx_recurse);
	}

	db_printf(" waiters: ");
	switch (sx->sx_lock &
	    (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
	case SX_LOCK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
		db_printf("exclusive and shared\n");
		break;
	default:
		db_printf("none\n");
	}
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
#endif