/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
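
/*
 * Example usage (an illustrative sketch only, not part of this file; the
 * foo_lock/foo_data names are hypothetical):
 *
 *        static struct sx foo_lock;
 *        static int foo_data;
 *
 *        sx_init(&foo_lock, "foo lock");
 *
 * Readers take a shared lock:
 *
 *        sx_slock(&foo_lock);
 *        v = foo_data;
 *        sx_sunlock(&foo_lock);
 *
 * Writers take an exclusive lock:
 *
 *        sx_xlock(&foo_lock);
 *        foo_data = v;
 *        sx_xunlock(&foo_lock);
 */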

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE(work) do {                                           \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                work++;                                                 \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
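
/*
 * Sketch of how the three macros above cooperate inside this file (a
 * restatement of the existing convention in the hard-case paths below,
 * not a new interface):
 *
 *        GIANT_DECLARE;
 *        ...
 *        GIANT_SAVE(extra_work);         before spinning or sleeping
 *        ...
 *        GIANT_RESTORE();                once the sx operation resolves
 */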

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

static void     assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_sx(const struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_sx,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
static __read_frequently u_int asx_retries = 10;
static __read_frequently u_int asx_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(sx_delay);
#endif
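
/*
 * The knobs above may be inspected and tuned at runtime via sysctl(8),
 * for example (the values shown are illustrative only):
 *
 *        # sysctl debug.sx.retries
 *        # sysctl debug.sx.loops=20000
 *        # sysctl debug.sx.delay_base debug.sx.delay_max
 */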

void
assert_sx(const struct lock_object *lock, int what)
{

        sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_slock(sx);
        else
                sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (0);
        } else {
                sx_sunlock(sx);
                return (1);
        }
}

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
        const struct sx *sx;
        uintptr_t x;

        sx = (const struct sx *)lock;
        x = sx->sx_lock;
        *owner = NULL;
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}
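
/*
 * sx_sysinit() is normally reached via the SX_SYSINIT() macro from
 * <sys/sx.h>, which arranges for the lock to be initialized during boot.
 * A minimal sketch with a hypothetical lock:
 *
 *        static struct sx example_sx;
 *        SX_SYSINIT(example_sx_init, &example_sx, "example sx");
 */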

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,
            &sx->sx_lock));

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;
        if (opts & SX_NEW)
                flags |= LO_NEW;

        flags |= opts & SX_NOADAPTIVE;
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
}
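
/*
 * A minimal sketch of direct initialization with flags (hypothetical
 * lock name; SX_RECURSE allows recursive exclusive acquisitions and
 * SX_DUPOK suppresses WITNESS duplicate-lock warnings):
 *
 *        sx_init_flags(&example_sx, "example sx", SX_RECURSE | SX_DUPOK);
 *        ...
 *        sx_destroy(&example_sx);
 */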

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));

        x = sx->sx_lock;
        for (;;) {
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_READER);
                        TD_LOCKS_INC(curthread);
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}
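
/*
 * Sketch of a typical sx_try_slock() caller (hypothetical names).  The
 * try variant never sleeps, so it suits contexts that cannot block and
 * have a fallback path:
 *
 *        if (sx_try_slock(&foo_lock)) {
 *                v = foo_data;
 *                sx_sunlock(&foo_lock);
 *        } else {
 *                handle the contended case without the lock
 *        }
 */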

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        uintptr_t tid, x;
        int error = 0;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        tid = (uintptr_t)curthread;
        x = SX_LOCK_UNLOCKED;
        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
        else
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    0, 0, file, line, LOCKSTAT_WRITER);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_INC(curthread);
        }

        return (error);
}
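
/*
 * _sx_xlock() can only fail when SX_INTERRUPTIBLE is passed, in which
 * case the sleep may be aborted by a signal.  A sketch of the usual
 * consumer-facing pattern (hypothetical lock name):
 *
 *        error = sx_xlock_sig(&foo_lock);
 *        if (error != 0)
 *                return (error);         EINTR or ERESTART
 *        ...
 *        sx_xunlock(&foo_lock);
 */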

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t tid, x;
        int rval;
        bool recursed;

        td = curthread;
        tid = (uintptr_t)td;
        if (SCHEDULER_STOPPED_TD(td))
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
            ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        rval = 1;
        recursed = false;
        x = SX_LOCK_UNLOCKED;
        for (;;) {
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                        break;
                if (x == SX_LOCK_UNLOCKED)
                        continue;
                if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
                        sx->sx_recurse++;
                        atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                        /*
                         * Flag the recursion so that the lockstat probe
                         * below is not recorded a second time for the
                         * same logical acquisition.
                         */
                        recursed = true;
                        break;
                }
                rval = 0;
                break;
        }

        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                if (!recursed)
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_WRITER);
                TD_LOCKS_INC(curthread);
        }

        return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
#if LOCK_DEBUG > 0
        _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
        __sx_xunlock(sx, curthread, file, line);
#endif
        TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        uintptr_t waiters;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        success = 0;
        x = SX_READ_VALUE(sx);
        for (;;) {
                if (SX_SHARERS(x) > 1)
                        break;
                waiters = (x & SX_LOCK_EXCLUSIVE_WAITERS);
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
                    (uintptr_t)curthread | waiters)) {
                        success = 1;
                        break;
                }
        }
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success) {
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(sx__upgrade, sx);
        }
        return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

        return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}
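
/*
 * Sketch of the read-then-maybe-write pattern sx_try_upgrade() supports
 * (hypothetical names).  Because the upgrade can fail, callers must be
 * ready to drop the shared lock, reacquire exclusively, and revalidate:
 *
 *        sx_slock(&foo_lock);
 *        if (foo_needs_update() && !sx_try_upgrade(&foo_lock)) {
 *                sx_sunlock(&foo_lock);
 *                sx_xlock(&foo_lock);
 *                state may have changed; recheck before updating
 *        }
 */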

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS)))
                goto out;

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_SHARED_QUEUE);
        sleepq_release(&sx->lock_object);

        if (wakeup_swapper)
                kick_proc0();

out:
        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

        sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}
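
/*
 * Sketch of a typical sx_downgrade() use (hypothetical names): perform
 * an update exclusively, then continue reading under a shared lock
 * without ever fully releasing the lock:
 *
 *        sx_xlock(&foo_lock);
 *        foo_data = foo_compute();
 *        sx_downgrade(&foo_lock);
 *        foo_use(foo_data);
 *        sx_sunlock(&foo_lock);
 */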

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
        uintptr_t tid;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        u_int i, n, spintries = 0;
        enum { READERS, WRITER } sleep_reason;
        bool adaptive;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef  KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state;
#endif
        int extra_work = 0;

        tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                while (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                goto out_lockstat;
                }
                extra_work = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init(&lda, NULL);
#endif

        if (__predict_false(x == SX_LOCK_UNLOCKED))
                x = SX_READ_VALUE(sx);

        /* If we already hold an exclusive lock, then recurse. */
        if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef ADAPTIVE_SX
        adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        for (;;) {
                if (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                break;
                        continue;
                }
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
                if (__predict_false(!adaptive))
                        goto sleepq;
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                if ((x & SX_LOCK_SHARED) == 0) {
                        sleep_reason = WRITER;
                        owner = lv_sx_owner(x);
                        if (!TD_IS_RUNNING(owner))
                                goto sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, sx, owner);
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        do {
                                lock_delay(&lda);
                                x = SX_READ_VALUE(sx);
                                owner = lv_sx_owner(x);
                        } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        continue;
                } else if (SX_SHARERS(x) > 0) {
                        sleep_reason = READERS;
                        if (spintries == asx_retries)
                                goto sleepq;
                        spintries++;
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        for (i = 0; i < asx_loops; i += n) {
                                n = SX_SHARERS(x);
                                lock_delay_spin(n);
                                x = SX_READ_VALUE(sx);
                                if ((x & SX_LOCK_SHARED) == 0 ||
                                    SX_SHARERS(x) == 0)
                                        break;
                        }
#ifdef KDTRACE_HOOKS
                        lda.spin_cnt += i;
#endif
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        if (i < asx_loops)
                                continue;
                }
sleepq:
#endif
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (adaptive) {
                        if (!(x & SX_LOCK_SHARED)) {
                                owner = (struct thread *)SX_OWNER(x);
                                if (TD_IS_RUNNING(owner)) {
                                        sleepq_release(&sx->lock_object);
                                        continue;
                                }
                        } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS))
                                goto retry_sleepq;
                        sleepq_release(&sx->lock_object);
                        CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                            __func__, sx);
                        break;
                }

                /*
                 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
                 * fail, then loop back and retry.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                goto retry_sleepq;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!extra_work))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (!error)
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_WRITER);
        GIANT_RESTORE();
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t tid, setx;
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        tid = (uintptr_t)curthread;

        if (__predict_false(x == tid))
                x = SX_READ_VALUE(sx);

        MPASS(!(x & SX_LOCK_SHARED));

        if (__predict_false(x & SX_LOCK_RECURSED)) {
                /* The lock is recursed, unrecurse one level. */
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
        if (x == tid &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
                return;

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         * If interruptible sleeps left the shared queue empty, avoid
         * starvation of the threads sleeping on the exclusive queue by
         * giving them precedence and cleaning up the shared waiters bit
         * anyway.
         */
        setx = SX_LOCK_UNLOCKED;
        queue = SQ_EXCLUSIVE_QUEUE;
        if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
            sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
                queue = SQ_SHARED_QUEUE;
                setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
        }
        atomic_store_rel_ptr(&sx->sx_lock, setx);

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");

        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
            queue);
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{

        /*
         * If no other thread has an exclusive lock, then try to bump up
         * the count of sharers.  Since we have to preserve the state
         * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
         * shared lock, loop back and retry.
         */
        while (*xp & SX_LOCK_SHARED) {
                MPASS(!(*xp & SX_LOCK_SHARED_WAITERS));
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
                    *xp + SX_ONE_SHARER)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
                                    __func__, sx, (void *)*xp,
                                    (void *)(*xp + SX_ONE_SHARER));
                        return (true);
                }
        }
        return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        bool adaptive;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state;
#endif
        int extra_work = 0;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
                        goto out_lockstat;
                extra_work = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init(&lda, NULL);
#endif

#ifdef ADAPTIVE_SX
        adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
                if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
                        break;
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
                if (__predict_false(!adaptive))
                        goto sleepq;
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                owner = lv_sx_owner(x);
                if (TD_IS_RUNNING(owner)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR3(KTR_LOCK,
                                    "%s: spinning on %p held by %p",
                                    __func__, sx, owner);
                        KTR_STATE1(KTR_SCHED, "thread",
                            sched_tdname(curthread), "spinning",
                            "lockname:\"%s\"", sx->lock_object.lo_name);
                        do {
                                lock_delay(&lda);
                                x = SX_READ_VALUE(sx);
                                owner = lv_sx_owner(x);
                        } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread",
                            sched_tdname(curthread), "running");
                        continue;
                }
sleepq:
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:
                /*
                 * The lock could have been released while we spun.
                 * In this case loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED) && adaptive) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                x = SX_READ_VALUE(sx);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                            x | SX_LOCK_SHARED_WAITERS))
                                goto retry_sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!extra_work))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (error == 0) {
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_READER);
        }
        GIANT_RESTORE();
        return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        int error;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

        error = 0;
        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
            !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
                error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
                    file, line);
        if (error == 0) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                TD_LOCKS_INC(curthread);
        }
        return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

        return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

        for (;;) {
                /*
                 * We should never have shared waiters while at least one
                 * thread holds a shared lock.
                 */
                KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS),
                    ("%s: waiting sharers", __func__));

                /*
                 * See if there is more than one shared lock held.  If
                 * so, just drop one and return.
                 */
                if (SX_SHARERS(*xp) > 1) {
                        if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
                            *xp - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)*xp,
                                            (void *)(*xp - SX_ONE_SHARER));
                                return (true);
                        }
                        continue;
                }

                /*
                 * If there aren't any waiters for an exclusive lock,
                 * then try to drop it quickly.
                 */
                if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(*xp == SX_SHARERS_LOCK(1));
                        *xp = SX_SHARERS_LOCK(1);
                        if (atomic_fcmpset_rel_ptr(&sx->sx_lock,
                            xp, SX_LOCK_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, sx);
                                return (true);
                        }
                        continue;
                }
                break;
        }
        return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        int wakeup_swapper = 0;
        uintptr_t setx;

        if (SCHEDULER_STOPPED())
                return;

        if (_sx_sunlock_try(sx, &x))
                goto out_lockstat;

        /*
         * At this point, there should just be one sharer with
         * exclusive waiters.
         */
        MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        for (;;) {
                MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
                MPASS(!(x & SX_LOCK_SHARED_WAITERS));
                if (_sx_sunlock_try(sx, &x))
                        break;

                /*
                 * Wake up semantic here is quite simple:
                 * Just wake up all the exclusive waiters.
                 * Note that the state of the lock could have changed,
                 * so if it fails loop back and retry.
                 */
                setx = x - SX_ONE_SHARER;
                setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
                if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
                        continue;
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on"
                            " exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_EXCLUSIVE_QUEUE);
                break;
        }
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
out_lockstat:
        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
            !_sx_sunlock_try(sx, &x)))
                _sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_release_lock(&sx->lock_object);

        TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (panicstr != NULL)
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */
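
/*
 * Sketch of typical sx_assert() use in a lock consumer (hypothetical
 * names): functions that require the caller to hold the lock enforce
 * the contract up front.
 *
 *        static void
 *        foo_modify(void)
 *        {
 *
 *                sx_assert(&foo_lock, SA_XLOCKED);
 *                ...
 *        }
 */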

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
        struct thread *td;
        const struct sx *sx;

        sx = (const struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch(sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif