/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
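
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original file; "foo_lock" and the data it protects are hypothetical):
 *
 *	static struct sx foo_lock;
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);	(shared: concurrent readers allowed)
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);	(exclusive: a single writer)
 *	sx_xunlock(&foo_lock);
 */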

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define ADAPTIVE_SX
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE(work) do {                                           \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                work++;                                                 \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
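
/*
 * Editorial sketch of how the Giant macros above compose (hedged;
 * "extra_work" mirrors the counter used by the hard-path functions
 * later in this file):
 *
 *	GIANT_DECLARE;
 *	int extra_work = 0;
 *	...
 *	GIANT_SAVE(extra_work);	(fully release Giant before blocking)
 *	... sleep or spin ...
 *	GIANT_RESTORE();	(reacquire Giant to its saved depth)
 */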

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

static void     assert_sx(const struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_sx(const struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_sx(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_sx,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

#ifdef ADAPTIVE_SX
#ifdef SX_CUSTOM_BACKOFF
static u_short __read_frequently asx_retries;
static u_short __read_frequently asx_loops;
static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "sxlock debugging");
SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");

static struct lock_delay_config __read_frequently sx_delay;

SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
    0, "");
SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
    0, "");

static void
sx_lock_delay_init(void *arg __unused)
{

        lock_delay_default_init(&sx_delay);
        asx_retries = 10;
        asx_loops = max(10000, sx_delay.max);
}
LOCK_DELAY_SYSINIT(sx_lock_delay_init);
#else
#define sx_delay        locks_delay
#define asx_retries     locks_delay_retries
#define asx_loops       locks_delay_loops
#endif
#endif

void
assert_sx(const struct lock_object *lock, int what)
{

        sx_assert((const struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, uintptr_t how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_slock(sx);
        else
                sx_xlock(sx);
}

uintptr_t
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (0);
        } else {
                sx_sunlock(sx);
                return (1);
        }
}
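
/*
 * Editorial note (hedged): lock_sx() and unlock_sx() are reached
 * indirectly through lock_class_sx, e.g. by sleep(9)-style code that
 * must drop and later retake an arbitrary interlock.  The "how"
 * cookie round-trips roughly like this:
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	uintptr_t how = class->lc_unlock(lock);	(1 = was shared)
 *	... sleep ...
 *	class->lc_lock(lock, how);		(retake in the same mode)
 */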

#ifdef KDTRACE_HOOKS
int
owner_sx(const struct lock_object *lock, struct thread **owner)
{
        const struct sx *sx;
        uintptr_t x;

        sx = (const struct sx *)lock;
        x = sx->sx_lock;
        *owner = NULL;
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NEW)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,
            &sx->sx_lock));

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;
        if (opts & SX_NEW)
                flags |= LO_NEW;

        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
}
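
/*
 * Editorial sketch of sx_init_flags() usage (hedged; the lock name and
 * flag combination are illustrative only):
 *
 *	static struct sx bar_lock;
 *	sx_init_flags(&bar_lock, "bar lock", SX_RECURSE | SX_DUPOK);
 *	...
 *	sx_destroy(&bar_lock);	(must be unlocked and unrecursed)
 */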

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));

        x = sx->sx_lock;
        for (;;) {
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_READER);
                        TD_LOCKS_INC(curthread);
                        curthread->td_sx_slocks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}
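
/*
 * Editorial sketch of the try-lock pattern (hedged; "baz_lock" is
 * hypothetical):
 *
 *	if (sx_try_slock(&baz_lock)) {
 *		... read the protected data ...
 *		sx_sunlock(&baz_lock);
 *	} else {
 *		... fall back without blocking ...
 *	}
 */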

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        uintptr_t tid, x;
        int error = 0;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        tid = (uintptr_t)curthread;
        x = SX_LOCK_UNLOCKED;
        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
        else
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    0, 0, file, line, LOCKSTAT_WRITER);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_INC(curthread);
        }

        return (error);
}
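
/*
 * Editorial note (hedged): _sx_xlock() only returns non-zero when
 * SX_INTERRUPTIBLE is passed and the sleep is aborted by a signal.
 * A sketch of the interruptible form, via the sx_xlock_sig() wrapper:
 *
 *	error = sx_xlock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);	(typically EINTR or ERESTART)
 */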

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t tid, x;
        int rval;
        bool recursed;

        td = curthread;
        tid = (uintptr_t)td;
        if (SCHEDULER_STOPPED_TD(td))
                return (1);

        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
            ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        rval = 1;
        recursed = false;
        x = SX_LOCK_UNLOCKED;
        for (;;) {
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                        break;
                if (x == SX_LOCK_UNLOCKED)
                        continue;
                if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
                        sx->sx_recurse++;
                        atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                        break;
                }
                rval = 0;
                break;
        }

        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                if (!recursed)
                        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
                            sx, 0, 0, file, line, LOCKSTAT_WRITER);
                TD_LOCKS_INC(curthread);
        }

        return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

        return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
#if LOCK_DEBUG > 0
        _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
#else
        __sx_xunlock(sx, curthread, file, line);
#endif
        TD_LOCKS_DEC(curthread);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        uintptr_t waiters;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        success = 0;
        x = SX_READ_VALUE(sx);
        for (;;) {
                if (SX_SHARERS(x) > 1)
                        break;
                waiters = (x & SX_LOCK_WAITERS);
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
                    (uintptr_t)curthread | waiters)) {
                        success = 1;
                        break;
                }
        }
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_sx_slocks--;
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(sx__upgrade, sx);
        }
        return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

        return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}
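
/*
 * Editorial sketch of the usual upgrade fallback (hedged): because the
 * upgrade can fail, callers typically drop the shared lock, take the
 * exclusive lock, and re-validate any state read under the slock:
 *
 *	sx_slock(&foo_lock);
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... re-check state; another thread may have run ...
 *	}
 *	...
 *	sx_xunlock(&foo_lock);
 */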

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS)))
                goto out;

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_SHARED_QUEUE);
        sleepq_release(&sx->lock_object);

        if (wakeup_swapper)
                kick_proc0();

out:
        curthread->td_sx_slocks++;
        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

        sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}
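
/*
 * Editorial sketch of a downgrade (hedged): unlike an upgrade, a
 * downgrade always succeeds, so it suits "initialize once, then keep
 * reading" paths:
 *
 *	sx_xlock(&foo_lock);
 *	... mutate the protected state ...
 *	sx_downgrade(&foo_lock);	(atomically become one sharer)
 *	... continue reading without blocking other readers ...
 *	sx_sunlock(&foo_lock);
 */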

#ifdef  ADAPTIVE_SX
static inline void
sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
{

        if (x & SX_LOCK_WRITE_SPINNER)
                return;
        if (*in_critical) {
                critical_exit();
                *in_critical = false;
                (*extra_work)--;
        }
}
#else
#define sx_drop_critical(x, in_critical, extra_work) do { } while (0)
#endif

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
        uintptr_t tid, setx;
#ifdef ADAPTIVE_SX
        struct thread *owner;
        u_int i, n, spintries = 0;
        enum { READERS, WRITER } sleep_reason = READERS;
        bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef  KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state = 0;
        int doing_lockprof = 0;
#endif
        int extra_work = 0;

        tid = (uintptr_t)curthread;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                while (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                goto out_lockstat;
                }
                extra_work = 1;
                doing_lockprof = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        doing_lockprof = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

        if (__predict_false(x == SX_LOCK_UNLOCKED))
                x = SX_READ_VALUE(sx);

        /* If we already hold an exclusive lock, then recurse. */
        if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        THREAD_CONTENDS_ON_LOCK(&sx->lock_object);

        for (;;) {
                if (x == SX_LOCK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                break;
                        continue;
                }
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
                if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
                        if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                                break;
                        continue;
                }

                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                if ((x & SX_LOCK_SHARED) == 0) {
                        sx_drop_critical(x, &in_critical, &extra_work);
                        sleep_reason = WRITER;
                        owner = lv_sx_owner(x);
                        if (!TD_IS_RUNNING(owner))
                                goto sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, sx, owner);
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        do {
                                lock_delay(&lda);
                                x = SX_READ_VALUE(sx);
                                owner = lv_sx_owner(x);
                        } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        continue;
                } else if (SX_SHARERS(x) > 0) {
                        sleep_reason = READERS;
                        if (spintries == asx_retries)
                                goto sleepq;
                        if (!(x & SX_LOCK_WRITE_SPINNER)) {
                                if (!in_critical) {
                                        critical_enter();
                                        in_critical = true;
                                        extra_work++;
                                }
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    x | SX_LOCK_WRITE_SPINNER)) {
                                        critical_exit();
                                        in_critical = false;
                                        extra_work--;
                                        continue;
                                }
                        }
                        spintries++;
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            sx->lock_object.lo_name);
                        n = SX_SHARERS(x);
                        for (i = 0; i < asx_loops; i += n) {
                                lock_delay_spin(n);
                                x = SX_READ_VALUE(sx);
                                if (!(x & SX_LOCK_WRITE_SPINNER))
                                        break;
                                if (!(x & SX_LOCK_SHARED))
                                        break;
                                n = SX_SHARERS(x);
                                if (n == 0)
                                        break;
                        }
#ifdef KDTRACE_HOOKS
                        lda.spin_cnt += i;
#endif
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        if (i < asx_loops)
                                continue;
                }
sleepq:
#endif
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        sx_drop_critical(x, &in_critical, &extra_work);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                sx_drop_critical(x, &in_critical,
                                    &extra_work);
                                continue;
                        }
                } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
                        sleepq_release(&sx->lock_object);
                        sx_drop_critical(x, &in_critical, &extra_work);
                        continue;
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
                if ((x & ~setx) == SX_LOCK_SHARED) {
                        setx &= ~SX_LOCK_WRITE_SPINNER;
                        if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
                                goto retry_sleepq;
                        sleepq_release(&sx->lock_object);
                        CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                            __func__, sx);
                        break;
                }

#ifdef ADAPTIVE_SX
                /*
                 * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
                 * It is an invariant that when the bit is set, there is
                 * a writer ready to grab the lock. Thus clear the bit since
                 * we are going to sleep.
                 */
                if (in_critical) {
                        if ((x & SX_LOCK_WRITE_SPINNER) ||
                            !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                                setx = x & ~SX_LOCK_WRITE_SPINNER;
                                setx |= SX_LOCK_EXCLUSIVE_WAITERS;
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    setx)) {
                                        goto retry_sleepq;
                                }
                        }
                        critical_exit();
                        in_critical = false;
                } else {
#endif
                        /*
                         * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If
                         * we fail, then loop back and retry.
                         */
                        if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                                if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                                    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                        goto retry_sleepq;
                                }
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                            __func__, sx);
                        }
#ifdef ADAPTIVE_SX
                }
#endif

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
                /*
                 * Hack: this can land in thread_suspend_check which will
                 * conditionally take a mutex, tripping over an assert if a
                 * lock we are waiting for is set.
                 */
                THREAD_CONTENTION_DONE(&sx->lock_object);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
                THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
        THREAD_CONTENTION_DONE(&sx->lock_object);
        if (__predict_true(!extra_work))
                return (error);
#ifdef ADAPTIVE_SX
        if (in_critical)
                critical_exit();
#endif
        GIANT_RESTORE();
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!doing_lockprof))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (!error)
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_WRITER);
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        uintptr_t tid, setx;
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        tid = (uintptr_t)curthread;

        if (__predict_false(x == tid))
                x = SX_READ_VALUE(sx);

        MPASS(!(x & SX_LOCK_SHARED));

        if (__predict_false(x & SX_LOCK_RECURSED)) {
                /* The lock is recursed, unrecurse one level. */
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
        if (x == tid &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
                return;

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         * If interruptible sleeps left the shared queue empty, avoid
         * starvation of the threads sleeping on the exclusive queue by
         * giving them precedence and clearing the shared waiters bit anyway.
         */
        setx = SX_LOCK_UNLOCKED;
        queue = SQ_SHARED_QUEUE;
        if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
            sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
                queue = SQ_EXCLUSIVE_QUEUE;
                setx |= (x & SX_LOCK_SHARED_WAITERS);
        }
        atomic_store_rel_ptr(&sx->sx_lock, setx);

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");

        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
            queue);
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
}

static bool __always_inline
__sx_can_read(struct thread *td, uintptr_t x, bool fp)
{

        if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
                        == SX_LOCK_SHARED)
                return (true);
        if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
                return (true);
        return (false);
}
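
/*
 * Editorial note (a hedged reading of the checks above): a reader may
 * take the fast path only while the lock is shared and no writer is
 * waiting or spinning.  The second test lets a thread that already
 * holds shared locks (td_sx_slocks != 0) bypass the writer-pending
 * check, which avoids deadlocking a reader that takes a second slock
 * while a writer is already queued behind its first one.
 */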

static bool __always_inline
__sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
    LOCK_FILE_LINE_ARG_DEF)
{

        /*
         * If no other thread has an exclusive lock then try to bump up
         * the count of sharers.  Since we have to preserve the state
         * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
         * shared lock loop back and retry.
         */
        while (__sx_can_read(td, *xp, fp)) {
                if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
                    *xp + SX_ONE_SHARER)) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
                                    __func__, sx, (void *)*xp,
                                    (void *)(*xp + SX_ONE_SHARER));
                        td->td_sx_slocks++;
                        return (true);
                }
        }
        return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
        GIANT_DECLARE;
        struct thread *td;
#ifdef ADAPTIVE_SX
        struct thread *owner;
        u_int i, n, spintries = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
        struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
        u_int sleep_cnt = 0;
        int64_t sleep_time = 0;
        int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        uintptr_t state = 0;
#endif
        int extra_work __sdt_used = 0;

        td = curthread;

#ifdef KDTRACE_HOOKS
        if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
                if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
                        goto out_lockstat;
                extra_work = 1;
                all_time -= lockstat_nsecs(&sx->lock_object);
                state = x;
        }
#endif
#ifdef LOCK_PROFILING
        extra_work = 1;
        state = x;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

#if defined(ADAPTIVE_SX)
        lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
        lock_delay_arg_init_noadapt(&lda);
#endif

#ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, false, &contested,
            &waittime);

#ifndef INVARIANTS
        GIANT_SAVE(extra_work);
#endif

        THREAD_CONTENDS_ON_LOCK(&sx->lock_object);

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
                if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
                        break;
#ifdef INVARIANTS
                GIANT_SAVE(extra_work);
#endif
#ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((x & SX_LOCK_SHARED) == 0) {
                        owner = lv_sx_owner(x);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                do {
                                        lock_delay(&lda);
                                        x = SX_READ_VALUE(sx);
                                        owner = lv_sx_owner(x);
                                } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
                        }
                } else {
                        if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
                                MPASS(!__sx_can_read(td, x, false));
                                lock_delay_spin(2);
                                x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (spintries < asx_retries) {
                                KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                                    "spinning", "lockname:\"%s\"",
                                    sx->lock_object.lo_name);
                                n = SX_SHARERS(x);
                                for (i = 0; i < asx_loops; i += n) {
                                        lock_delay_spin(n);
                                        x = SX_READ_VALUE(sx);
                                        if (!(x & SX_LOCK_SHARED))
                                                break;
                                        n = SX_SHARERS(x);
                                        if (n == 0)
                                                break;
                                        if (__sx_can_read(td, x, false))
                                                break;
                                }
#ifdef KDTRACE_HOOKS
                                lda.spin_cnt += i;
#endif
                                KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                                    "running");
                                if (i < asx_loops)
                                        continue;
                        }
                }
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = SX_READ_VALUE(sx);
retry_sleepq:
                if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
                    __sx_can_read(td, x, false)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                x = SX_READ_VALUE(sx);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                            x | SX_LOCK_SHARED_WAITERS))
                                goto retry_sleepq;
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                /*
                 * Hack: this can land in thread_suspend_check which will
                 * conditionally take a mutex, tripping over an assert if a
                 * lock we are waiting for is set.
                 */
                THREAD_CONTENTION_DONE(&sx->lock_object);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
                THREAD_CONTENDS_ON_LOCK(&sx->lock_object);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&sx->lock_object);
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
                x = SX_READ_VALUE(sx);
        }
        THREAD_CONTENTION_DONE(&sx->lock_object);
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
        if (__predict_true(!extra_work))
                return (error);
#endif
#ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
        if (sleep_time)
                LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
        if (lda.spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
                    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
                    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
out_lockstat:
#endif
        if (error == 0) {
                LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
                    contested, waittime, file, line, LOCKSTAT_READER);
        }
        GIANT_RESTORE();
        return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t x;
        int error;

        KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
            !TD_IS_IDLETHREAD(curthread),
            ("sx_slock() by idle thread %p on sx %s @ %s:%d",
            curthread, sx->lock_object.lo_name, file, line));
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

        error = 0;
        td = curthread;
        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
            !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
                error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_obtain_lock_success(&sx->lock_object, false, 0, 0,
                    file, line);
        if (error == 0) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                TD_LOCKS_INC(curthread);
        }
        return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

        return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

static bool __always_inline
_sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
{

        for (;;) {
                if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
                            *xp - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)*xp,
                                            (void *)(*xp - SX_ONE_SHARER));
                                td->td_sx_slocks--;
                                return (true);
                        }
                        continue;
                }
                break;
        }
        return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
    LOCK_FILE_LINE_ARG_DEF)
{
        int wakeup_swapper = 0;
        uintptr_t setx, queue;

        if (SCHEDULER_STOPPED())
                return;

        if (_sx_sunlock_try(sx, td, &x))
                goto out_lockstat;

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
        for (;;) {
                if (_sx_sunlock_try(sx, td, &x))
                        break;

                /*
                 * The wakeup semantics here are quite simple: just wake
                 * up all the exclusive waiters.  Note that the state of
                 * the lock could have changed; if the update fails, loop
                 * back and retry.
                 */
                setx = SX_LOCK_UNLOCKED;
                queue = SQ_SHARED_QUEUE;
                if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
                        setx |= (x & SX_LOCK_SHARED_WAITERS);
                        queue = SQ_EXCLUSIVE_QUEUE;
                }
                setx |= (x & SX_LOCK_WRITE_SPINNER);
                if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
                        continue;
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, queue);
                td->td_sx_slocks--;
                break;
        }
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
out_lockstat:
        LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
        struct thread *td;
        uintptr_t x;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

        td = curthread;
        x = SX_READ_VALUE(sx);
        if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
            !_sx_sunlock_try(sx, td, &x)))
                _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
        else
                lock_profile_release_lock(&sx->lock_object, false);

        TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(const struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */
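
/*
 * Editorial sketch of sx_assert() usage (hedged; "foo_modify" and its
 * struct are hypothetical): callers place assertions at function entry
 * to document locking contracts, e.g.:
 *
 *	static void
 *	foo_modify(struct foo *fp)
 *	{
 *		sx_assert(&fp->foo_lock, SA_XLOCKED);
 *		... safe to mutate fp's fields here ...
 *	}
 */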

#ifdef DDB
static void
db_show_sx(const struct lock_object *lock)
{
        struct thread *td;
        const struct sx *sx;

        sx = (const struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch (sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        const struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif