sys/kern/kern_sx.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
5  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice(s), this list of conditions and the following disclaimer as
13  *    the first lines of this file unmodified other than the possible
14  *    addition of one or more copyright notices.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice(s), this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
20  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
23  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29  * DAMAGE.
30  */
31
32 /*
33  * Shared/exclusive locks.  This implementation attempts to ensure
34  * deterministic lock granting behavior, so that slocks and xlocks are
35  * interleaved.
36  *
37  * Priority propagation will not generally raise the priority of lock holders,
38  * so should not be relied upon in combination with sx locks.
39  */
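/*
 * Typical usage of the interface implemented here (a brief illustrative
 * sketch only; the sx(9) manual page is authoritative, and "data_lock"
 * is just a placeholder name):
 *
 *	struct sx data_lock;
 *
 *	sx_init(&data_lock, "data lock");
 *
 *	sx_xlock(&data_lock);
 *	(modify the protected data)
 *	sx_xunlock(&data_lock);
 *
 *	sx_slock(&data_lock);
 *	(read the protected data)
 *	sx_sunlock(&data_lock);
 *
 *	sx_destroy(&data_lock);
 */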
40
41 #include "opt_ddb.h"
42 #include "opt_hwpmc_hooks.h"
43 #include "opt_no_adaptive_sx.h"
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kdb.h>
51 #include <sys/kernel.h>
52 #include <sys/ktr.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/sched.h>
57 #include <sys/sleepqueue.h>
58 #include <sys/sx.h>
59 #include <sys/smp.h>
60 #include <sys/sysctl.h>
61
62 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
63 #include <machine/cpu.h>
64 #endif
65
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #endif
69
70 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
71 #define ADAPTIVE_SX
72 #endif
73
74 #ifdef HWPMC_HOOKS
75 #include <sys/pmckern.h>
76 PMC_SOFT_DECLARE( , , lock, failed);
77 #endif
78
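/*
 * A quick reminder of the lock word layout used throughout this file
 * (the definitions live in sys/sx.h): sx_lock either encodes a count of
 * shared holders together with SX_LOCK_SHARED, or holds the owning
 * thread pointer for an exclusive lock.  The low bits double as state
 * flags (SX_LOCK_SHARED_WAITERS, SX_LOCK_EXCLUSIVE_WAITERS,
 * SX_LOCK_RECURSED, SX_LOCK_WRITE_SPINNER), which is why acquisition
 * and release are done with atomic compare-and-set loops on the whole
 * word.
 */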
79 /* Handy macros for sleep queues. */
80 #define SQ_EXCLUSIVE_QUEUE      0
81 #define SQ_SHARED_QUEUE         1
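/*
 * Each sx lock uses its lock_object address as the sleep queue wait
 * channel; blocked writers park on the exclusive queue and blocked
 * readers on the shared queue, so each class of waiters can be woken
 * independently.
 */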
82
83 /*
84  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
85  * drop Giant anytime we have to sleep or if we adaptively spin.
86  */
87 #define GIANT_DECLARE                                                   \
88         int _giantcnt = 0;                                              \
89         WITNESS_SAVE_DECL(Giant)                                        \
90
91 #define GIANT_SAVE(work) do {                                           \
92         if (__predict_false(mtx_owned(&Giant))) {                       \
93                 work++;                                                 \
94                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
95                 while (mtx_owned(&Giant)) {                             \
96                         _giantcnt++;                                    \
97                         mtx_unlock(&Giant);                             \
98                 }                                                       \
99         }                                                               \
100 } while (0)
101
102 #define GIANT_RESTORE() do {                                            \
103         if (_giantcnt > 0) {                                            \
104                 mtx_assert(&Giant, MA_NOTOWNED);                        \
105                 while (_giantcnt--)                                     \
106                         mtx_lock(&Giant);                               \
107                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
108         }                                                               \
109 } while (0)
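/*
 * GIANT_SAVE() fully releases Giant (unwinding any recursion and
 * remembering the count in _giantcnt) before this code spins or sleeps,
 * and GIANT_RESTORE() reacquires it the same number of times on the way
 * out, so that Giant is never held across a sleep or a long adaptive
 * spin.
 */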
110
111 /*
112  * Returns true if an exclusive lock is recursed.  It assumes
113  * curthread currently has an exclusive lock.
114  */
115 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
116
117 static void     assert_sx(const struct lock_object *lock, int what);
118 #ifdef DDB
119 static void     db_show_sx(const struct lock_object *lock);
120 #endif
121 static void     lock_sx(struct lock_object *lock, uintptr_t how);
122 #ifdef KDTRACE_HOOKS
123 static int      owner_sx(const struct lock_object *lock, struct thread **owner);
124 #endif
125 static uintptr_t unlock_sx(struct lock_object *lock);
126
127 struct lock_class lock_class_sx = {
128         .lc_name = "sx",
129         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
130         .lc_assert = assert_sx,
131 #ifdef DDB
132         .lc_ddb_show = db_show_sx,
133 #endif
134         .lc_lock = lock_sx,
135         .lc_unlock = unlock_sx,
136 #ifdef KDTRACE_HOOKS
137         .lc_owner = owner_sx,
138 #endif
139 };
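/*
 * The lock class glue above is what lets lock-agnostic kernel code
 * (WITNESS, DDB's lock display and, via lc_lock/lc_unlock, code that
 * must drop and reacquire an arbitrary lock around a sleep) operate on
 * sx locks without knowing their internals.
 */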
140
141 #ifndef INVARIANTS
142 #define _sx_assert(sx, what, file, line)
143 #endif
144
145 #ifdef ADAPTIVE_SX
146 #ifdef SX_CUSTOM_BACKOFF
147 static u_short __read_frequently asx_retries;
148 static u_short __read_frequently asx_loops;
149 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
150     "sxlock debugging");
151 SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
152 SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
153
154 static struct lock_delay_config __read_frequently sx_delay;
155
156 SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
157     0, "");
158 SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
159     0, "");
160
161 static void
162 sx_lock_delay_init(void *arg __unused)
163 {
164
165         lock_delay_default_init(&sx_delay);
166         asx_retries = 10;
167         asx_loops = max(10000, sx_delay.max);
168 }
169 LOCK_DELAY_SYSINIT(sx_lock_delay_init);
170 #else
171 #define sx_delay        locks_delay
172 #define asx_retries     locks_delay_retries
173 #define asx_loops       locks_delay_loops
174 #endif
175 #endif
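/*
 * Unless SX_CUSTOM_BACKOFF is defined, adaptive spinning shares the
 * system-wide locks_delay configuration with the other lock primitives
 * instead of carrying sx-specific retry and backoff tunables.
 */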
176
177 void
178 assert_sx(const struct lock_object *lock, int what)
179 {
180
181         sx_assert((const struct sx *)lock, what);
182 }
183
184 void
185 lock_sx(struct lock_object *lock, uintptr_t how)
186 {
187         struct sx *sx;
188
189         sx = (struct sx *)lock;
190         if (how)
191                 sx_slock(sx);
192         else
193                 sx_xlock(sx);
194 }
195
196 uintptr_t
197 unlock_sx(struct lock_object *lock)
198 {
199         struct sx *sx;
200
201         sx = (struct sx *)lock;
202         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
203         if (sx_xlocked(sx)) {
204                 sx_xunlock(sx);
205                 return (0);
206         } else {
207                 sx_sunlock(sx);
208                 return (1);
209         }
210 }
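/*
 * The value returned by unlock_sx() is the "how" cookie later passed
 * back to lock_sx(): 1 means the lock was held shared and is reacquired
 * with sx_slock(), 0 means it was held exclusively and is retaken with
 * sx_xlock().  This is what allows generic code to temporarily drop an
 * sx lock and later restore it in the same mode.
 */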
211
212 #ifdef KDTRACE_HOOKS
213 int
214 owner_sx(const struct lock_object *lock, struct thread **owner)
215 {
216         const struct sx *sx;
217         uintptr_t x;
218
219         sx = (const struct sx *)lock;
220         x = sx->sx_lock;
221         *owner = NULL;
222         return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
223             ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
224 }
225 #endif
226
227 void
228 sx_sysinit(void *arg)
229 {
230         struct sx_args *sargs = arg;
231
232         sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
233 }
234
235 void
236 sx_init_flags(struct sx *sx, const char *description, int opts)
237 {
238         int flags;
239
240         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
241             SX_NOPROFILE | SX_NEW)) == 0);
242         ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
243             ("%s: sx_lock not aligned for %s: %p", __func__, description,
244             &sx->sx_lock));
245
246         flags = LO_SLEEPABLE | LO_UPGRADABLE;
247         if (opts & SX_DUPOK)
248                 flags |= LO_DUPOK;
249         if (opts & SX_NOPROFILE)
250                 flags |= LO_NOPROFILE;
251         if (!(opts & SX_NOWITNESS))
252                 flags |= LO_WITNESS;
253         if (opts & SX_RECURSE)
254                 flags |= LO_RECURSABLE;
255         if (opts & SX_QUIET)
256                 flags |= LO_QUIET;
257         if (opts & SX_NEW)
258                 flags |= LO_NEW;
259
260         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
261         sx->sx_lock = SX_LOCK_UNLOCKED;
262         sx->sx_recurse = 0;
263 }
264
265 void
266 sx_destroy(struct sx *sx)
267 {
268
269         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
270         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
271         sx->sx_lock = SX_LOCK_DESTROYED;
272         lock_destroy(&sx->lock_object);
273 }
274
275 int
276 sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
277 {
278         uintptr_t x;
279
280         if (SCHEDULER_STOPPED())
281                 return (1);
282
283         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
284             ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
285             curthread, sx->lock_object.lo_name, file, line));
286
287         x = sx->sx_lock;
288         for (;;) {
289                 KASSERT(x != SX_LOCK_DESTROYED,
290                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
291                 if (!(x & SX_LOCK_SHARED))
292                         break;
293                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
294                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
295                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
296                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
297                             sx, 0, 0, file, line, LOCKSTAT_READER);
298                         TD_LOCKS_INC(curthread);
299                         curthread->td_sx_slocks++;
300                         return (1);
301                 }
302         }
303
304         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
305         return (0);
306 }
307
308 int
309 sx_try_slock_(struct sx *sx, const char *file, int line)
310 {
311
312         return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
313 }
314
315 int
316 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
317 {
318         uintptr_t tid, x;
319         int error = 0;
320
321         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
322             !TD_IS_IDLETHREAD(curthread),
323             ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
324             curthread, sx->lock_object.lo_name, file, line));
325         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
326             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
327         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
328             line, NULL);
329         tid = (uintptr_t)curthread;
330         x = SX_LOCK_UNLOCKED;
331         if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
332                 error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
333         else
334                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
335                     0, 0, file, line, LOCKSTAT_WRITER);
336         if (!error) {
337                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
338                     file, line);
339                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
340                 TD_LOCKS_INC(curthread);
341         }
342
343         return (error);
344 }
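/*
 * The opts argument is how the _sig variants request an interruptible
 * acquisition: with SX_INTERRUPTIBLE set, a pending signal makes the
 * hard path return the error from sleepq_wait_sig() instead of looping,
 * so callers of sx_xlock_sig() and friends must check the return value.
 */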
345
346 int
347 sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
348 {
349         struct thread *td;
350         uintptr_t tid, x;
351         int rval;
352         bool recursed;
353
354         td = curthread;
355         tid = (uintptr_t)td;
356         if (SCHEDULER_STOPPED_TD(td))
357                 return (1);
358
359         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
360             ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
361             curthread, sx->lock_object.lo_name, file, line));
362         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
363             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
364
365         rval = 1;
366         recursed = false;
367         x = SX_LOCK_UNLOCKED;
368         for (;;) {
369                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
370                         break;
371                 if (x == SX_LOCK_UNLOCKED)
372                         continue;
373                 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
374                         sx->sx_recurse++;
375                         atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
376                         break;
377                 }
378                 rval = 0;
379                 break;
380         }
381
382         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
383         if (rval) {
384                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
385                     file, line);
386                 if (!recursed)
387                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
388                             sx, 0, 0, file, line, LOCKSTAT_WRITER);
389                 TD_LOCKS_INC(curthread);
390         }
391
392         return (rval);
393 }
394
395 int
396 sx_try_xlock_(struct sx *sx, const char *file, int line)
397 {
398
399         return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
400 }
401
402 void
403 _sx_xunlock(struct sx *sx, const char *file, int line)
404 {
405
406         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
407             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
408         _sx_assert(sx, SA_XLOCKED, file, line);
409         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
410         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
411             line);
412 #if LOCK_DEBUG > 0
413         _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
414 #else
415         __sx_xunlock(sx, curthread, file, line);
416 #endif
417         TD_LOCKS_DEC(curthread);
418 }
419
420 /*
421  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
422  * This will only succeed if this thread holds a single shared lock.
423  * Return 1 if the upgrade succeeded, 0 otherwise.
424  */
425 int
426 sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
427 {
428         uintptr_t x;
429         uintptr_t waiters;
430         int success;
431
432         if (SCHEDULER_STOPPED())
433                 return (1);
434
435         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
436             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
437         _sx_assert(sx, SA_SLOCKED, file, line);
438
439         /*
440          * Try to switch from one shared lock to an exclusive lock.  We need
441          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
442          * we will wake up the exclusive waiters when we drop the lock.
443          */
444         success = 0;
445         x = SX_READ_VALUE(sx);
446         for (;;) {
447                 if (SX_SHARERS(x) > 1)
448                         break;
449                 waiters = (x & SX_LOCK_WAITERS);
450                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
451                     (uintptr_t)curthread | waiters)) {
452                         success = 1;
453                         break;
454                 }
455         }
456         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
457         if (success) {
458                 curthread->td_sx_slocks--;
459                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
460                     file, line);
461                 LOCKSTAT_RECORD0(sx__upgrade, sx);
462         }
463         return (success);
464 }
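/*
 * A common caller pattern when the upgrade cannot be granted (purely
 * illustrative; "lk" is a placeholder):
 *
 *	if (!sx_try_upgrade(&lk)) {
 *		sx_sunlock(&lk);
 *		sx_xlock(&lk);
 *		(re-validate any state examined under the shared lock)
 *	}
 *
 * The re-validation step matters because other threads may have changed
 * the protected data between the unlock and the exclusive acquisition.
 */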
465
466 int
467 sx_try_upgrade_(struct sx *sx, const char *file, int line)
468 {
469
470         return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
471 }
472
473 /*
474  * Downgrade an unrecursed exclusive lock into a single shared lock.
475  */
476 void
477 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
478 {
479         uintptr_t x;
480         int wakeup_swapper;
481
482         if (SCHEDULER_STOPPED())
483                 return;
484
485         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
486             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
487         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
488 #ifndef INVARIANTS
489         if (sx_recursed(sx))
490                 panic("downgrade of a recursed lock");
491 #endif
492
493         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
494
495         /*
496          * Try to switch from an exclusive lock with no shared waiters
497          * to one sharer with no shared waiters.  If there are
498          * exclusive waiters, we don't need to lock the sleep queue so
499          * long as we preserve the flag.  We do one quick try and if
500          * that fails we grab the sleepq lock to keep the flags from
501          * changing and do it the slow way.
502          *
503          * We have to lock the sleep queue if there are shared waiters
504          * so we can wake them up.
505          */
506         x = sx->sx_lock;
507         if (!(x & SX_LOCK_SHARED_WAITERS) &&
508             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
509             (x & SX_LOCK_EXCLUSIVE_WAITERS)))
510                 goto out;
511
512         /*
513          * Lock the sleep queue so we can read the waiters bits
514          * without any races and wakeup any shared waiters.
515          */
516         sleepq_lock(&sx->lock_object);
517
518         /*
519          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
520          * shared lock.  If there are any shared waiters, wake them up.
521          */
522         wakeup_swapper = 0;
523         x = sx->sx_lock;
524         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
525             (x & SX_LOCK_EXCLUSIVE_WAITERS));
526         if (x & SX_LOCK_SHARED_WAITERS)
527                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
528                     0, SQ_SHARED_QUEUE);
529         sleepq_release(&sx->lock_object);
530
531         if (wakeup_swapper)
532                 kick_proc0();
533
534 out:
535         curthread->td_sx_slocks++;
536         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
537         LOCKSTAT_RECORD0(sx__downgrade, sx);
538 }
539
540 void
541 sx_downgrade_(struct sx *sx, const char *file, int line)
542 {
543
544         sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
545 }
546
547 #ifdef  ADAPTIVE_SX
548 static inline void
549 sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
550 {
551
552         if (x & SX_LOCK_WRITE_SPINNER)
553                 return;
554         if (*in_critical) {
555                 critical_exit();
556                 *in_critical = false;
557                 (*extra_work)--;
558         }
559 }
560 #else
561 #define sx_drop_critical(x, in_critical, extra_work) do { } while(0)
562 #endif
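/*
 * The in_critical/SX_LOCK_WRITE_SPINNER pairing used below: a writer
 * that advertises itself with SX_LOCK_WRITE_SPINNER also enters a
 * critical section so it cannot be preempted while readers are being
 * held off on its behalf.  sx_drop_critical() is the common exit path
 * for the cases where that advertisement has to be abandoned.
 */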
563
564 /*
565  * This function represents the so-called 'hard case' for sx_xlock
566  * operation.  All 'easy case' failures are redirected to this.  Note
567  * that ideally this would be a static function, but it needs to be
568  * accessible from at least sx.h.
569  */
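/*
 * The overall shape of the loop below: retry the fast acquisition,
 * adaptively spin while an exclusive owner is running on another CPU,
 * spin a bounded number of rounds against a crowd of readers, and only
 * then take the sleep queue lock, set the exclusive waiters flag and go
 * to sleep on the exclusive queue.
 */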
570 int
571 _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
572 {
573         GIANT_DECLARE;
574         uintptr_t tid, setx;
575 #ifdef ADAPTIVE_SX
576         struct thread *owner;
577         u_int i, n, spintries = 0;
578         enum { READERS, WRITER } sleep_reason = READERS;
579         bool in_critical = false;
580 #endif
581 #ifdef LOCK_PROFILING
582         uint64_t waittime = 0;
583         int contested = 0;
584 #endif
585         int error = 0;
586 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
587         struct lock_delay_arg lda;
588 #endif
589 #ifdef  KDTRACE_HOOKS
590         u_int sleep_cnt = 0;
591         int64_t sleep_time = 0;
592         int64_t all_time = 0;
593 #endif
594 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
595         uintptr_t state = 0;
596         int doing_lockprof = 0;
597 #endif
598         int extra_work = 0;
599
600         tid = (uintptr_t)curthread;
601
602 #ifdef KDTRACE_HOOKS
603         if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
604                 while (x == SX_LOCK_UNLOCKED) {
605                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
606                                 goto out_lockstat;
607                 }
608                 extra_work = 1;
609                 doing_lockprof = 1;
610                 all_time -= lockstat_nsecs(&sx->lock_object);
611                 state = x;
612         }
613 #endif
614 #ifdef LOCK_PROFILING
615         extra_work = 1;
616         doing_lockprof = 1;
617         state = x;
618 #endif
619
620         if (SCHEDULER_STOPPED())
621                 return (0);
622
623         if (__predict_false(x == SX_LOCK_UNLOCKED))
624                 x = SX_READ_VALUE(sx);
625
626         /* If we already hold an exclusive lock, then recurse. */
627         if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
628                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
629             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
630                     sx->lock_object.lo_name, file, line));
631                 sx->sx_recurse++;
632                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
633                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
634                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
635                 return (0);
636         }
637
638         if (LOCK_LOG_TEST(&sx->lock_object, 0))
639                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
640                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
641
642 #if defined(ADAPTIVE_SX)
643         lock_delay_arg_init(&lda, &sx_delay);
644 #elif defined(KDTRACE_HOOKS)
645         lock_delay_arg_init_noadapt(&lda);
646 #endif
647
648 #ifdef HWPMC_HOOKS
649         PMC_SOFT_CALL( , , lock, failed);
650 #endif
651         lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
652             &waittime);
653
654 #ifndef INVARIANTS
655         GIANT_SAVE(extra_work);
656 #endif
657
658         for (;;) {
659                 if (x == SX_LOCK_UNLOCKED) {
660                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
661                                 break;
662                         continue;
663                 }
664 #ifdef INVARIANTS
665                 GIANT_SAVE(extra_work);
666 #endif
667 #ifdef KDTRACE_HOOKS
668                 lda.spin_cnt++;
669 #endif
670 #ifdef ADAPTIVE_SX
671                 if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
672                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
673                                 break;
674                         continue;
675                 }
676
677                 /*
678                  * If the lock is write locked and the owner is
679                  * running on another CPU, spin until the owner stops
680                  * running or the state of the lock changes.
681                  */
682                 if ((x & SX_LOCK_SHARED) == 0) {
683                         sx_drop_critical(x, &in_critical, &extra_work);
684                         sleep_reason = WRITER;
685                         owner = lv_sx_owner(x);
686                         if (!TD_IS_RUNNING(owner))
687                                 goto sleepq;
688                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
689                                 CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
690                                     __func__, sx, owner);
691                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
692                             "spinning", "lockname:\"%s\"",
693                             sx->lock_object.lo_name);
694                         do {
695                                 lock_delay(&lda);
696                                 x = SX_READ_VALUE(sx);
697                                 owner = lv_sx_owner(x);
698                         } while (owner != NULL && TD_IS_RUNNING(owner));
699                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
700                             "running");
701                         continue;
702                 } else if (SX_SHARERS(x) > 0) {
703                         sleep_reason = READERS;
704                         if (spintries == asx_retries)
705                                 goto sleepq;
706                         if (!(x & SX_LOCK_WRITE_SPINNER)) {
707                                 if (!in_critical) {
708                                         critical_enter();
709                                         in_critical = true;
710                                         extra_work++;
711                                 }
712                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
713                                     x | SX_LOCK_WRITE_SPINNER)) {
714                                         critical_exit();
715                                         in_critical = false;
716                                         extra_work--;
717                                         continue;
718                                 }
719                         }
720                         spintries++;
721                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
722                             "spinning", "lockname:\"%s\"",
723                             sx->lock_object.lo_name);
724                         n = SX_SHARERS(x);
725                         for (i = 0; i < asx_loops; i += n) {
726                                 lock_delay_spin(n);
727                                 x = SX_READ_VALUE(sx);
728                                 if (!(x & SX_LOCK_WRITE_SPINNER))
729                                         break;
730                                 if (!(x & SX_LOCK_SHARED))
731                                         break;
732                                 n = SX_SHARERS(x);
733                                 if (n == 0)
734                                         break;
735                         }
736 #ifdef KDTRACE_HOOKS
737                         lda.spin_cnt += i;
738 #endif
739                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
740                             "running");
741                         if (i < asx_loops)
742                                 continue;
743                 }
744 sleepq:
745 #endif
746                 sleepq_lock(&sx->lock_object);
747                 x = SX_READ_VALUE(sx);
748 retry_sleepq:
749
750                 /*
751                  * If the lock was released while spinning on the
752                  * sleep queue chain lock, try again.
753                  */
754                 if (x == SX_LOCK_UNLOCKED) {
755                         sleepq_release(&sx->lock_object);
756                         sx_drop_critical(x, &in_critical, &extra_work);
757                         continue;
758                 }
759
760 #ifdef ADAPTIVE_SX
761                 /*
762                  * The current lock owner might have started executing
763                  * on another CPU (or the lock could have changed
764                  * owners) while we were waiting on the sleep queue
765                  * chain lock.  If so, drop the sleep queue lock and try
766                  * again.
767                  */
768                 if (!(x & SX_LOCK_SHARED)) {
769                         owner = (struct thread *)SX_OWNER(x);
770                         if (TD_IS_RUNNING(owner)) {
771                                 sleepq_release(&sx->lock_object);
772                                 sx_drop_critical(x, &in_critical,
773                                     &extra_work);
774                                 continue;
775                         }
776                 } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
777                         sleepq_release(&sx->lock_object);
778                         sx_drop_critical(x, &in_critical, &extra_work);
779                         continue;
780                 }
781 #endif
782
783                 /*
784                  * If an exclusive lock was released with both shared
785                  * and exclusive waiters and a shared waiter hasn't
786                  * woken up and acquired the lock yet, sx_lock will be
787                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
788                  * If we see that value, try to acquire it once.  Note
789                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
790                  * as there are other exclusive waiters still.  If we
791                  * fail, restart the loop.
792                  */
793                 setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
794                 if ((x & ~setx) == SX_LOCK_SHARED) {
795                         setx &= ~SX_LOCK_WRITE_SPINNER;
796                         if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
797                                 goto retry_sleepq;
798                         sleepq_release(&sx->lock_object);
799                         CTR2(KTR_LOCK, "%s: %p claimed by new writer",
800                             __func__, sx);
801                         break;
802                 }
803
804 #ifdef ADAPTIVE_SX
805                 /*
806                  * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
807                  * It is an invariant that when the bit is set, there is
808                  * a writer ready to grab the lock. Thus clear the bit since
809                  * we are going to sleep.
810                  */
811                 if (in_critical) {
812                         if ((x & SX_LOCK_WRITE_SPINNER) ||
813                             !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
814                                 setx = x & ~SX_LOCK_WRITE_SPINNER;
815                                 setx |= SX_LOCK_EXCLUSIVE_WAITERS;
816                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
817                                     setx)) {
818                                         goto retry_sleepq;
819                                 }
820                         }
821                         critical_exit();
822                         in_critical = false;
823                 } else {
824 #endif
825                         /*
826                          * Try to set the SX_LOCK_EXCLUSIVE_WAITERS.  If we fail,
827                          * then loop back and retry.
828                          */
829                         if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
830                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
831                                     x | SX_LOCK_EXCLUSIVE_WAITERS)) {
832                                         goto retry_sleepq;
833                                 }
834                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
835                                         CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
836                                             __func__, sx);
837                         }
838 #ifdef ADAPTIVE_SX
839                 }
840 #endif
841
842                 /*
843                  * Since we have been unable to acquire the exclusive
844                  * lock and the exclusive waiters flag is set, we have
845                  * to sleep.
846                  */
847                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
848                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
849                             __func__, sx);
850
851 #ifdef KDTRACE_HOOKS
852                 sleep_time -= lockstat_nsecs(&sx->lock_object);
853 #endif
854                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
855                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
856                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
857                 if (!(opts & SX_INTERRUPTIBLE))
858                         sleepq_wait(&sx->lock_object, 0);
859                 else
860                         error = sleepq_wait_sig(&sx->lock_object, 0);
861 #ifdef KDTRACE_HOOKS
862                 sleep_time += lockstat_nsecs(&sx->lock_object);
863                 sleep_cnt++;
864 #endif
865                 if (error) {
866                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
867                                 CTR2(KTR_LOCK,
868                         "%s: interruptible sleep by %p suspended by signal",
869                                     __func__, sx);
870                         break;
871                 }
872                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
873                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
874                             __func__, sx);
875                 x = SX_READ_VALUE(sx);
876         }
877         if (__predict_true(!extra_work))
878                 return (error);
879 #ifdef ADAPTIVE_SX
880         if (in_critical)
881                 critical_exit();
882 #endif
883         GIANT_RESTORE();
884 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
885         if (__predict_true(!doing_lockprof))
886                 return (error);
887 #endif
888 #ifdef KDTRACE_HOOKS
889         all_time += lockstat_nsecs(&sx->lock_object);
890         if (sleep_time)
891                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
892                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
893                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
894         if (lda.spin_cnt > sleep_cnt)
895                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
896                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
897                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
898 out_lockstat:
899 #endif
900         if (!error)
901                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
902                     contested, waittime, file, line, LOCKSTAT_WRITER);
903         return (error);
904 }
905
906 /*
907  * This function represents the so-called 'hard case' for sx_xunlock
908  * operation.  All 'easy case' failures are redirected to this.  Note
909  * that ideally this would be a static function, but it needs to be
910  * accessible from at least sx.h.
911  */
912 void
913 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
914 {
915         uintptr_t tid, setx;
916         int queue, wakeup_swapper;
917
918         if (SCHEDULER_STOPPED())
919                 return;
920
921         tid = (uintptr_t)curthread;
922
923         if (__predict_false(x == tid))
924                 x = SX_READ_VALUE(sx);
925
926         MPASS(!(x & SX_LOCK_SHARED));
927
928         if (__predict_false(x & SX_LOCK_RECURSED)) {
929                 /* The lock is recursed, unrecurse one level. */
930                 if ((--sx->sx_recurse) == 0)
931                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
932                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
933                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
934                 return;
935         }
936
937         LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
938         if (x == tid &&
939             atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
940                 return;
941
942         if (LOCK_LOG_TEST(&sx->lock_object, 0))
943                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
944
945         sleepq_lock(&sx->lock_object);
946         x = SX_READ_VALUE(sx);
947         MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));
948
949         /*
950          * The wake up algorithm here is quite simple and probably not
951          * ideal.  It gives precedence to shared waiters if they are
952          * present.  For this condition, we have to preserve the
953          * state of the exclusive waiters flag.
954          * If interruptible sleeps left the shared queue empty, avoid
955          * starving the threads sleeping on the exclusive queue by giving
956          * them precedence and clearing the shared waiters bit anyway.
957          */
958         setx = SX_LOCK_UNLOCKED;
959         queue = SQ_SHARED_QUEUE;
960         if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 &&
961             sleepq_sleepcnt(&sx->lock_object, SQ_EXCLUSIVE_QUEUE) != 0) {
962                 queue = SQ_EXCLUSIVE_QUEUE;
963                 setx |= (x & SX_LOCK_SHARED_WAITERS);
964         }
965         atomic_store_rel_ptr(&sx->sx_lock, setx);
966
967         /* Wake up all the waiters for the specific queue. */
968         if (LOCK_LOG_TEST(&sx->lock_object, 0))
969                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
970                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
971                     "exclusive");
972
973         wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
974             queue);
975         sleepq_release(&sx->lock_object);
976         if (wakeup_swapper)
977                 kick_proc0();
978 }
979
980 static bool __always_inline
981 __sx_can_read(struct thread *td, uintptr_t x, bool fp)
982 {
983
984         if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
985                         == SX_LOCK_SHARED)
986                 return (true);
987         if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
988                 return (true);
989         return (false);
990 }
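/*
 * __sx_can_read() encodes the reader admission policy: a new shared
 * request is normally refused once a writer is waiting or spinning (so
 * writers are not starved), but a thread that already holds shared sx
 * locks (td_sx_slocks != 0) is still admitted on the slow path so that
 * an existing reader cannot deadlock against a writer queued behind it.
 */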
991
992 static bool __always_inline
993 __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
994     LOCK_FILE_LINE_ARG_DEF)
995 {
996
997         /*
998          * If no other thread has an exclusive lock then try to bump up
999          * the count of sharers.  Since we have to preserve the state
1000          * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
1001          * shared lock loop back and retry.
1002          * shared lock, loop back and retry.
1003         while (__sx_can_read(td, *xp, fp)) {
1004                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
1005                     *xp + SX_ONE_SHARER)) {
1006                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1007                                 CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
1008                                     __func__, sx, (void *)*xp,
1009                                     (void *)(*xp + SX_ONE_SHARER));
1010                         td->td_sx_slocks++;
1011                         return (true);
1012                 }
1013         }
1014         return (false);
1015 }
1016
1017 static int __noinline
1018 _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
1019 {
1020         GIANT_DECLARE;
1021         struct thread *td;
1022 #ifdef ADAPTIVE_SX
1023         struct thread *owner;
1024         u_int i, n, spintries = 0;
1025 #endif
1026 #ifdef LOCK_PROFILING
1027         uint64_t waittime = 0;
1028         int contested = 0;
1029 #endif
1030         int error = 0;
1031 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
1032         struct lock_delay_arg lda;
1033 #endif
1034 #ifdef KDTRACE_HOOKS
1035         u_int sleep_cnt = 0;
1036         int64_t sleep_time = 0;
1037         int64_t all_time = 0;
1038 #endif
1039 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
1040         uintptr_t state = 0;
1041 #endif
1042         int extra_work = 0;
1043
1044         td = curthread;
1045
1046 #ifdef KDTRACE_HOOKS
1047         if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
1048                 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1049                         goto out_lockstat;
1050                 extra_work = 1;
1051                 all_time -= lockstat_nsecs(&sx->lock_object);
1052                 state = x;
1053         }
1054 #endif
1055 #ifdef LOCK_PROFILING
1056         extra_work = 1;
1057         state = x;
1058 #endif
1059
1060         if (SCHEDULER_STOPPED())
1061                 return (0);
1062
1063 #if defined(ADAPTIVE_SX)
1064         lock_delay_arg_init(&lda, &sx_delay);
1065 #elif defined(KDTRACE_HOOKS)
1066         lock_delay_arg_init_noadapt(&lda);
1067 #endif
1068
1069 #ifdef HWPMC_HOOKS
1070         PMC_SOFT_CALL( , , lock, failed);
1071 #endif
1072         lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
1073             &waittime);
1074
1075 #ifndef INVARIANTS
1076         GIANT_SAVE(extra_work);
1077 #endif
1078
1079         /*
1080          * As with rwlocks, we don't make any attempt to try to block
1081          * shared locks once there is an exclusive waiter.
1082          */
1083         for (;;) {
1084                 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1085                         break;
1086 #ifdef INVARIANTS
1087                 GIANT_SAVE(extra_work);
1088 #endif
1089 #ifdef KDTRACE_HOOKS
1090                 lda.spin_cnt++;
1091 #endif
1092
1093 #ifdef ADAPTIVE_SX
1094                 /*
1095                  * If the owner is running on another CPU, spin until
1096                  * the owner stops running or the state of the lock
1097                  * changes.
1098                  */
1099                 if ((x & SX_LOCK_SHARED) == 0) {
1100                         owner = lv_sx_owner(x);
1101                         if (TD_IS_RUNNING(owner)) {
1102                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1103                                         CTR3(KTR_LOCK,
1104                                             "%s: spinning on %p held by %p",
1105                                             __func__, sx, owner);
1106                                 KTR_STATE1(KTR_SCHED, "thread",
1107                                     sched_tdname(curthread), "spinning",
1108                                     "lockname:\"%s\"", sx->lock_object.lo_name);
1109                                 do {
1110                                         lock_delay(&lda);
1111                                         x = SX_READ_VALUE(sx);
1112                                         owner = lv_sx_owner(x);
1113                                 } while (owner != NULL && TD_IS_RUNNING(owner));
1114                                 KTR_STATE0(KTR_SCHED, "thread",
1115                                     sched_tdname(curthread), "running");
1116                                 continue;
1117                         }
1118                 } else {
1119                         if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
1120                                 MPASS(!__sx_can_read(td, x, false));
1121                                 lock_delay_spin(2);
1122                                 x = SX_READ_VALUE(sx);
1123                                 continue;
1124                         }
1125                         if (spintries < asx_retries) {
1126                                 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
1127                                     "spinning", "lockname:\"%s\"",
1128                                     sx->lock_object.lo_name);
1129                                 n = SX_SHARERS(x);
1130                                 for (i = 0; i < asx_loops; i += n) {
1131                                         lock_delay_spin(n);
1132                                         x = SX_READ_VALUE(sx);
1133                                         if (!(x & SX_LOCK_SHARED))
1134                                                 break;
1135                                         n = SX_SHARERS(x);
1136                                         if (n == 0)
1137                                                 break;
1138                                         if (__sx_can_read(td, x, false))
1139                                                 break;
1140                                 }
1141 #ifdef KDTRACE_HOOKS
1142                                 lda.spin_cnt += i;
1143 #endif
1144                                 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
1145                                     "running");
1146                                 if (i < asx_loops)
1147                                         continue;
1148                         }
1149                 }
1150 #endif
1151
1152                 /*
1153                  * Some other thread already has an exclusive lock, so
1154                  * start the process of blocking.
1155                  */
1156                 sleepq_lock(&sx->lock_object);
1157                 x = SX_READ_VALUE(sx);
1158 retry_sleepq:
1159                 if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
1160                     __sx_can_read(td, x, false)) {
1161                         sleepq_release(&sx->lock_object);
1162                         continue;
1163                 }
1164
1165 #ifdef ADAPTIVE_SX
1166                 /*
1167                  * If the owner is running on another CPU, spin until
1168                  * the owner stops running or the state of the lock
1169                  * changes.
1170                  */
1171                 if (!(x & SX_LOCK_SHARED)) {
1172                         owner = (struct thread *)SX_OWNER(x);
1173                         if (TD_IS_RUNNING(owner)) {
1174                                 sleepq_release(&sx->lock_object);
1175                                 x = SX_READ_VALUE(sx);
1176                                 continue;
1177                         }
1178                 }
1179 #endif
1180
1181                 /*
1182                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
1183                  * fail to set it, drop the sleep queue lock and loop
1184                  * back.
1185                  */
1186                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
1187                         if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
1188                             x | SX_LOCK_SHARED_WAITERS))
1189                                 goto retry_sleepq;
1190                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1191                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
1192                                     __func__, sx);
1193                 }
1194
1195                 /*
1196                  * Since we have been unable to acquire the shared lock,
1197                  * we have to sleep.
1198                  */
1199                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1200                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
1201                             __func__, sx);
1202
1203 #ifdef KDTRACE_HOOKS
1204                 sleep_time -= lockstat_nsecs(&sx->lock_object);
1205 #endif
1206                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
1207                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
1208                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
1209                 if (!(opts & SX_INTERRUPTIBLE))
1210                         sleepq_wait(&sx->lock_object, 0);
1211                 else
1212                         error = sleepq_wait_sig(&sx->lock_object, 0);
1213 #ifdef KDTRACE_HOOKS
1214                 sleep_time += lockstat_nsecs(&sx->lock_object);
1215                 sleep_cnt++;
1216 #endif
1217                 if (error) {
1218                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1219                                 CTR2(KTR_LOCK,
1220                         "%s: interruptible sleep by %p suspended by signal",
1221                                     __func__, sx);
1222                         break;
1223                 }
1224                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1225                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
1226                             __func__, sx);
1227                 x = SX_READ_VALUE(sx);
1228         }
1229 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
1230         if (__predict_true(!extra_work))
1231                 return (error);
1232 #endif
1233 #ifdef KDTRACE_HOOKS
1234         all_time += lockstat_nsecs(&sx->lock_object);
1235         if (sleep_time)
1236                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
1237                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1238                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1239         if (lda.spin_cnt > sleep_cnt)
1240                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1241                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1242                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1243 out_lockstat:
1244 #endif
1245         if (error == 0) {
1246                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
1247                     contested, waittime, file, line, LOCKSTAT_READER);
1248         }
1249         GIANT_RESTORE();
1250         return (error);
1251 }
1252
1253 int
1254 _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
1255 {
1256         struct thread *td;
1257         uintptr_t x;
1258         int error;
1259
1260         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
1261             !TD_IS_IDLETHREAD(curthread),
1262             ("sx_slock() by idle thread %p on sx %s @ %s:%d",
1263             curthread, sx->lock_object.lo_name, file, line));
1264         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1265             ("sx_slock() of destroyed sx @ %s:%d", file, line));
1266         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1267
1268         error = 0;
1269         td = curthread;
1270         x = SX_READ_VALUE(sx);
1271         if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
1272             !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
1273                 error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
1274         else
1275                 lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
1276                     file, line);
1277         if (error == 0) {
1278                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1279                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
1280                 TD_LOCKS_INC(curthread);
1281         }
1282         return (error);
1283 }
1284
1285 int
1286 _sx_slock(struct sx *sx, int opts, const char *file, int line)
1287 {
1288
1289         return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
1290 }
1291
1292 static bool __always_inline
1293 _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
1294 {
1295
1296         for (;;) {
1297                 if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
1298                         if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1299                             *xp - SX_ONE_SHARER)) {
1300                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1301                                         CTR4(KTR_LOCK,
1302                                             "%s: %p succeeded %p -> %p",
1303                                             __func__, sx, (void *)*xp,
1304                                             (void *)(*xp - SX_ONE_SHARER));
1305                                 td->td_sx_slocks--;
1306                                 return (true);
1307                         }
1308                         continue;
1309                 }
1310                 break;
1311         }
1312         return (false);
1313 }
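/*
 * _sx_sunlock_try() succeeds whenever this release cannot be the one
 * that must wake anybody: either other sharers remain or no waiter bits
 * are set.  The last sharer of a contested lock falls through to
 * _sx_sunlock_hard() below, which takes the sleep queue lock and wakes
 * the appropriate queue.
 */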
1314
1315 static void __noinline
1316 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
1317     LOCK_FILE_LINE_ARG_DEF)
1318 {
1319         int wakeup_swapper = 0;
1320         uintptr_t setx, queue;
1321
1322         if (SCHEDULER_STOPPED())
1323                 return;
1324
1325         if (_sx_sunlock_try(sx, td, &x))
1326                 goto out_lockstat;
1327
1328         sleepq_lock(&sx->lock_object);
1329         x = SX_READ_VALUE(sx);
1330         for (;;) {
1331                 if (_sx_sunlock_try(sx, td, &x))
1332                         break;
1333
1334                 /*
1335                  * The wakeup semantics here are quite simple:
1336                  * just wake up all the exclusive waiters.
1337                  * Note that the state of the lock could have changed,
1338                  * so if the update fails, loop back and retry.
1339                  */
1340                 setx = SX_LOCK_UNLOCKED;
1341                 queue = SQ_SHARED_QUEUE;
1342                 if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
1343                         setx |= (x & SX_LOCK_SHARED_WAITERS);
1344                         queue = SQ_EXCLUSIVE_QUEUE;
1345                 }
1346                 setx |= (x & SX_LOCK_WRITE_SPINNER);
1347                 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1348                         continue;
1349                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1350                         CTR2(KTR_LOCK, "%s: %p waking up all threads on"
1351                             " exclusive queue", __func__, sx);
1352                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
1353                     0, queue);
1354                 td->td_sx_slocks--;
1355                 break;
1356         }
1357         sleepq_release(&sx->lock_object);
1358         if (wakeup_swapper)
1359                 kick_proc0();
1360 out_lockstat:
1361         LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
1362 }
1363
1364 void
1365 _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
1366 {
1367         struct thread *td;
1368         uintptr_t x;
1369
1370         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1371             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
1372         _sx_assert(sx, SA_SLOCKED, file, line);
1373         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1374         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1375
1376         td = curthread;
1377         x = SX_READ_VALUE(sx);
1378         if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
1379             !_sx_sunlock_try(sx, td, &x)))
1380                 _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
1381         else
1382                 lock_profile_release_lock(&sx->lock_object);
1383
1384         TD_LOCKS_DEC(curthread);
1385 }
1386
1387 void
1388 _sx_sunlock(struct sx *sx, const char *file, int line)
1389 {
1390
1391         _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
1392 }
1393
1394 #ifdef INVARIANT_SUPPORT
1395 #ifndef INVARIANTS
1396 #undef  _sx_assert
1397 #endif
1398
1399 /*
1400  * In the non-WITNESS case, sx_assert() can only detect that at least
1401  * *some* thread owns an slock, but it cannot guarantee that *this*
1402  * thread owns an slock.
1403  */
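/*
 * A typical assertion at the top of a function that requires its caller
 * to hold the lock (illustrative only; "sc->sc_lock" is a placeholder):
 *
 *	sx_assert(&sc->sc_lock, SA_XLOCKED);
 *
 * With INVARIANTS this panics when the requirement is violated; in
 * kernels built without it the check compiles away.
 */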
1404 void
1405 _sx_assert(const struct sx *sx, int what, const char *file, int line)
1406 {
1407 #ifndef WITNESS
1408         int slocked = 0;
1409 #endif
1410
1411         if (SCHEDULER_STOPPED())
1412                 return;
1413         switch (what) {
1414         case SA_SLOCKED:
1415         case SA_SLOCKED | SA_NOTRECURSED:
1416         case SA_SLOCKED | SA_RECURSED:
1417 #ifndef WITNESS
1418                 slocked = 1;
1419                 /* FALLTHROUGH */
1420 #endif
1421         case SA_LOCKED:
1422         case SA_LOCKED | SA_NOTRECURSED:
1423         case SA_LOCKED | SA_RECURSED:
1424 #ifdef WITNESS
1425                 witness_assert(&sx->lock_object, what, file, line);
1426 #else
1427                 /*
1428                  * If some other thread has an exclusive lock or we
1429                  * have one and are asserting a shared lock, fail.
1430                  * Also, if no one has a lock at all, fail.
1431                  */
1432                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1433                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1434                     sx_xholder(sx) != curthread)))
1435                         panic("Lock %s not %slocked @ %s:%d\n",
1436                             sx->lock_object.lo_name, slocked ? "share " : "",
1437                             file, line);
1438
1439                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1440                         if (sx_recursed(sx)) {
1441                                 if (what & SA_NOTRECURSED)
1442                                         panic("Lock %s recursed @ %s:%d\n",
1443                                             sx->lock_object.lo_name, file,
1444                                             line);
1445                         } else if (what & SA_RECURSED)
1446                                 panic("Lock %s not recursed @ %s:%d\n",
1447                                     sx->lock_object.lo_name, file, line);
1448                 }
1449 #endif
1450                 break;
1451         case SA_XLOCKED:
1452         case SA_XLOCKED | SA_NOTRECURSED:
1453         case SA_XLOCKED | SA_RECURSED:
1454                 if (sx_xholder(sx) != curthread)
1455                         panic("Lock %s not exclusively locked @ %s:%d\n",
1456                             sx->lock_object.lo_name, file, line);
1457                 if (sx_recursed(sx)) {
1458                         if (what & SA_NOTRECURSED)
1459                                 panic("Lock %s recursed @ %s:%d\n",
1460                                     sx->lock_object.lo_name, file, line);
1461                 } else if (what & SA_RECURSED)
1462                         panic("Lock %s not recursed @ %s:%d\n",
1463                             sx->lock_object.lo_name, file, line);
1464                 break;
1465         case SA_UNLOCKED:
1466 #ifdef WITNESS
1467                 witness_assert(&sx->lock_object, what, file, line);
1468 #else
1469                 /*
1470                  * If we hold an exclusive lock, fail.  We can't
1471                  * reliably check to see if we hold a shared lock or
1472                  * not.
1473                  */
1474                 if (sx_xholder(sx) == curthread)
1475                         panic("Lock %s exclusively locked @ %s:%d\n",
1476                             sx->lock_object.lo_name, file, line);
1477 #endif
1478                 break;
1479         default:
1480                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1481                     line);
1482         }
1483 }
1484 #endif  /* INVARIANT_SUPPORT */
1485
1486 #ifdef DDB
1487 static void
1488 db_show_sx(const struct lock_object *lock)
1489 {
1490         struct thread *td;
1491         const struct sx *sx;
1492
1493         sx = (const struct sx *)lock;
1494
1495         db_printf(" state: ");
1496         if (sx->sx_lock == SX_LOCK_UNLOCKED)
1497                 db_printf("UNLOCKED\n");
1498         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1499                 db_printf("DESTROYED\n");
1500                 return;
1501         } else if (sx->sx_lock & SX_LOCK_SHARED)
1502                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1503         else {
1504                 td = sx_xholder(sx);
1505                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1506                     td->td_tid, td->td_proc->p_pid, td->td_name);
1507                 if (sx_recursed(sx))
1508                         db_printf(" recursed: %d\n", sx->sx_recurse);
1509         }
1510
1511         db_printf(" waiters: ");
1512         switch(sx->sx_lock &
1513             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1514         case SX_LOCK_SHARED_WAITERS:
1515                 db_printf("shared\n");
1516                 break;
1517         case SX_LOCK_EXCLUSIVE_WAITERS:
1518                 db_printf("exclusive\n");
1519                 break;
1520         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1521                 db_printf("exclusive and shared\n");
1522                 break;
1523         default:
1524                 db_printf("none\n");
1525         }
1526 }
1527
1528 /*
1529  * Check to see if a thread that is blocked on a sleep queue is actually
1530  * blocked on an sx lock.  If so, output some details and return true.
1531  * If the lock has an exclusive owner, return that in *ownerp.
1532  */
1533 int
1534 sx_chain(struct thread *td, struct thread **ownerp)
1535 {
1536         const struct sx *sx;
1537
1538         /*
1539          * Check to see if this thread is blocked on an sx lock.
1540          * First, we check the lock class.  If that is ok, then we
1541          * compare the lock name against the wait message.
1542          */
1543         sx = td->td_wchan;
1544         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1545             sx->lock_object.lo_name != td->td_wmesg)
1546                 return (0);
1547
1548         /* We think we have an sx lock, so output some details. */
1549         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1550         *ownerp = sx_xholder(sx);
1551         if (sx->sx_lock & SX_LOCK_SHARED)
1552                 db_printf("SLOCK (count %ju)\n",
1553                     (uintmax_t)SX_SHARERS(sx->sx_lock));
1554         else
1555                 db_printf("XLOCK\n");
1556         return (1);
1557 }
1558 #endif