1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
5  * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice(s), this list of conditions and the following disclaimer as
13  *    the first lines of this file unmodified other than the possible
14  *    addition of one or more copyright notices.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice(s), this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
20  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
23  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29  * DAMAGE.
30  */
31
32 /*
33  * Shared/exclusive locks.  This implementation attempts to ensure
34  * deterministic lock granting behavior, so that slocks and xlocks are
35  * interleaved.
36  *
37  * Priority propagation will not generally raise the priority of lock holders,
38  * so should not be relied upon in combination with sx locks.
39  */
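/*
 * Typical usage, per sx(9).  This is an illustrative sketch only; the
 * "foo" names are hypothetical and not part of this file:
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		(any number of readers)
 *	... read the protected data ...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		(single writer, may sleep while held)
 *	... modify the protected data ...
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);
 */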
40
41 #include "opt_ddb.h"
42 #include "opt_hwpmc_hooks.h"
43 #include "opt_no_adaptive_sx.h"
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kdb.h>
51 #include <sys/kernel.h>
52 #include <sys/ktr.h>
53 #include <sys/lock.h>
54 #include <sys/mutex.h>
55 #include <sys/proc.h>
56 #include <sys/sched.h>
57 #include <sys/sleepqueue.h>
58 #include <sys/sx.h>
59 #include <sys/smp.h>
60 #include <sys/sysctl.h>
61
62 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
63 #include <machine/cpu.h>
64 #endif
65
66 #ifdef DDB
67 #include <ddb/ddb.h>
68 #endif
69
70 #if defined(SMP) && !defined(NO_ADAPTIVE_SX)
71 #define ADAPTIVE_SX
72 #endif
73
74 #ifdef HWPMC_HOOKS
75 #include <sys/pmckern.h>
76 PMC_SOFT_DECLARE( , , lock, failed);
77 #endif
78
79 /* Handy macros for sleep queues. */
80 #define SQ_EXCLUSIVE_QUEUE      0
81 #define SQ_SHARED_QUEUE         1
82
83 /*
84  * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
85  * drop Giant any time we have to sleep or adaptively spin.
86  */
87 #define GIANT_DECLARE                                                   \
88         int _giantcnt = 0;                                              \
89         WITNESS_SAVE_DECL(Giant)                                        \
90
91 #define GIANT_SAVE(work) do {                                           \
92         if (__predict_false(mtx_owned(&Giant))) {                       \
93                 work++;                                                 \
94                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
95                 while (mtx_owned(&Giant)) {                             \
96                         _giantcnt++;                                    \
97                         mtx_unlock(&Giant);                             \
98                 }                                                       \
99         }                                                               \
100 } while (0)
101
102 #define GIANT_RESTORE() do {                                            \
103         if (_giantcnt > 0) {                                            \
104                 mtx_assert(&Giant, MA_NOTOWNED);                        \
105                 while (_giantcnt--)                                     \
106                         mtx_lock(&Giant);                               \
107                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
108         }                                                               \
109 } while (0)
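/*
 * The three macros above are used together; the sketch below mirrors the
 * pattern followed by the hard-path functions later in this file:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);	(drop Giant before sleeping or spinning)
 *	sleepq_wait(...);
 *	...
 *	GIANT_RESTORE();	(reacquire Giant the same number of times)
 */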
110
111 /*
112  * Returns true if an exclusive lock is recursed.  It assumes
113  * curthread currently has an exclusive lock.
114  */
115 #define sx_recursed(sx)         ((sx)->sx_recurse != 0)
116
117 static void     assert_sx(const struct lock_object *lock, int what);
118 #ifdef DDB
119 static void     db_show_sx(const struct lock_object *lock);
120 #endif
121 static void     lock_sx(struct lock_object *lock, uintptr_t how);
122 #ifdef KDTRACE_HOOKS
123 static int      owner_sx(const struct lock_object *lock, struct thread **owner);
124 #endif
125 static uintptr_t unlock_sx(struct lock_object *lock);
126
127 struct lock_class lock_class_sx = {
128         .lc_name = "sx",
129         .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
130         .lc_assert = assert_sx,
131 #ifdef DDB
132         .lc_ddb_show = db_show_sx,
133 #endif
134         .lc_lock = lock_sx,
135         .lc_unlock = unlock_sx,
136 #ifdef KDTRACE_HOOKS
137         .lc_owner = owner_sx,
138 #endif
139 };
140
141 #ifndef INVARIANTS
142 #define _sx_assert(sx, what, file, line)
143 #endif
144
145 #ifdef ADAPTIVE_SX
146 #ifdef SX_CUSTOM_BACKOFF
147 static u_short __read_frequently asx_retries;
148 static u_short __read_frequently asx_loops;
149 static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging");
150 SYSCTL_U16(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, "");
151 SYSCTL_U16(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, "");
152
153 static struct lock_delay_config __read_frequently sx_delay;
154
155 SYSCTL_U16(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base,
156     0, "");
157 SYSCTL_U16(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max,
158     0, "");
159
160 static void
161 sx_lock_delay_init(void *arg __unused)
162 {
163
164         lock_delay_default_init(&sx_delay);
165         asx_retries = 10;
166         asx_loops = max(10000, sx_delay.max);
167 }
168 LOCK_DELAY_SYSINIT(sx_lock_delay_init);
169 #else
170 #define sx_delay        locks_delay
171 #define asx_retries     locks_delay_retries
172 #define asx_loops       locks_delay_loops
173 #endif
174 #endif
175
176 void
177 assert_sx(const struct lock_object *lock, int what)
178 {
179
180         sx_assert((const struct sx *)lock, what);
181 }
182
183 void
184 lock_sx(struct lock_object *lock, uintptr_t how)
185 {
186         struct sx *sx;
187
188         sx = (struct sx *)lock;
189         if (how)
190                 sx_slock(sx);
191         else
192                 sx_xlock(sx);
193 }
194
195 uintptr_t
196 unlock_sx(struct lock_object *lock)
197 {
198         struct sx *sx;
199
200         sx = (struct sx *)lock;
201         sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
202         if (sx_xlocked(sx)) {
203                 sx_xunlock(sx);
204                 return (0);
205         } else {
206                 sx_sunlock(sx);
207                 return (1);
208         }
209 }
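/*
 * The value returned by unlock_sx() is only meant to be handed back to
 * lock_sx().  For example, the condition variable code releases and later
 * reacquires a held lock through its lock class, roughly (sketch only):
 *
 *	class = LOCK_CLASS(lock);
 *	how = class->lc_unlock(lock);	(1 if it was share locked)
 *	sleepq_wait(...);
 *	class->lc_lock(lock, how);	(restores the previous mode)
 */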
210
211 #ifdef KDTRACE_HOOKS
212 int
213 owner_sx(const struct lock_object *lock, struct thread **owner)
214 {
215         const struct sx *sx;
216         uintptr_t x;
217
218         sx = (const struct sx *)lock;
219         x = sx->sx_lock;
220         *owner = NULL;
221         return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
222             ((*owner = (struct thread *)SX_OWNER(x)) != NULL));
223 }
224 #endif
225
226 void
227 sx_sysinit(void *arg)
228 {
229         struct sx_args *sargs = arg;
230
231         sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags);
232 }
233
234 void
235 sx_init_flags(struct sx *sx, const char *description, int opts)
236 {
237         int flags;
238
239         MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
240             SX_NOPROFILE | SX_NEW)) == 0);
241         ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
242             ("%s: sx_lock not aligned for %s: %p", __func__, description,
243             &sx->sx_lock));
244
245         flags = LO_SLEEPABLE | LO_UPGRADABLE;
246         if (opts & SX_DUPOK)
247                 flags |= LO_DUPOK;
248         if (opts & SX_NOPROFILE)
249                 flags |= LO_NOPROFILE;
250         if (!(opts & SX_NOWITNESS))
251                 flags |= LO_WITNESS;
252         if (opts & SX_RECURSE)
253                 flags |= LO_RECURSABLE;
254         if (opts & SX_QUIET)
255                 flags |= LO_QUIET;
256         if (opts & SX_NEW)
257                 flags |= LO_NEW;
258
259         lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
260         sx->sx_lock = SX_LOCK_UNLOCKED;
261         sx->sx_recurse = 0;
262 }
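/*
 * Example initialization (hypothetical "foo" names, for illustration only):
 * a lock that may be acquired recursively and is hidden from WITNESS could
 * be set up as
 *
 *	sx_init_flags(&foo_lock, "foo lock", SX_RECURSE | SX_NOWITNESS);
 *
 * while SX_SYSINIT(foo, &foo_lock, "foo lock") arranges for sx_sysinit()
 * above to perform the initialization during boot.
 */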
263
264 void
265 sx_destroy(struct sx *sx)
266 {
267
268         KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
269         KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
270         sx->sx_lock = SX_LOCK_DESTROYED;
271         lock_destroy(&sx->lock_object);
272 }
273
274 int
275 sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
276 {
277         uintptr_t x;
278
279         if (SCHEDULER_STOPPED())
280                 return (1);
281
282         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
283             ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
284             curthread, sx->lock_object.lo_name, file, line));
285
286         x = sx->sx_lock;
287         for (;;) {
288                 KASSERT(x != SX_LOCK_DESTROYED,
289                     ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
290                 if (!(x & SX_LOCK_SHARED))
291                         break;
292                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) {
293                         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
294                         WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
295                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
296                             sx, 0, 0, file, line, LOCKSTAT_READER);
297                         TD_LOCKS_INC(curthread);
298                         curthread->td_sx_slocks++;
299                         return (1);
300                 }
301         }
302
303         LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
304         return (0);
305 }
306
307 int
308 sx_try_slock_(struct sx *sx, const char *file, int line)
309 {
310
311         return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
312 }
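/*
 * A non-blocking read-side pattern built on sx_try_slock() (illustrative
 * sketch with a hypothetical lock):
 *
 *	if (sx_try_slock(&foo_lock)) {
 *		... read the protected data ...
 *		sx_sunlock(&foo_lock);
 *	} else {
 *		... fall back: defer the work or block with sx_slock() ...
 *	}
 */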
313
314 int
315 _sx_xlock(struct sx *sx, int opts, const char *file, int line)
316 {
317         uintptr_t tid, x;
318         int error = 0;
319
320         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
321             !TD_IS_IDLETHREAD(curthread),
322             ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
323             curthread, sx->lock_object.lo_name, file, line));
324         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
325             ("sx_xlock() of destroyed sx @ %s:%d", file, line));
326         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
327             line, NULL);
328         tid = (uintptr_t)curthread;
329         x = SX_LOCK_UNLOCKED;
330         if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
331                 error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
332         else
333                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
334                     0, 0, file, line, LOCKSTAT_WRITER);
335         if (!error) {
336                 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
337                     file, line);
338                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
339                 TD_LOCKS_INC(curthread);
340         }
341
342         return (error);
343 }
344
345 int
346 sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
347 {
348         struct thread *td;
349         uintptr_t tid, x;
350         int rval;
351         bool recursed;
352
353         td = curthread;
354         tid = (uintptr_t)td;
355         if (SCHEDULER_STOPPED_TD(td))
356                 return (1);
357
358         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
359             ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
360             curthread, sx->lock_object.lo_name, file, line));
361         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
362             ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
363
364         rval = 1;
365         recursed = false;
366         x = SX_LOCK_UNLOCKED;
367         for (;;) {
368                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
369                         break;
370                 if (x == SX_LOCK_UNLOCKED)
371                         continue;
372                 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) {
373                         sx->sx_recurse++;
374                         atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                            recursed = true;
375                         break;
376                 }
377                 rval = 0;
378                 break;
379         }
380
381         LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
382         if (rval) {
383                 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
384                     file, line);
385                 if (!recursed)
386                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
387                             sx, 0, 0, file, line, LOCKSTAT_WRITER);
388                 TD_LOCKS_INC(curthread);
389         }
390
391         return (rval);
392 }
393
394 int
395 sx_try_xlock_(struct sx *sx, const char *file, int line)
396 {
397
398         return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
399 }
400
401 void
402 _sx_xunlock(struct sx *sx, const char *file, int line)
403 {
404
405         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
406             ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
407         _sx_assert(sx, SA_XLOCKED, file, line);
408         WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
409         LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
410             line);
411 #if LOCK_DEBUG > 0
412         _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line);
413 #else
414         __sx_xunlock(sx, curthread, file, line);
415 #endif
416         TD_LOCKS_DEC(curthread);
417 }
418
419 /*
420  * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
421  * This will only succeed if this thread holds a single shared lock.
422  * Return 1 if the upgrade succeeds, 0 otherwise.
423  */
424 int
425 sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
426 {
427         uintptr_t x;
428         uintptr_t waiters;
429         int success;
430
431         if (SCHEDULER_STOPPED())
432                 return (1);
433
434         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
435             ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
436         _sx_assert(sx, SA_SLOCKED, file, line);
437
438         /*
439          * Try to switch from one shared lock to an exclusive lock.  We need
440          * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
441          * we will wake up the exclusive waiters when we drop the lock.
442          */
443         success = 0;
444         x = SX_READ_VALUE(sx);
445         for (;;) {
446                 if (SX_SHARERS(x) > 1)
447                         break;
448                 waiters = (x & SX_LOCK_WAITERS);
449                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
450                     (uintptr_t)curthread | waiters)) {
451                         success = 1;
452                         break;
453                 }
454         }
455         LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
456         if (success) {
457                 curthread->td_sx_slocks--;
458                 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
459                     file, line);
460                 LOCKSTAT_RECORD0(sx__upgrade, sx);
461         }
462         return (success);
463 }
464
465 int
466 sx_try_upgrade_(struct sx *sx, const char *file, int line)
467 {
468
469         return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
470 }
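/*
 * The usual upgrade pattern from sx(9), sketched with a hypothetical lock:
 * if the non-blocking upgrade fails, the shared lock must be dropped and
 * the exclusive lock acquired from scratch, after which any state read
 * under the shared lock has to be re-validated.
 *
 *	sx_slock(&foo_lock);
 *	if (change_needed && !sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		... re-check the state; it may have changed ...
 *	}
 */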
471
472 /*
473  * Downgrade an unrecursed exclusive lock into a single shared lock.
474  */
475 void
476 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
477 {
478         uintptr_t x;
479         int wakeup_swapper;
480
481         if (SCHEDULER_STOPPED())
482                 return;
483
484         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
485             ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
486         _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
487 #ifndef INVARIANTS
488         if (sx_recursed(sx))
489                 panic("downgrade of a recursed lock");
490 #endif
491
492         WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
493
494         /*
495          * Try to switch from an exclusive lock with no shared waiters
496          * to one sharer with no shared waiters.  If there are
497          * exclusive waiters, we don't need to lock the sleep queue so
498          * long as we preserve the flag.  We do one quick try and if
499          * that fails we grab the sleepq lock to keep the flags from
500          * changing and do it the slow way.
501          *
502          * We have to lock the sleep queue if there are shared waiters
503          * so we can wake them up.
504          */
505         x = sx->sx_lock;
506         if (!(x & SX_LOCK_SHARED_WAITERS) &&
507             atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
508             (x & SX_LOCK_EXCLUSIVE_WAITERS)))
509                 goto out;
510
511         /*
512          * Lock the sleep queue so we can read the waiters bits
513  * without any races and wake up any shared waiters.
514          */
515         sleepq_lock(&sx->lock_object);
516
517         /*
518          * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
519          * shared lock.  If there are any shared waiters, wake them up.
520          */
521         wakeup_swapper = 0;
522         x = sx->sx_lock;
523         atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
524             (x & SX_LOCK_EXCLUSIVE_WAITERS));
525         if (x & SX_LOCK_SHARED_WAITERS)
526                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
527                     0, SQ_SHARED_QUEUE);
528         sleepq_release(&sx->lock_object);
529
530         if (wakeup_swapper)
531                 kick_proc0();
532
533 out:
534         curthread->td_sx_slocks++;
535         LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
536         LOCKSTAT_RECORD0(sx__downgrade, sx);
537 }
538
539 void
540 sx_downgrade_(struct sx *sx, const char *file, int line)
541 {
542
543         sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
544 }
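/*
 * Unlike upgrade, downgrade always succeeds.  A writer that has finished
 * modifying the data but still needs a consistent view can, for example
 * (illustrative sketch):
 *
 *	sx_xlock(&foo_lock);
 *	... modify the protected data ...
 *	sx_downgrade(&foo_lock);
 *	... keep reading alongside other readers ...
 *	sx_sunlock(&foo_lock);
 */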
545
546 #ifdef  ADAPTIVE_SX
547 static inline void
548 sx_drop_critical(uintptr_t x, bool *in_critical, int *extra_work)
549 {
550
551         if (x & SX_LOCK_WRITE_SPINNER)
552                 return;
553         if (*in_critical) {
554                 critical_exit();
555                 *in_critical = false;
556                 (*extra_work)--;
557         }
558 }
559 #else
560 #define sx_drop_critical(x, in_critical, extra_work) do { } while(0)
561 #endif
562
563 /*
564  * This function represents the so-called 'hard case' for sx_xlock
565  * operation.  All 'easy case' failures are redirected to this.  Note
566  * that ideally this would be a static function, but it needs to be
567  * accessible from at least sx.h.
568  */
569 int
570 _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
571 {
572         GIANT_DECLARE;
573         uintptr_t tid, setx;
574 #ifdef ADAPTIVE_SX
575         volatile struct thread *owner;
576         u_int i, n, spintries = 0;
577         enum { READERS, WRITER } sleep_reason = READERS;
578         bool in_critical = false;
579 #endif
580 #ifdef LOCK_PROFILING
581         uint64_t waittime = 0;
582         int contested = 0;
583 #endif
584         int error = 0;
585 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
586         struct lock_delay_arg lda;
587 #endif
588 #ifdef  KDTRACE_HOOKS
589         u_int sleep_cnt = 0;
590         int64_t sleep_time = 0;
591         int64_t all_time = 0;
592 #endif
593 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
594         uintptr_t state = 0;
595         int doing_lockprof = 0;
596 #endif
597         int extra_work = 0;
598
599         tid = (uintptr_t)curthread;
600
601 #ifdef KDTRACE_HOOKS
602         if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
603                 while (x == SX_LOCK_UNLOCKED) {
604                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
605                                 goto out_lockstat;
606                 }
607                 extra_work = 1;
608                 doing_lockprof = 1;
609                 all_time -= lockstat_nsecs(&sx->lock_object);
610                 state = x;
611         }
612 #endif
613 #ifdef LOCK_PROFILING
614         extra_work = 1;
615         doing_lockprof = 1;
616         state = x;
617 #endif
618
619         if (SCHEDULER_STOPPED())
620                 return (0);
621
622 #if defined(ADAPTIVE_SX)
623         lock_delay_arg_init(&lda, &sx_delay);
624 #elif defined(KDTRACE_HOOKS)
625         lock_delay_arg_init(&lda, NULL);
626 #endif
627
628         if (__predict_false(x == SX_LOCK_UNLOCKED))
629                 x = SX_READ_VALUE(sx);
630
631         /* If we already hold an exclusive lock, then recurse. */
632         if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
633                 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
634             ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
635                     sx->lock_object.lo_name, file, line));
636                 sx->sx_recurse++;
637                 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
638                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
639                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
640                 return (0);
641         }
642
643         if (LOCK_LOG_TEST(&sx->lock_object, 0))
644                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
645                     sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
646
647 #ifdef HWPMC_HOOKS
648         PMC_SOFT_CALL( , , lock, failed);
649 #endif
650         lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
651             &waittime);
652
653 #ifndef INVARIANTS
654         GIANT_SAVE(extra_work);
655 #endif
656
657         for (;;) {
658                 if (x == SX_LOCK_UNLOCKED) {
659                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
660                                 break;
661                         continue;
662                 }
663 #ifdef INVARIANTS
664                 GIANT_SAVE(extra_work);
665 #endif
666 #ifdef KDTRACE_HOOKS
667                 lda.spin_cnt++;
668 #endif
669 #ifdef ADAPTIVE_SX
670                 if (x == (SX_LOCK_SHARED | SX_LOCK_WRITE_SPINNER)) {
671                         if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
672                                 break;
673                         continue;
674                 }
675
676                 /*
677                  * If the lock is write locked and the owner is
678                  * running on another CPU, spin until the owner stops
679                  * running or the state of the lock changes.
680                  */
681                 if ((x & SX_LOCK_SHARED) == 0) {
682                         sx_drop_critical(x, &in_critical, &extra_work);
683                         sleep_reason = WRITER;
684                         owner = lv_sx_owner(x);
685                         if (!TD_IS_RUNNING(owner))
686                                 goto sleepq;
687                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
688                                 CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
689                                     __func__, sx, owner);
690                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
691                             "spinning", "lockname:\"%s\"",
692                             sx->lock_object.lo_name);
693                         do {
694                                 lock_delay(&lda);
695                                 x = SX_READ_VALUE(sx);
696                                 owner = lv_sx_owner(x);
697                         } while (owner != NULL && TD_IS_RUNNING(owner));
698                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
699                             "running");
700                         continue;
701                 } else if (SX_SHARERS(x) > 0) {
702                         sleep_reason = READERS;
703                         if (spintries == asx_retries)
704                                 goto sleepq;
705                         if (!(x & SX_LOCK_WRITE_SPINNER)) {
706                                 if (!in_critical) {
707                                         critical_enter();
708                                         in_critical = true;
709                                         extra_work++;
710                                 }
711                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
712                                     x | SX_LOCK_WRITE_SPINNER)) {
713                                         critical_exit();
714                                         in_critical = false;
715                                         extra_work--;
716                                         continue;
717                                 }
718                         }
719                         spintries++;
720                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
721                             "spinning", "lockname:\"%s\"",
722                             sx->lock_object.lo_name);
723                         n = SX_SHARERS(x);
724                         for (i = 0; i < asx_loops; i += n) {
725                                 lock_delay_spin(n);
726                                 x = SX_READ_VALUE(sx);
727                                 if (!(x & SX_LOCK_WRITE_SPINNER))
728                                         break;
729                                 if (!(x & SX_LOCK_SHARED))
730                                         break;
731                                 n = SX_SHARERS(x);
732                                 if (n == 0)
733                                         break;
734                         }
735 #ifdef KDTRACE_HOOKS
736                         lda.spin_cnt += i;
737 #endif
738                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
739                             "running");
740                         if (i < asx_loops)
741                                 continue;
742                 }
743 sleepq:
744 #endif
745                 sleepq_lock(&sx->lock_object);
746                 x = SX_READ_VALUE(sx);
747 retry_sleepq:
748
749                 /*
750                  * If the lock was released while spinning on the
751                  * sleep queue chain lock, try again.
752                  */
753                 if (x == SX_LOCK_UNLOCKED) {
754                         sleepq_release(&sx->lock_object);
755                         sx_drop_critical(x, &in_critical, &extra_work);
756                         continue;
757                 }
758
759 #ifdef ADAPTIVE_SX
760                 /*
761                  * The current lock owner might have started executing
762                  * on another CPU (or the lock could have changed
763                  * owners) while we were waiting on the sleep queue
764                  * chain lock.  If so, drop the sleep queue lock and try
765                  * again.
766                  */
767                 if (!(x & SX_LOCK_SHARED)) {
768                         owner = (struct thread *)SX_OWNER(x);
769                         if (TD_IS_RUNNING(owner)) {
770                                 sleepq_release(&sx->lock_object);
771                                 sx_drop_critical(x, &in_critical,
772                                     &extra_work);
773                                 continue;
774                         }
775                 } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
776                         sleepq_release(&sx->lock_object);
777                         sx_drop_critical(x, &in_critical, &extra_work);
778                         continue;
779                 }
780 #endif
781
782                 /*
783                  * If an exclusive lock was released with both shared
784                  * and exclusive waiters and a shared waiter hasn't
785                  * woken up and acquired the lock yet, sx_lock will be
786                  * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
787                  * If we see that value, try to acquire it once.  Note
788                  * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
789                  * as there are other exclusive waiters still.  If we
790                  * fail, restart the loop.
791                  */
792                 setx = x & (SX_LOCK_WAITERS | SX_LOCK_WRITE_SPINNER);
793                 if ((x & ~setx) == SX_LOCK_SHARED) {
794                         setx &= ~SX_LOCK_WRITE_SPINNER;
795                         if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid | setx))
796                                 goto retry_sleepq;
797                         sleepq_release(&sx->lock_object);
798                         CTR2(KTR_LOCK, "%s: %p claimed by new writer",
799                             __func__, sx);
800                         break;
801                 }
802
803 #ifdef ADAPTIVE_SX
804                 /*
805                  * It is possible we set the SX_LOCK_WRITE_SPINNER bit.
806                  * It is an invariant that when the bit is set, there is
807                  * a writer ready to grab the lock. Thus clear the bit since
808                  * we are going to sleep.
809                  */
810                 if (in_critical) {
811                         if ((x & SX_LOCK_WRITE_SPINNER) ||
812                             !((x & SX_LOCK_EXCLUSIVE_WAITERS))) {
813                                 setx = x & ~SX_LOCK_WRITE_SPINNER;
814                                 setx |= SX_LOCK_EXCLUSIVE_WAITERS;
815                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
816                                     setx)) {
817                                         goto retry_sleepq;
818                                 }
819                         }
820                         critical_exit();
821                         in_critical = false;
822                 } else {
823 #endif
824                         /*
825                          * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
826                          * fail, then loop back and retry.
827                          */
828                         if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
829                                 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
830                                     x | SX_LOCK_EXCLUSIVE_WAITERS)) {
831                                         goto retry_sleepq;
832                                 }
833                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
834                                         CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
835                                             __func__, sx);
836                         }
837 #ifdef ADAPTIVE_SX
838                 }
839 #endif
840
841                 /*
842                  * Since we have been unable to acquire the exclusive
843                  * lock and the exclusive waiters flag is set, we have
844                  * to sleep.
845                  */
846                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
847                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
848                             __func__, sx);
849
850 #ifdef KDTRACE_HOOKS
851                 sleep_time -= lockstat_nsecs(&sx->lock_object);
852 #endif
853                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
854                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
855                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
856                 if (!(opts & SX_INTERRUPTIBLE))
857                         sleepq_wait(&sx->lock_object, 0);
858                 else
859                         error = sleepq_wait_sig(&sx->lock_object, 0);
860 #ifdef KDTRACE_HOOKS
861                 sleep_time += lockstat_nsecs(&sx->lock_object);
862                 sleep_cnt++;
863 #endif
864                 if (error) {
865                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
866                                 CTR2(KTR_LOCK,
867                         "%s: interruptible sleep by %p suspended by signal",
868                                     __func__, sx);
869                         break;
870                 }
871                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
872                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
873                             __func__, sx);
874                 x = SX_READ_VALUE(sx);
875         }
876         if (__predict_true(!extra_work))
877                 return (error);
878 #ifdef ADAPTIVE_SX
879         if (in_critical)
880                 critical_exit();
881 #endif
882         GIANT_RESTORE();
883 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
884         if (__predict_true(!doing_lockprof))
885                 return (error);
886 #endif
887 #ifdef KDTRACE_HOOKS
888         all_time += lockstat_nsecs(&sx->lock_object);
889         if (sleep_time)
890                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
891                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
892                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
893         if (lda.spin_cnt > sleep_cnt)
894                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
895                     LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
896                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
897 out_lockstat:
898 #endif
899         if (!error)
900                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
901                     contested, waittime, file, line, LOCKSTAT_WRITER);
902         return (error);
903 }
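/*
 * For reference, the matching 'easy case' lives in sys/sx.h: the inline
 * acquire attempts a single atomic compare-and-set of the lock word from
 * SX_LOCK_UNLOCKED to the curthread pointer and only calls into
 * _sx_xlock_hard() when that fails, roughly (LOCKSTAT and file/line
 * bookkeeping omitted):
 *
 *	v = SX_LOCK_UNLOCKED;
 *	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &v, (uintptr_t)curthread))
 *		error = _sx_xlock_hard(sx, v, opts);
 */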
904
905 /*
906  * This function represents the so-called 'hard case' for sx_xunlock
907  * operation.  All 'easy case' failures are redirected to this.  Note
908  * that ideally this would be a static function, but it needs to be
909  * accessible from at least sx.h.
910  */
911 void
912 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
913 {
914         uintptr_t tid, setx;
915         int queue, wakeup_swapper;
916
917         if (SCHEDULER_STOPPED())
918                 return;
919
920         tid = (uintptr_t)curthread;
921
922         if (__predict_false(x == tid))
923                 x = SX_READ_VALUE(sx);
924
925         MPASS(!(x & SX_LOCK_SHARED));
926
927         if (__predict_false(x & SX_LOCK_RECURSED)) {
928                 /* The lock is recursed, unrecurse one level. */
929                 if ((--sx->sx_recurse) == 0)
930                         atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
931                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
932                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
933                 return;
934         }
935
936         LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
937         if (x == tid &&
938             atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
939                 return;
940
941         if (LOCK_LOG_TEST(&sx->lock_object, 0))
942                 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
943
944         sleepq_lock(&sx->lock_object);
945         x = SX_READ_VALUE(sx);
946         MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));
947
948         /*
949          * The wake up algorithm here is quite simple and probably not
950          * ideal.  It gives precedence to shared waiters if they are
951          * present.  For this condition, we have to preserve the
952          * state of the exclusive waiters flag.
953          * If interruptible sleeps left the shared queue empty, avoid
954          * starvation of the threads sleeping on the exclusive queue by giving
955          * them precedence and cleaning up the shared waiters bit anyway.
956          */
957         setx = SX_LOCK_UNLOCKED;
958         queue = SQ_EXCLUSIVE_QUEUE;
959         if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
960             sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
961                 queue = SQ_SHARED_QUEUE;
962                 setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
963         }
964         atomic_store_rel_ptr(&sx->sx_lock, setx);
965
966         /* Wake up all the waiters for the specific queue. */
967         if (LOCK_LOG_TEST(&sx->lock_object, 0))
968                 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
969                     __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
970                     "exclusive");
971
972         wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
973             queue);
974         sleepq_release(&sx->lock_object);
975         if (wakeup_swapper)
976                 kick_proc0();
977 }
978
979 static bool __always_inline
980 __sx_can_read(struct thread *td, uintptr_t x, bool fp)
981 {
982
983         if ((x & (SX_LOCK_SHARED | SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_WRITE_SPINNER))
984                         == SX_LOCK_SHARED)
985                 return (true);
986         if (!fp && td->td_sx_slocks && (x & SX_LOCK_SHARED))
987                 return (true);
988         return (false);
989 }
990
991 static bool __always_inline
992 __sx_slock_try(struct sx *sx, struct thread *td, uintptr_t *xp, bool fp
993     LOCK_FILE_LINE_ARG_DEF)
994 {
995
996         /*
997          * If no other thread has an exclusive lock then try to bump up
998          * the count of sharers.  Since we have to preserve the state
999          * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
1000          * shared lock loop back and retry.
1001          */
1002         while (__sx_can_read(td, *xp, fp)) {
1003                 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp,
1004                     *xp + SX_ONE_SHARER)) {
1005                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1006                                 CTR4(KTR_LOCK, "%s: %p succeed %p -> %p",
1007                                     __func__, sx, (void *)*xp,
1008                                     (void *)(*xp + SX_ONE_SHARER));
1009                         td->td_sx_slocks++;
1010                         return (true);
1011                 }
1012         }
1013         return (false);
1014 }
1015
1016 static int __noinline
1017 _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
1018 {
1019         GIANT_DECLARE;
1020         struct thread *td;
1021 #ifdef ADAPTIVE_SX
1022         volatile struct thread *owner;
1023         u_int i, n, spintries = 0;
1024 #endif
1025 #ifdef LOCK_PROFILING
1026         uint64_t waittime = 0;
1027         int contested = 0;
1028 #endif
1029         int error = 0;
1030 #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
1031         struct lock_delay_arg lda;
1032 #endif
1033 #ifdef KDTRACE_HOOKS
1034         u_int sleep_cnt = 0;
1035         int64_t sleep_time = 0;
1036         int64_t all_time = 0;
1037 #endif
1038 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
1039         uintptr_t state = 0;
1040 #endif
1041         int extra_work = 0;
1042
1043         td = curthread;
1044
1045 #ifdef KDTRACE_HOOKS
1046         if (LOCKSTAT_PROFILE_ENABLED(sx__acquire)) {
1047                 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1048                         goto out_lockstat;
1049                 extra_work = 1;
1050                 all_time -= lockstat_nsecs(&sx->lock_object);
1051                 state = x;
1052         }
1053 #endif
1054 #ifdef LOCK_PROFILING
1055         extra_work = 1;
1056         state = x;
1057 #endif
1058
1059         if (SCHEDULER_STOPPED())
1060                 return (0);
1061
1062 #if defined(ADAPTIVE_SX)
1063         lock_delay_arg_init(&lda, &sx_delay);
1064 #elif defined(KDTRACE_HOOKS)
1065         lock_delay_arg_init(&lda, NULL);
1066 #endif
1067
1068 #ifdef HWPMC_HOOKS
1069         PMC_SOFT_CALL( , , lock, failed);
1070 #endif
1071         lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
1072             &waittime);
1073
1074 #ifndef INVARIANTS
1075         GIANT_SAVE(extra_work);
1076 #endif
1077
1078         /*
1079          * As with rwlocks, we don't make any attempt to block new
1080          * shared locks once there is an exclusive waiter.
1081          */
1082         for (;;) {
1083                 if (__sx_slock_try(sx, td, &x, false LOCK_FILE_LINE_ARG))
1084                         break;
1085 #ifdef INVARIANTS
1086                 GIANT_SAVE(extra_work);
1087 #endif
1088 #ifdef KDTRACE_HOOKS
1089                 lda.spin_cnt++;
1090 #endif
1091
1092 #ifdef ADAPTIVE_SX
1093                 /*
1094                  * If the owner is running on another CPU, spin until
1095                  * the owner stops running or the state of the lock
1096                  * changes.
1097                  */
1098                 if ((x & SX_LOCK_SHARED) == 0) {
1099                         owner = lv_sx_owner(x);
1100                         if (TD_IS_RUNNING(owner)) {
1101                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1102                                         CTR3(KTR_LOCK,
1103                                             "%s: spinning on %p held by %p",
1104                                             __func__, sx, owner);
1105                                 KTR_STATE1(KTR_SCHED, "thread",
1106                                     sched_tdname(curthread), "spinning",
1107                                     "lockname:\"%s\"", sx->lock_object.lo_name);
1108                                 do {
1109                                         lock_delay(&lda);
1110                                         x = SX_READ_VALUE(sx);
1111                                         owner = lv_sx_owner(x);
1112                                 } while (owner != NULL && TD_IS_RUNNING(owner));
1113                                 KTR_STATE0(KTR_SCHED, "thread",
1114                                     sched_tdname(curthread), "running");
1115                                 continue;
1116                         }
1117                 } else {
1118                         if ((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) {
1119                                 MPASS(!__sx_can_read(td, x, false));
1120                                 lock_delay_spin(2);
1121                                 x = SX_READ_VALUE(sx);
1122                                 continue;
1123                         }
1124                         if (spintries < asx_retries) {
1125                                 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
1126                                     "spinning", "lockname:\"%s\"",
1127                                     sx->lock_object.lo_name);
1128                                 n = SX_SHARERS(x);
1129                                 for (i = 0; i < asx_loops; i += n) {
1130                                         lock_delay_spin(n);
1131                                         x = SX_READ_VALUE(sx);
1132                                         if (!(x & SX_LOCK_SHARED))
1133                                                 break;
1134                                         n = SX_SHARERS(x);
1135                                         if (n == 0)
1136                                                 break;
1137                                         if (__sx_can_read(td, x, false))
1138                                                 break;
1139                                 }
1140 #ifdef KDTRACE_HOOKS
1141                                 lda.spin_cnt += i;
1142 #endif
1143                                 KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
1144                                     "running");
1145                                 if (i < asx_loops)
1146                                         continue;
1147                         }
1148                 }
1149 #endif
1150
1151                 /*
1152                  * Some other thread already has an exclusive lock, so
1153                  * start the process of blocking.
1154                  */
1155                 sleepq_lock(&sx->lock_object);
1156                 x = SX_READ_VALUE(sx);
1157 retry_sleepq:
1158                 if (((x & SX_LOCK_WRITE_SPINNER) && SX_SHARERS(x) == 0) ||
1159                     __sx_can_read(td, x, false)) {
1160                         sleepq_release(&sx->lock_object);
1161                         continue;
1162                 }
1163
1164 #ifdef ADAPTIVE_SX
1165                 /*
1166                  * If the owner is running on another CPU, spin until
1167                  * the owner stops running or the state of the lock
1168                  * changes.
1169                  */
1170                 if (!(x & SX_LOCK_SHARED)) {
1171                         owner = (struct thread *)SX_OWNER(x);
1172                         if (TD_IS_RUNNING(owner)) {
1173                                 sleepq_release(&sx->lock_object);
1174                                 x = SX_READ_VALUE(sx);
1175                                 continue;
1176                         }
1177                 }
1178 #endif
1179
1180                 /*
1181                  * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
1182                  * fail to set it drop the sleep queue lock and loop
1183                  * back.
1184                  */
1185                 if (!(x & SX_LOCK_SHARED_WAITERS)) {
1186                         if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
1187                             x | SX_LOCK_SHARED_WAITERS))
1188                                 goto retry_sleepq;
1189                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1190                                 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
1191                                     __func__, sx);
1192                 }
1193
1194                 /*
1195                  * Since we have been unable to acquire the shared lock,
1196                  * we have to sleep.
1197                  */
1198                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1199                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
1200                             __func__, sx);
1201
1202 #ifdef KDTRACE_HOOKS
1203                 sleep_time -= lockstat_nsecs(&sx->lock_object);
1204 #endif
1205                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
1206                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
1207                     SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
1208                 if (!(opts & SX_INTERRUPTIBLE))
1209                         sleepq_wait(&sx->lock_object, 0);
1210                 else
1211                         error = sleepq_wait_sig(&sx->lock_object, 0);
1212 #ifdef KDTRACE_HOOKS
1213                 sleep_time += lockstat_nsecs(&sx->lock_object);
1214                 sleep_cnt++;
1215 #endif
1216                 if (error) {
1217                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
1218                                 CTR2(KTR_LOCK,
1219                         "%s: interruptible sleep by %p suspended by signal",
1220                                     __func__, sx);
1221                         break;
1222                 }
1223                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1224                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
1225                             __func__, sx);
1226                 x = SX_READ_VALUE(sx);
1227         }
1228 #if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
1229         if (__predict_true(!extra_work))
1230                 return (error);
1231 #endif
1232 #ifdef KDTRACE_HOOKS
1233         all_time += lockstat_nsecs(&sx->lock_object);
1234         if (sleep_time)
1235                 LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
1236                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1237                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1238         if (lda.spin_cnt > sleep_cnt)
1239                 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
1240                     LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
1241                     (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
1242 out_lockstat:
1243 #endif
1244         if (error == 0) {
1245                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
1246                     contested, waittime, file, line, LOCKSTAT_READER);
1247         }
1248         GIANT_RESTORE();
1249         return (error);
1250 }
1251
1252 int
1253 _sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
1254 {
1255         struct thread *td;
1256         uintptr_t x;
1257         int error;
1258
1259         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
1260             !TD_IS_IDLETHREAD(curthread),
1261             ("sx_slock() by idle thread %p on sx %s @ %s:%d",
1262             curthread, sx->lock_object.lo_name, file, line));
1263         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1264             ("sx_slock() of destroyed sx @ %s:%d", file, line));
1265         WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
1266
1267         error = 0;
1268         td = curthread;
1269         x = SX_READ_VALUE(sx);
1270         if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__acquire) ||
1271             !__sx_slock_try(sx, td, &x, true LOCK_FILE_LINE_ARG)))
1272                 error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
1273         else
1274                 lock_profile_obtain_lock_success(&sx->lock_object, 0, 0,
1275                     file, line);
1276         if (error == 0) {
1277                 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
1278                 WITNESS_LOCK(&sx->lock_object, 0, file, line);
1279                 TD_LOCKS_INC(curthread);
1280         }
1281         return (error);
1282 }
1283
1284 int
1285 _sx_slock(struct sx *sx, int opts, const char *file, int line)
1286 {
1287
1288         return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
1289 }
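/*
 * The opts argument is how sx_slock_sig() asks for an interruptible sleep;
 * callers of that variant must check the return value (illustrative sketch,
 * hypothetical lock name):
 *
 *	error = sx_slock_sig(&foo_lock);
 *	if (error != 0)
 *		return (error);		(sleep was interrupted by a signal)
 *	... read the protected data ...
 *	sx_sunlock(&foo_lock);
 */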
1290
1291 static bool __always_inline
1292 _sx_sunlock_try(struct sx *sx, struct thread *td, uintptr_t *xp)
1293 {
1294
1295         for (;;) {
1296                 if (SX_SHARERS(*xp) > 1 || !(*xp & SX_LOCK_WAITERS)) {
1297                         if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp,
1298                             *xp - SX_ONE_SHARER)) {
1299                                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1300                                         CTR4(KTR_LOCK,
1301                                             "%s: %p succeeded %p -> %p",
1302                                             __func__, sx, (void *)*xp,
1303                                             (void *)(*xp - SX_ONE_SHARER));
1304                                 td->td_sx_slocks--;
1305                                 return (true);
1306                         }
1307                         continue;
1308                 }
1309                 break;
1310         }
1311         return (false);
1312 }
1313
1314 static void __noinline
1315 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
1316     LOCK_FILE_LINE_ARG_DEF)
1317 {
1318         int wakeup_swapper = 0;
1319         uintptr_t setx, queue;
1320
1321         if (SCHEDULER_STOPPED())
1322                 return;
1323
1324         if (_sx_sunlock_try(sx, td, &x))
1325                 goto out_lockstat;
1326
1327         sleepq_lock(&sx->lock_object);
1328         x = SX_READ_VALUE(sx);
1329         for (;;) {
1330                 if (_sx_sunlock_try(sx, td, &x))
1331                         break;
1332
1333                 /*
1334                  * The wake up semantics here are quite simple:
1335                  * just wake up all the exclusive waiters.
1336                  * Note that the state of the lock could have changed,
1337                  * so if the update fails, loop back and retry.
1338                  */
1339                 setx = SX_LOCK_UNLOCKED;
1340                 queue = SQ_SHARED_QUEUE;
1341                 if (x & SX_LOCK_EXCLUSIVE_WAITERS) {
1342                         setx |= (x & SX_LOCK_SHARED_WAITERS);
1343                         queue = SQ_EXCLUSIVE_QUEUE;
1344                 }
1345                 setx |= (x & SX_LOCK_WRITE_SPINNER);
1346                 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1347                         continue;
1348                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
1349                         CTR2(KTR_LOCK, "%s: %p waking up all threads on "
1350                             "exclusive queue", __func__, sx);
1351                 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
1352                     0, queue);
1353                 td->td_sx_slocks--;
1354                 break;
1355         }
1356         sleepq_release(&sx->lock_object);
1357         if (wakeup_swapper)
1358                 kick_proc0();
1359 out_lockstat:
1360         LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
1361 }
1362
1363 void
1364 _sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
1365 {
1366         struct thread *td;
1367         uintptr_t x;
1368
1369         KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
1370             ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
1371         _sx_assert(sx, SA_SLOCKED, file, line);
1372         WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
1373         LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
1374
1375         td = curthread;
1376         x = SX_READ_VALUE(sx);
1377         if (__predict_false(LOCKSTAT_PROFILE_ENABLED(sx__release) ||
1378             !_sx_sunlock_try(sx, td, &x)))
1379                 _sx_sunlock_hard(sx, td, x LOCK_FILE_LINE_ARG);
1380         else
1381                 lock_profile_release_lock(&sx->lock_object);
1382
1383         TD_LOCKS_DEC(curthread);
1384 }
1385
1386 void
1387 _sx_sunlock(struct sx *sx, const char *file, int line)
1388 {
1389
1390         _sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
1391 }
1392
1393 #ifdef INVARIANT_SUPPORT
1394 #ifndef INVARIANTS
1395 #undef  _sx_assert
1396 #endif
1397
1398 /*
1399  * In the non-WITNESS case, sx_assert() can only detect that at least
1400  * *some* thread owns an slock, but it cannot guarantee that *this*
1401  * thread owns an slock.
1402  */
1403 void
1404 _sx_assert(const struct sx *sx, int what, const char *file, int line)
1405 {
1406 #ifndef WITNESS
1407         int slocked = 0;
1408 #endif
1409
1410         if (SCHEDULER_STOPPED())
1411                 return;
1412         switch (what) {
1413         case SA_SLOCKED:
1414         case SA_SLOCKED | SA_NOTRECURSED:
1415         case SA_SLOCKED | SA_RECURSED:
1416 #ifndef WITNESS
1417                 slocked = 1;
1418                 /* FALLTHROUGH */
1419 #endif
1420         case SA_LOCKED:
1421         case SA_LOCKED | SA_NOTRECURSED:
1422         case SA_LOCKED | SA_RECURSED:
1423 #ifdef WITNESS
1424                 witness_assert(&sx->lock_object, what, file, line);
1425 #else
1426                 /*
1427                  * If some other thread has an exclusive lock or we
1428                  * have one and are asserting a shared lock, fail.
1429                  * Also, if no one has a lock at all, fail.
1430                  */
1431                 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
1432                     (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
1433                     sx_xholder(sx) != curthread)))
1434                         panic("Lock %s not %slocked @ %s:%d\n",
1435                             sx->lock_object.lo_name, slocked ? "share " : "",
1436                             file, line);
1437
1438                 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
1439                         if (sx_recursed(sx)) {
1440                                 if (what & SA_NOTRECURSED)
1441                                         panic("Lock %s recursed @ %s:%d\n",
1442                                             sx->lock_object.lo_name, file,
1443                                             line);
1444                         } else if (what & SA_RECURSED)
1445                                 panic("Lock %s not recursed @ %s:%d\n",
1446                                     sx->lock_object.lo_name, file, line);
1447                 }
1448 #endif
1449                 break;
1450         case SA_XLOCKED:
1451         case SA_XLOCKED | SA_NOTRECURSED:
1452         case SA_XLOCKED | SA_RECURSED:
1453                 if (sx_xholder(sx) != curthread)
1454                         panic("Lock %s not exclusively locked @ %s:%d\n",
1455                             sx->lock_object.lo_name, file, line);
1456                 if (sx_recursed(sx)) {
1457                         if (what & SA_NOTRECURSED)
1458                                 panic("Lock %s recursed @ %s:%d\n",
1459                                     sx->lock_object.lo_name, file, line);
1460                 } else if (what & SA_RECURSED)
1461                         panic("Lock %s not recursed @ %s:%d\n",
1462                             sx->lock_object.lo_name, file, line);
1463                 break;
1464         case SA_UNLOCKED:
1465 #ifdef WITNESS
1466                 witness_assert(&sx->lock_object, what, file, line);
1467 #else
1468                 /*
1469          * If we hold an exclusive lock, fail.  We can't
1470                  * reliably check to see if we hold a shared lock or
1471                  * not.
1472                  */
1473                 if (sx_xholder(sx) == curthread)
1474                         panic("Lock %s exclusively locked @ %s:%d\n",
1475                             sx->lock_object.lo_name, file, line);
1476 #endif
1477                 break;
1478         default:
1479                 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
1480                     line);
1481         }
1482 }
1483 #endif  /* INVARIANT_SUPPORT */
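/*
 * Typical callers use sx_assert() to document and enforce locking
 * requirements at function entry, for instance (hypothetical function and
 * lock names):
 *
 *	static void
 *	foo_modify(struct foo *fp)
 *	{
 *
 *		sx_assert(&fp->foo_lock, SA_XLOCKED);
 *		... the caller must hold the exclusive lock here ...
 *	}
 */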
1484
1485 #ifdef DDB
1486 static void
1487 db_show_sx(const struct lock_object *lock)
1488 {
1489         struct thread *td;
1490         const struct sx *sx;
1491
1492         sx = (const struct sx *)lock;
1493
1494         db_printf(" state: ");
1495         if (sx->sx_lock == SX_LOCK_UNLOCKED)
1496                 db_printf("UNLOCKED\n");
1497         else if (sx->sx_lock == SX_LOCK_DESTROYED) {
1498                 db_printf("DESTROYED\n");
1499                 return;
1500         } else if (sx->sx_lock & SX_LOCK_SHARED)
1501                 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
1502         else {
1503                 td = sx_xholder(sx);
1504                 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1505                     td->td_tid, td->td_proc->p_pid, td->td_name);
1506                 if (sx_recursed(sx))
1507                         db_printf(" recursed: %d\n", sx->sx_recurse);
1508         }
1509
1510         db_printf(" waiters: ");
1511         switch(sx->sx_lock &
1512             (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
1513         case SX_LOCK_SHARED_WAITERS:
1514                 db_printf("shared\n");
1515                 break;
1516         case SX_LOCK_EXCLUSIVE_WAITERS:
1517                 db_printf("exclusive\n");
1518                 break;
1519         case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
1520                 db_printf("exclusive and shared\n");
1521                 break;
1522         default:
1523                 db_printf("none\n");
1524         }
1525 }
1526
1527 /*
1528  * Check to see if a thread that is blocked on a sleep queue is actually
1529  * blocked on an sx lock.  If so, output some details and return true.
1530  * If the lock has an exclusive owner, return that in *ownerp.
1531  */
1532 int
1533 sx_chain(struct thread *td, struct thread **ownerp)
1534 {
1535         const struct sx *sx;
1536
1537         /*
1538          * Check to see if this thread is blocked on an sx lock.
1539          * First, we check the lock class.  If that is ok, then we
1540          * compare the lock name against the wait message.
1541          */
1542         sx = td->td_wchan;
1543         if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
1544             sx->lock_object.lo_name != td->td_wmesg)
1545                 return (0);
1546
1547         /* We think we have an sx lock, so output some details. */
1548         db_printf("blocked on sx \"%s\" ", td->td_wmesg);
1549         *ownerp = sx_xholder(sx);
1550         if (sx->sx_lock & SX_LOCK_SHARED)
1551                 db_printf("SLOCK (count %ju)\n",
1552                     (uintmax_t)SX_SHARERS(sx->sx_lock));
1553         else
1554                 db_printf("XLOCK\n");
1555         return (1);
1556 }
1557 #endif