/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
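
/*
 * Illustrative usage sketch of the sx(9) API implemented below.  The "foo"
 * names are hypothetical consumers, not part of this file:
 *
 *      static struct sx foo_lock;
 *
 *      sx_init(&foo_lock, "foo lock");         (initialize once)
 *      sx_slock(&foo_lock);                    (shared, read-only access)
 *      ...
 *      sx_sunlock(&foo_lock);
 *      sx_xlock(&foo_lock);                    (exclusive, read-write access)
 *      ...
 *      sx_xunlock(&foo_lock);
 *      sx_destroy(&foo_lock);                  (tear down when done)
 */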

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if defined(SMP) && !defined(NO_ADAPTIVE_SX)
#define ADAPTIVE_SX
#endif

CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

#ifdef ADAPTIVE_SX
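/*
 * Limits for adaptive spinning when a shared holder blocks an exclusive
 * request: up to ASX_RETRIES rounds of at most ASX_LOOPS iterations each
 * (see the shared-owner spin in _sx_xlock_hard() below).
 */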
#define ASX_RETRIES             10
#define ASX_LOOPS               10000
#endif

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE() do {                                               \
        if (mtx_owned(&Giant)) {                                        \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recurse              lock_object.lo_data
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

static void     assert_sx(struct lock_object *lock, int what);
#ifdef DDB
static void     db_show_sx(struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_sx(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_sx,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_sx,
#endif
};

#ifndef INVARIANTS
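/* Without INVARIANTS, _sx_assert() expands to nothing. */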
#define _sx_assert(sx, what, file, line)
#endif

void
assert_sx(struct lock_object *lock, int what)
{

        sx_assert((struct sx *)lock, what);
}

void
lock_sx(struct lock_object *lock, int how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_xlock(sx);
        else
                sx_slock(sx);
}

int
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (1);
        } else {
                sx_sunlock(sx);
                return (0);
        }
}

#ifdef KDTRACE_HOOKS
int
owner_sx(struct lock_object *lock, struct thread **owner)
{
        struct sx *sx = (struct sx *)lock;
        uintptr_t x = sx->sx_lock;

        *owner = (struct thread *)SX_OWNER(x);
        return ((x & SX_LOCK_SHARED) != 0 ? (SX_SHARERS(x) != 0) :
            (*owner != NULL));
}
#endif

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init(sargs->sa_sx, sargs->sa_desc);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
            ("%s: sx_lock not aligned for %s: %p", __func__, description,
            &sx->sx_lock));

        flags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;

        flags |= opts & SX_NOADAPTIVE;
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        if (SCHEDULER_STOPPED())
                return (0);
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
        error = __sx_slock(sx, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                curthread->td_locks++;
        }

        return (error);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        for (;;) {
                x = sx->sx_lock;
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        curthread->td_locks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        if (SCHEDULER_STOPPED())
                return (0);
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        error = __sx_xlock(sx, curthread, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                curthread->td_locks++;
        }

        return (error);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        if (sx_xlocked(sx) &&
            (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
                    (uintptr_t)curthread);
        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }

        return (rval);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
        __sx_sunlock(sx, file, line);
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
        if (!sx_recursed(sx))
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
        __sx_xunlock(sx, curthread, file, line);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
        success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
            (uintptr_t)curthread | x);
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success) {
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(LS_SX_TRYUPGRADE_UPGRADE, sx);
        }
        return (success);
}

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
                return;
        }

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_SHARED_QUEUE);
        sleepq_release(&sx->lock_object);

        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

        if (wakeup_swapper)
                kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
        u_int i, spintries = 0;
#endif
        uintptr_t x;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        int error = 0;
#ifdef  KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

        /* If we already hold an exclusive lock, then recurse. */
        if (sx_xlocked(sx)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

        while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                    &waittime);
#ifdef ADAPTIVE_SX
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                x = sx->sx_lock;
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        if ((x & SX_LOCK_SHARED) == 0) {
                                x = SX_OWNER(x);
                                owner = (struct thread *)x;
                                if (TD_IS_RUNNING(owner)) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                                    __func__, sx, owner);
                                        GIANT_SAVE();
                                        while (SX_OWNER(sx->sx_lock) == x &&
                                            TD_IS_RUNNING(owner)) {
                                                cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                                spin_cnt++;
#endif
                                        }
                                        continue;
                                }
                        } else if (SX_SHARERS(x) && spintries < ASX_RETRIES) {
                                GIANT_SAVE();
                                spintries++;
                                for (i = 0; i < ASX_LOOPS; i++) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR4(KTR_LOCK,
                                    "%s: shared spinning on %p with %u and %u",
                                                    __func__, sx, spintries, i);
                                        x = sx->sx_lock;
                                        if ((x & SX_LOCK_SHARED) == 0 ||
                                            SX_SHARERS(x) == 0)
                                                break;
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                if (i != ASX_LOOPS)
                                        continue;
                        }
                }
#endif

                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                            SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                                    __func__, sx);
                                break;
                        }
                        sleepq_release(&sx->lock_object);
                        continue;
                }

                /*
                 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
                 * then loop back and retry.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }

        GIANT_RESTORE();
        if (!error)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
                    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
        uintptr_t x;
        int queue, wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

        /* If the lock is recursed, then unrecurse one level. */
        if (sx_xlocked(sx) && sx_recursed(sx)) {
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }
        MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
            SX_LOCK_EXCLUSIVE_WAITERS));
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_LOCK_UNLOCKED;

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         * If interruptible sleeps left the shared queue empty, avoid
         * starvation of the threads sleeping on the exclusive queue by giving
         * them precedence and clearing the shared waiters bit anyway.
         */
        if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
            sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
                queue = SQ_SHARED_QUEUE;
                x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
        } else
                queue = SQ_EXCLUSIVE_QUEUE;

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
        atomic_store_rel_ptr(&sx->sx_lock, x);
        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
            queue);
        sleepq_release(&sx->lock_object);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t x;
        int error = 0;
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
                x = sx->sx_lock;

                /*
                 * If no other thread has an exclusive lock then try to bump up
                 * the count of sharers.  Since we have to preserve the state
                 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
                 * shared lock loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        MPASS(!(x & SX_LOCK_SHARED_WAITERS));
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
                            x + SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            sx, (void *)x,
                                            (void *)(x + SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                    &waittime);

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        x = SX_OWNER(x);
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                GIANT_SAVE();
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                        cpu_spinwait();
                                }
                                continue;
                        }
                }
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * The lock could have been released while we spun.
                 * In this case loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object, 0);
                else
                        error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }
        if (error == 0)
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
                    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
        GIANT_RESTORE();
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        if (SCHEDULER_STOPPED())
                return;

        for (;;) {
                x = sx->sx_lock;

                /*
                 * We should never have waiting sharers while at least one
                 * thread holds a shared lock.
                 */
                KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
                    ("%s: waiting sharers", __func__));

                /*
                 * See if there is more than one shared lock held.  If
                 * so, just drop one and return.
                 */
                if (SX_SHARERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
                            x - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)x,
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }

                /*
                 * If there aren't any waiters for an exclusive lock,
                 * then try to drop it quickly.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(x == SX_SHARERS_LOCK(1));
                        if (atomic_cmpset_rel_ptr(&sx->sx_lock,
                            SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, sx);
                                break;
                        }
                        continue;
                }

                /*
                 * At this point, there should just be one sharer with
                 * exclusive waiters.
                 */
                MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

                sleepq_lock(&sx->lock_object);

                /*
                 * Wake up semantic here is quite simple:
                 * Just wake up all the exclusive waiters.
                 * Note that the state of the lock could have changed,
                 * so if it fails loop back and retry.
                 */
                if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    0, SQ_EXCLUSIVE_QUEUE);
                sleepq_release(&sx->lock_object);
                if (wakeup_swapper)
                        kick_proc0();
                break;
        }
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (panicstr != NULL)
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(struct lock_object *lock)
{
        struct thread *td;
        struct sx *sx;

        sx = (struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch(sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif