/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so it should not be relied upon in combination with sx locks.
 */
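
/*
 * Illustrative usage sketch (not part of this file's logic; "foo_lock"
 * and "foo_data" are hypothetical names).  A typical consumer initializes
 * the lock once, takes it shared for readers and exclusive for writers,
 * and destroys it on teardown:
 *
 *      static struct sx foo_lock;
 *      static int foo_data;
 *
 *      sx_init(&foo_lock, "foo data lock");
 *
 *      sx_slock(&foo_lock);            (read side, many concurrent holders)
 *      v = foo_data;
 *      sx_sunlock(&foo_lock);
 *
 *      sx_xlock(&foo_lock);            (write side, single holder)
 *      foo_data = v;
 *      sx_xunlock(&foo_lock);
 *
 *      sx_destroy(&foo_lock);
 */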

#include "opt_adaptive_sx.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/systm.h>

#ifdef ADAPTIVE_SX
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if !defined(SMP) && defined(ADAPTIVE_SX)
#error "You must have SMP to enable the ADAPTIVE_SX option"
#endif

CTASSERT(((SX_ADAPTIVESPIN | SX_RECURSE) & LO_CLASSFLAGS) ==
    (SX_ADAPTIVESPIN | SX_RECURSE));

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE                                                   \
        int _giantcnt = 0;                                              \
        WITNESS_SAVE_DECL(Giant)                                        \

#define GIANT_SAVE() do {                                               \
        if (mtx_owned(&Giant)) {                                        \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _giantcnt++;                                    \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

#define GIANT_RESTORE() do {                                            \
        if (_giantcnt > 0) {                                            \
                mtx_assert(&Giant, MA_NOTOWNED);                        \
                while (_giantcnt--)                                     \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

#ifdef DDB
static void     db_show_sx(struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, int how);
static int      unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

void
lock_sx(struct lock_object *lock, int how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_xlock(sx);
        else
                sx_slock(sx);
}

int
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (1);
        } else {
                sx_sunlock(sx);
                return (0);
        }
}

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init(sargs->sa_sx, sargs->sa_desc);
}
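
/*
 * Illustrative sketch: sx_sysinit() is normally reached through the
 * SX_SYSINIT() macro from <sys/sx.h>, which arranges for the lock to be
 * initialized during boot, along the lines of (hypothetical names):
 *
 *      static struct sx foo_lock;
 *      SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */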

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
            SX_NOPROFILE | SX_ADAPTIVESPIN)) == 0);

        flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;

        flags |= opts & (SX_ADAPTIVESPIN | SX_RECURSE);
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        sx->sx_lock = SX_LOCK_DESTROYED;
        lock_destroy(&sx->lock_object);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_slock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
        error = __sx_slock(sx, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
                WITNESS_LOCK(&sx->lock_object, 0, file, line);
                curthread->td_locks++;
        }

        return (error);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        for (;;) {
                x = sx->sx_lock;
                KASSERT(x != SX_LOCK_DESTROYED,
                    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
                if (!(x & SX_LOCK_SHARED))
                        break;
                if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
                        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                        WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                        curthread->td_locks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
        int error = 0;

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xlock() of destroyed sx @ %s:%d", file, line));
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line);
        error = __sx_xlock(sx, curthread, opts, file, line);
        if (!error) {
                LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
                    file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
                curthread->td_locks++;
        }

        return (error);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

        if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
                    (uintptr_t)curthread);
        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }

        return (rval);
}
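
/*
 * Illustrative try-lock sketch (hypothetical caller): the try variants
 * never sleep, so they suit contexts where blocking is not allowed:
 *
 *      if (sx_try_xlock(&foo_lock) == 0)
 *              return (EWOULDBLOCK);   (or defer the work)
 *      foo_data = v;
 *      sx_xunlock(&foo_lock);
 */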

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
#ifdef LOCK_PROFILING_SHARED
        if (SX_SHARERS(sx->sx_lock) == 1)
                lock_profile_release_lock(&sx->lock_object);
#endif
        __sx_sunlock(sx, file, line);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
        if (!sx_recursed(sx))
                lock_profile_release_lock(&sx->lock_object);
        __sx_xunlock(sx, curthread, file, line);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int success;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
        success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
            (uintptr_t)curthread | x);
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success)
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        return (success);
}
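
/*
 * Illustrative upgrade pattern (hypothetical caller): since the upgrade
 * only succeeds for a lone sharer, callers must be prepared to fall back
 * to dropping the shared lock and reacquiring it exclusively, revalidating
 * any state that was examined under the shared lock:
 *
 *      sx_slock(&foo_lock);
 *      if (needs_update && !sx_try_upgrade(&foo_lock)) {
 *              sx_sunlock(&foo_lock);
 *              sx_xlock(&foo_lock);
 *              (the state may have changed; recheck needs_update here)
 *      }
 */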

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
            ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
        _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
                return;
        }

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        wakeup_swapper = 0;
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    -1, SQ_SHARED_QUEUE);
        else
                sleepq_release(&sx->lock_object);

        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);

        if (wakeup_swapper)
                kick_proc0();
}
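
/*
 * Illustrative downgrade pattern (hypothetical caller): a writer that has
 * finished modifying state but still needs a consistent view can shed its
 * exclusive hold without a window in which the lock is unowned:
 *
 *      sx_xlock(&foo_lock);
 *      (modify the protected state)
 *      sx_downgrade(&foo_lock);
 *      (continue reading the state alongside other sharers)
 *      sx_sunlock(&foo_lock);
 */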

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
    int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
#endif
        uint64_t waittime = 0;
        uintptr_t x;
        int contested = 0, error = 0;

        /* If we already hold an exclusive lock, then recurse. */
        if (sx_xlocked(sx)) {
                KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return (0);
        }

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

        while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef ADAPTIVE_SX
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                x = sx->sx_lock;
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
                        x = SX_OWNER(x);
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                GIANT_SAVE();
                                lock_profile_obtain_lock_failed(
                                    &sx->lock_object, &contested, &waittime);
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner))
                                        cpu_spinwait();
                                continue;
                        }
                }
#endif

                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                            SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                                    __func__, sx);
                                break;
                        }
                        sleepq_release(&sx->lock_object);
                        continue;
                }
529                 /*
530                  * Try to set the SX_LOCK_EXCLUSIVE_WAITERS.  If we fail,
531                  * than loop back and retry.
532                  */
533                 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
534                         if (!atomic_cmpset_ptr(&sx->sx_lock, x,
535                             x | SX_LOCK_EXCLUSIVE_WAITERS)) {
536                                 sleepq_release(&sx->lock_object);
537                                 continue;
538                         }
539                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
540                                 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
541                                     __func__, sx);
542                 }
543
544                 /*
545                  * Since we have been unable to acquire the exclusive
546                  * lock and the exclusive waiters flag is set, we have
547                  * to sleep.
548                  */
549                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
550                         CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
551                             __func__, sx);
552
553                 GIANT_SAVE();
554                 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
555                     &waittime);
556                 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
557                     SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
558                     SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
559                 if (!(opts & SX_INTERRUPTIBLE))
560                         sleepq_wait(&sx->lock_object);
561                 else
562                         error = sleepq_wait_sig(&sx->lock_object);
563
564                 if (error) {
565                         if (LOCK_LOG_TEST(&sx->lock_object, 0))
566                                 CTR2(KTR_LOCK,
567                         "%s: interruptible sleep by %p suspended by signal",
568                                     __func__, sx);
569                         break;
570                 }
571                 if (LOCK_LOG_TEST(&sx->lock_object, 0))
572                         CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
573                             __func__, sx);
574         }
575
576         GIANT_RESTORE();
577         if (!error)
578                 lock_profile_obtain_lock_success(&sx->lock_object, contested,
579                     waittime, file, line);
580         return (error);
581 }

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
        uintptr_t x;
        int queue, wakeup_swapper;

        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

        /* If the lock is recursed, then unrecurse one level. */
        if (sx_xlocked(sx) && sx_recursed(sx)) {
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }
        MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
            SX_LOCK_EXCLUSIVE_WAITERS));
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_LOCK_UNLOCKED;

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         */
        if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
                queue = SQ_SHARED_QUEUE;
                x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
        } else
                queue = SQ_EXCLUSIVE_QUEUE;

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
        atomic_store_rel_ptr(&sx->sx_lock, x);
        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
            queue);
        if (wakeup_swapper)
                kick_proc0();
}

/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING_SHARED
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t x;
        int error = 0;

        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
                x = sx->sx_lock;

                /*
                 * If no other thread has an exclusive lock then try to bump up
                 * the count of sharers.  Since we have to preserve the state
                 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
                 * shared lock loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        MPASS(!(x & SX_LOCK_SHARED_WAITERS));
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
                            x + SX_ONE_SHARER)) {
#ifdef LOCK_PROFILING_SHARED
                                if (SX_SHARERS(x) == 0)
                                        lock_profile_obtain_lock_success(
                                            &sx->lock_object, contested,
                                            waittime, file, line);
#endif
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            sx, (void *)x,
                                            (void *)(x + SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
                        x = SX_OWNER(x);
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                GIANT_SAVE();
#ifdef LOCK_PROFILING_SHARED
                                lock_profile_obtain_lock_failed(
                                    &sx->lock_object, &contested, &waittime);
#endif
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner))
                                        cpu_spinwait();
                                continue;
                        }
                }
#endif

                /*
                 * Some other thread already has an exclusive lock, so
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * The lock could have been released while we spun.
                 * In this case loop back and retry.
                 */
                if (x & SX_LOCK_SHARED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

                GIANT_SAVE();
#ifdef LOCK_PROFILING_SHARED
                lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                    &waittime);
#endif
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
                if (!(opts & SX_INTERRUPTIBLE))
                        sleepq_wait(&sx->lock_object);
                else
                        error = sleepq_wait_sig(&sx->lock_object);

                if (error) {
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK,
                        "%s: interruptible sleep by %p suspended by signal",
                                    __func__, sx);
                        break;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }

        GIANT_RESTORE();
        return (error);
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int wakeup_swapper;

        for (;;) {
                x = sx->sx_lock;

                /*
                 * We should never have shared waiters while at least
                 * one thread holds a shared lock.
                 */
                KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
                    ("%s: waiting sharers", __func__));

                /*
                 * See if there is more than one shared lock held.  If
                 * so, just drop one and return.
                 */
                if (SX_SHARERS(x) > 1) {
                        if (atomic_cmpset_ptr(&sx->sx_lock, x,
                            x - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)x,
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }

                /*
                 * If there aren't any waiters for an exclusive lock,
                 * then try to drop it quickly.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(x == SX_SHARERS_LOCK(1));
                        if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
                            SX_LOCK_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, sx);
                                break;
                        }
                        continue;
                }

                /*
                 * At this point, there should just be one sharer with
                 * exclusive waiters.
                 */
                MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

                sleepq_lock(&sx->lock_object);

                /*
                 * The wakeup semantics here are quite simple: just wake
                 * up all the exclusive waiters.  Note that the state of
                 * the lock could have changed, so if the cmpset fails,
                 * loop back and retry.
                 */
                if (!atomic_cmpset_ptr(&sx->sx_lock,
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
                    -1, SQ_EXCLUSIVE_QUEUE);
                if (wakeup_swapper)
                        kick_proc0();
                break;
        }
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef  _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (panicstr != NULL)
                return;
        switch (what) {
        case SA_SLOCKED:
        case SA_SLOCKED | SA_NOTRECURSED:
        case SA_SLOCKED | SA_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SA_LOCKED:
        case SA_LOCKED | SA_NOTRECURSED:
        case SA_LOCKED | SA_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SA_XLOCKED:
        case SA_XLOCKED | SA_NOTRECURSED:
        case SA_XLOCKED | SA_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif  /* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(struct lock_object *lock)
{
        struct thread *td;
        struct sx *sx;

        sx = (struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock == SX_LOCK_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch(sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif