1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /*
28  * Machine independent bits of reader/writer lock implementation.
29  */
30
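/*
 * Consumers reach this code through the rwlock(9) wrappers declared in
 * <sys/rwlock.h>.  A rough, illustrative sketch of the usual life cycle
 * (the "example_lock" name is made up; see rwlock(9) for the authoritative
 * interface):
 *
 *	struct rwlock example_lock;
 *
 *	rw_init(&example_lock, "example");
 *	rw_rlock(&example_lock);		read (shared) acquire
 *	rw_runlock(&example_lock);
 *	rw_wlock(&example_lock);		write (exclusive) acquire
 *	rw_wunlock(&example_lock);
 *	rw_destroy(&example_lock);
 */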
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_ddb.h"
35 #include "opt_hwpmc_hooks.h"
36 #include "opt_no_adaptive_rwlocks.h"
37
38 #include <sys/param.h>
39 #include <sys/kdb.h>
40 #include <sys/ktr.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/sched.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/turnstile.h>
51
52 #include <machine/cpu.h>
53
54 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
55 #define ADAPTIVE_RWLOCKS
56 #endif
57
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62
63 /*
64  * Return the rwlock address when the lock cookie address is provided.
65  * This functionality assumes that struct rwlock has a member named rw_lock.
66  */
67 #define rwlock2rw(c)    (__containerof(c, struct rwlock, rw_lock))
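/*
 * The public rw_*() macros in <sys/rwlock.h> pass &rw->rw_lock (the lock
 * cookie) rather than the rwlock itself, roughly along the lines of
 *
 *	#define rw_wlock(rw)	_rw_wlock_cookie(&(rw)->rw_lock, LOCK_FILE, LOCK_LINE)
 *
 * (the exact expansion lives in sys/sys/rwlock.h), which is why the
 * functions below use rwlock2rw() to recover the containing structure.
 */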
68
69 #ifdef DDB
70 #include <ddb/ddb.h>
71
72 static void     db_show_rwlock(const struct lock_object *lock);
73 #endif
74 static void     assert_rw(const struct lock_object *lock, int what);
75 static void     lock_rw(struct lock_object *lock, uintptr_t how);
76 #ifdef KDTRACE_HOOKS
77 static int      owner_rw(const struct lock_object *lock, struct thread **owner);
78 #endif
79 static uintptr_t unlock_rw(struct lock_object *lock);
80
81 struct lock_class lock_class_rw = {
82         .lc_name = "rw",
83         .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
84         .lc_assert = assert_rw,
85 #ifdef DDB
86         .lc_ddb_show = db_show_rwlock,
87 #endif
88         .lc_lock = lock_rw,
89         .lc_unlock = unlock_rw,
90 #ifdef KDTRACE_HOOKS
91         .lc_owner = owner_rw,
92 #endif
93 };
94
95 #ifdef ADAPTIVE_RWLOCKS
96 static int rowner_retries = 10;
97 static int rowner_loops = 10000;
98 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
99     "rwlock debugging");
100 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
101 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
102
103 static struct lock_delay_config __read_mostly rw_delay;
104
105 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
106     0, "");
107 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
108     0, "");
109
110 LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
111 #endif
112
113 /*
114  * Return a pointer to the owning thread if the lock is write-locked or
115  * NULL if the lock is unlocked or read-locked.
116  */
117
118 #define lv_rw_wowner(v)                                                 \
119         ((v) & RW_LOCK_READ ? NULL :                                    \
120          (struct thread *)RW_OWNER((v)))
121
122 #define rw_wowner(rw)   lv_rw_wowner(RW_READ_VALUE(rw))
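/*
 * Sketch of the lock word layout implied by the macros above and by
 * RW_OWNER()/RW_READERS() in <sys/rwlock.h>: when write-locked, rw_lock
 * holds the owning thread pointer plus the low flag bits; when unlocked or
 * read-locked, RW_LOCK_READ is set, the reader count occupies the upper
 * bits, and the remaining low bits are the waiter/spinner flags.  The exact
 * bit assignments are defined in sys/sys/rwlock.h.
 */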
123
124 /*
125  * Returns true if the write owner is recursed.  Write ownership is not
126  * assured here and should be checked beforehand.
127  */
128 #define rw_recursed(rw)         ((rw)->rw_recurse != 0)
129
130 /*
131  * Return true if curthread holds the lock.
132  */
133 #define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)
134
135 /*
136  * Return a pointer to the owning thread for this lock, which should receive
137  * any priority lent by threads that block on this lock.  Currently this
138  * is identical to rw_wowner().
139  */
140 #define rw_owner(rw)            rw_wowner(rw)
141
142 #ifndef INVARIANTS
143 #define __rw_assert(c, what, file, line)
144 #endif
145
146 void
147 assert_rw(const struct lock_object *lock, int what)
148 {
149
150         rw_assert((const struct rwlock *)lock, what);
151 }
152
153 void
154 lock_rw(struct lock_object *lock, uintptr_t how)
155 {
156         struct rwlock *rw;
157
158         rw = (struct rwlock *)lock;
159         if (how)
160                 rw_rlock(rw);
161         else
162                 rw_wlock(rw);
163 }
164
165 uintptr_t
166 unlock_rw(struct lock_object *lock)
167 {
168         struct rwlock *rw;
169
170         rw = (struct rwlock *)lock;
171         rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
172         if (rw->rw_lock & RW_LOCK_READ) {
173                 rw_runlock(rw);
174                 return (1);
175         } else {
176                 rw_wunlock(rw);
177                 return (0);
178         }
179 }
180
181 #ifdef KDTRACE_HOOKS
182 int
183 owner_rw(const struct lock_object *lock, struct thread **owner)
184 {
185         const struct rwlock *rw = (const struct rwlock *)lock;
186         uintptr_t x = rw->rw_lock;
187
188         *owner = rw_wowner(rw);
189         return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
190             (*owner != NULL));
191 }
192 #endif
193
194 void
195 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
196 {
197         struct rwlock *rw;
198         int flags;
199
200         rw = rwlock2rw(c);
201
202         MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
203             RW_RECURSE | RW_NEW)) == 0);
204         ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
205             ("%s: rw_lock not aligned for %s: %p", __func__, name,
206             &rw->rw_lock));
207
208         flags = LO_UPGRADABLE;
209         if (opts & RW_DUPOK)
210                 flags |= LO_DUPOK;
211         if (opts & RW_NOPROFILE)
212                 flags |= LO_NOPROFILE;
213         if (!(opts & RW_NOWITNESS))
214                 flags |= LO_WITNESS;
215         if (opts & RW_RECURSE)
216                 flags |= LO_RECURSABLE;
217         if (opts & RW_QUIET)
218                 flags |= LO_QUIET;
219         if (opts & RW_NEW)
220                 flags |= LO_NEW;
221
222         lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
223         rw->rw_lock = RW_UNLOCKED;
224         rw->rw_recurse = 0;
225 }
226
227 void
228 _rw_destroy(volatile uintptr_t *c)
229 {
230         struct rwlock *rw;
231
232         rw = rwlock2rw(c);
233
234         KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
235         KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
236         rw->rw_lock = RW_DESTROYED;
237         lock_destroy(&rw->lock_object);
238 }
239
240 void
241 rw_sysinit(void *arg)
242 {
243         struct rw_args *args = arg;
244
245         rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
246 }
247
248 void
249 rw_sysinit_flags(void *arg)
250 {
251         struct rw_args_flags *args = arg;
252
253         rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
254             args->ra_flags);
255 }
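/*
 * rw_sysinit() and rw_sysinit_flags() above are meant to run as SYSINIT
 * handlers; the RW_SYSINIT()/RW_SYSINIT_FLAGS() macros in <sys/rwlock.h>
 * arrange that, roughly as in this illustrative (made-up) example:
 *
 *	static struct rwlock example_global_lock;
 *	RW_SYSINIT(example_global, &example_global_lock, "example global");
 */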
256
257 int
258 _rw_wowned(const volatile uintptr_t *c)
259 {
260
261         return (rw_wowner(rwlock2rw(c)) == curthread);
262 }
263
264 void
265 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
266 {
267         struct rwlock *rw;
268         uintptr_t tid, v;
269
270         rw = rwlock2rw(c);
271
272         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
273             ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
274             curthread, rw->lock_object.lo_name, file, line));
275         KASSERT(rw->rw_lock != RW_DESTROYED,
276             ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
277         WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
278             line, NULL);
279         tid = (uintptr_t)curthread;
280         v = RW_UNLOCKED;
281         if (!_rw_write_lock_fetch(rw, &v, tid))
282                 _rw_wlock_hard(rw, v, tid, file, line);
283         else
284                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
285                     0, 0, file, line, LOCKSTAT_WRITER);
286
287         LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
288         WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
289         TD_LOCKS_INC(curthread);
290 }
291
292 int
293 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
294 {
295         struct rwlock *rw;
296         struct thread *td;
297         uintptr_t tid, v;
298         int rval;
299         bool recursed;
300
301         td = curthread;
302         tid = (uintptr_t)td;
303         if (SCHEDULER_STOPPED_TD(td))
304                 return (1);
305
306         rw = rwlock2rw(c);
307
308         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
309             ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
310             curthread, rw->lock_object.lo_name, file, line));
311         KASSERT(rw->rw_lock != RW_DESTROYED,
312             ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
313
314         rval = 1;
315         recursed = false;
316         v = RW_UNLOCKED;
317         for (;;) {
318                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
319                         break;
320                 if (v == RW_UNLOCKED)
321                         continue;
322                 if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
323                         rw->rw_recurse++;
324                         atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
325                         break;
326                 }
327                 rval = 0;
328                 break;
329         }
330
331         LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
332         if (rval) {
333                 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
334                     file, line);
335                 if (!recursed)
336                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
337                             rw, 0, 0, file, line, LOCKSTAT_WRITER);
338                 TD_LOCKS_INC(curthread);
339         }
340         return (rval);
341 }
342
343 void
344 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
345 {
346         struct rwlock *rw;
347
348         rw = rwlock2rw(c);
349
350         KASSERT(rw->rw_lock != RW_DESTROYED,
351             ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
352         __rw_assert(c, RA_WLOCKED, file, line);
353         WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
354         LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
355             line);
356
357 #ifdef LOCK_PROFILING
358         _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
359 #else
360         __rw_wunlock(rw, curthread, file, line);
361 #endif
362
363         TD_LOCKS_DEC(curthread);
364 }
365
366 /*
367  * Determines whether a new reader can acquire a lock.  Succeeds if the
368  * reader already owns a read lock and the lock is locked for read, to
369  * prevent deadlock from reader recursion.  Also succeeds if the lock
370  * is unlocked or read-locked and has no write waiters or spinners.
371  * Failing in all other cases prioritizes writers over readers.
372  */
373 #define RW_CAN_READ(td, _rw)                                            \
374     (((td)->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &   \
375     (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==  \
376     RW_LOCK_READ)
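/*
 * For illustration: a lock value of RW_READERS_LOCK(2) with no waiter bits
 * admits any new reader, while the same value with RW_LOCK_WRITE_WAITERS set
 * only admits a thread that already holds read locks (td_rw_rlocks != 0), so
 * a pending writer is not starved by a steady stream of new readers.
 */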
377
378 static bool __always_inline
379 __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp,
380     const char *file, int line)
381 {
382
383         /*
384          * Handle the easy case.  If no other thread has a write
385          * lock, then try to bump up the count of read locks.  Note
386          * that we have to preserve the current state of the
387          * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
388          * read lock, then rw_lock must have changed, so restart
389          * the loop.  Note that this handles the case of a
390          * completely unlocked rwlock since such a lock is encoded
391          * as a read lock with no waiters.
392          */
393         while (RW_CAN_READ(td, *vp)) {
394                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
395                         *vp + RW_ONE_READER)) {
396                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
397                                 CTR4(KTR_LOCK,
398                                     "%s: %p succeed %p -> %p", __func__,
399                                     rw, (void *)*vp,
400                                     (void *)(*vp + RW_ONE_READER));
401                         td->td_rw_rlocks++;
402                         return (true);
403                 }
404         }
405         return (false);
406 }
407
408 static void __noinline
409 __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
410     const char *file, int line)
411 {
412         struct rwlock *rw;
413         struct turnstile *ts;
414 #ifdef ADAPTIVE_RWLOCKS
415         volatile struct thread *owner;
416         int spintries = 0;
417         int i;
418 #endif
419 #ifdef LOCK_PROFILING
420         uint64_t waittime = 0;
421         int contested = 0;
422 #endif
423 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
424         struct lock_delay_arg lda;
425 #endif
426 #ifdef KDTRACE_HOOKS
427         uintptr_t state;
428         u_int sleep_cnt = 0;
429         int64_t sleep_time = 0;
430         int64_t all_time = 0;
431 #endif
432
433         if (SCHEDULER_STOPPED())
434                 return;
435
436 #if defined(ADAPTIVE_RWLOCKS)
437         lock_delay_arg_init(&lda, &rw_delay);
438 #elif defined(KDTRACE_HOOKS)
439         lock_delay_arg_init(&lda, NULL);
440 #endif
441         rw = rwlock2rw(c);
442
443 #ifdef KDTRACE_HOOKS
444         all_time -= lockstat_nsecs(&rw->lock_object);
445 #endif
446 #ifdef KDTRACE_HOOKS
447         state = v;
448 #endif
449         for (;;) {
450                 if (__rw_rlock_try(rw, td, &v, file, line))
451                         break;
452 #ifdef KDTRACE_HOOKS
453                 lda.spin_cnt++;
454 #endif
455 #ifdef HWPMC_HOOKS
456                 PMC_SOFT_CALL( , , lock, failed);
457 #endif
458                 lock_profile_obtain_lock_failed(&rw->lock_object,
459                     &contested, &waittime);
460
461 #ifdef ADAPTIVE_RWLOCKS
462                 /*
463                  * If the owner is running on another CPU, spin until
464                  * the owner stops running or the state of the lock
465                  * changes.
466                  */
467                 if ((v & RW_LOCK_READ) == 0) {
468                         owner = (struct thread *)RW_OWNER(v);
469                         if (TD_IS_RUNNING(owner)) {
470                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
471                                         CTR3(KTR_LOCK,
472                                             "%s: spinning on %p held by %p",
473                                             __func__, rw, owner);
474                                 KTR_STATE1(KTR_SCHED, "thread",
475                                     sched_tdname(curthread), "spinning",
476                                     "lockname:\"%s\"", rw->lock_object.lo_name);
477                                 do {
478                                         lock_delay(&lda);
479                                         v = RW_READ_VALUE(rw);
480                                         owner = lv_rw_wowner(v);
481                                 } while (owner != NULL && TD_IS_RUNNING(owner));
482                                 KTR_STATE0(KTR_SCHED, "thread",
483                                     sched_tdname(curthread), "running");
484                                 continue;
485                         }
486                 } else if (spintries < rowner_retries) {
487                         spintries++;
488                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
489                             "spinning", "lockname:\"%s\"",
490                             rw->lock_object.lo_name);
491                         for (i = 0; i < rowner_loops; i++) {
492                                 v = RW_READ_VALUE(rw);
493                                 if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
494                                         break;
495                                 cpu_spinwait();
496                         }
497                         v = RW_READ_VALUE(rw);
498 #ifdef KDTRACE_HOOKS
499                         lda.spin_cnt += rowner_loops - i;
500 #endif
501                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
502                             "running");
503                         if (i != rowner_loops)
504                                 continue;
505                 }
506 #endif
507
508                 /*
509                  * Okay, now it's the hard case.  Some other thread already
510                  * has a write lock or there are write waiters present, so
511                  * acquire the turnstile lock to begin the process
512                  * of blocking.
513                  */
514                 ts = turnstile_trywait(&rw->lock_object);
515
516                 /*
517                  * The lock might have been released while we spun, so
518                  * recheck its state and restart the loop if needed.
519                  */
520                 v = RW_READ_VALUE(rw);
521                 if (RW_CAN_READ(td, v)) {
522                         turnstile_cancel(ts);
523                         continue;
524                 }
525
526 #ifdef ADAPTIVE_RWLOCKS
527                 /*
528                  * The current lock owner might have started executing
529                  * on another CPU (or the lock could have changed
530                  * owners) while we were waiting on the turnstile
531                  * chain lock.  If so, drop the turnstile lock and try
532                  * again.
533                  */
534                 if ((v & RW_LOCK_READ) == 0) {
535                         owner = (struct thread *)RW_OWNER(v);
536                         if (TD_IS_RUNNING(owner)) {
537                                 turnstile_cancel(ts);
538                                 continue;
539                         }
540                 }
541 #endif
542
543                 /*
544                  * The lock is held in write mode or it already has waiters.
545                  */
546                 MPASS(!RW_CAN_READ(td, v));
547
548                 /*
549                  * If the RW_LOCK_READ_WAITERS flag is already set, then
550                  * we can go ahead and block.  If it is not set then try
551                  * to set it.  If we fail to set it drop the turnstile
552                  * lock and restart the loop.
553                  */
554                 if (!(v & RW_LOCK_READ_WAITERS)) {
555                         if (!atomic_cmpset_ptr(&rw->rw_lock, v,
556                             v | RW_LOCK_READ_WAITERS)) {
557                                 turnstile_cancel(ts);
558                                 v = RW_READ_VALUE(rw);
559                                 continue;
560                         }
561                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
562                                 CTR2(KTR_LOCK, "%s: %p set read waiters flag",
563                                     __func__, rw);
564                 }
565
566                 /*
567                  * We were unable to acquire the lock and the read waiters
568                  * flag is set, so we must block on the turnstile.
569                  */
570                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
571                         CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
572                             rw);
573 #ifdef KDTRACE_HOOKS
574                 sleep_time -= lockstat_nsecs(&rw->lock_object);
575 #endif
576                 turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
577 #ifdef KDTRACE_HOOKS
578                 sleep_time += lockstat_nsecs(&rw->lock_object);
579                 sleep_cnt++;
580 #endif
581                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
582                         CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
583                             __func__, rw);
584                 v = RW_READ_VALUE(rw);
585         }
586 #ifdef KDTRACE_HOOKS
587         all_time += lockstat_nsecs(&rw->lock_object);
588         if (sleep_time)
589                 LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
590                     LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
591                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
592
593         /* Record only the loops spinning and not sleeping. */
594         if (lda.spin_cnt > sleep_cnt)
595                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
596                     LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
597                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
598 #endif
599         /*
600          * TODO: acquire "owner of record" here.  Here be turnstile dragons
601          * however.  Turnstiles don't like owners changing between calls to
602          * turnstile_wait() currently.
603          */
604         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
605             waittime, file, line, LOCKSTAT_READER);
606 }
607
608 void
609 __rw_rlock(volatile uintptr_t *c, const char *file, int line)
610 {
611         struct rwlock *rw;
612         struct thread *td;
613         uintptr_t v;
614
615         td = curthread;
616         rw = rwlock2rw(c);
617
618         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
619             ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
620             td, rw->lock_object.lo_name, file, line));
621         KASSERT(rw->rw_lock != RW_DESTROYED,
622             ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
623         KASSERT(rw_wowner(rw) != td,
624             ("rw_rlock: wlock already held for %s @ %s:%d",
625             rw->lock_object.lo_name, file, line));
626         WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
627
628         v = RW_READ_VALUE(rw);
629         if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
630             !__rw_rlock_try(rw, td, &v, file, line)))
631                 __rw_rlock_hard(c, td, v, file, line);
632
633         LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
634         WITNESS_LOCK(&rw->lock_object, 0, file, line);
635         TD_LOCKS_INC(curthread);
636 }
637
638 int
639 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
640 {
641         struct rwlock *rw;
642         uintptr_t x;
643
644         if (SCHEDULER_STOPPED())
645                 return (1);
646
647         rw = rwlock2rw(c);
648
649         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
650             ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
651             curthread, rw->lock_object.lo_name, file, line));
652
653         x = rw->rw_lock;
654         for (;;) {
655                 KASSERT(rw->rw_lock != RW_DESTROYED,
656                     ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
657                 if (!(x & RW_LOCK_READ))
658                         break;
659                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
660                         LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
661                             line);
662                         WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
663                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
664                             rw, 0, 0, file, line, LOCKSTAT_READER);
665                         TD_LOCKS_INC(curthread);
666                         curthread->td_rw_rlocks++;
667                         return (1);
668                 }
669         }
670
671         LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
672         return (0);
673 }
674
675 static bool __always_inline
676 __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
677 {
678
679         for (;;) {
680                 /*
681                  * See if there is more than one read lock held.  If so,
682                  * just drop one and return.
683                  */
684                 if (RW_READERS(*vp) > 1) {
685                         if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
686                             *vp - RW_ONE_READER)) {
687                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
688                                         CTR4(KTR_LOCK,
689                                             "%s: %p succeeded %p -> %p",
690                                             __func__, rw, (void *)*vp,
691                                             (void *)(*vp - RW_ONE_READER));
692                                 td->td_rw_rlocks--;
693                                 return (true);
694                         }
695                         continue;
696                 }
697                 /*
698                  * If there aren't any waiters at all, then try to drop
699                  * the last read lock quickly.
700                  */
701                 if (!(*vp & RW_LOCK_WAITERS)) {
702                         MPASS((*vp & ~RW_LOCK_WRITE_SPINNER) ==
703                             RW_READERS_LOCK(1));
704                         if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
705                             RW_UNLOCKED)) {
706                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
707                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
708                                             __func__, rw);
709                                 td->td_rw_rlocks--;
710                                 return (true);
711                         }
712                         continue;
713                 }
714                 break;
715         }
716         return (false);
717 }
718
719 static void __noinline
720 __rw_runlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
721     const char *file, int line)
722 {
723         struct rwlock *rw;
724         struct turnstile *ts;
725         uintptr_t x, queue;
726
727         if (SCHEDULER_STOPPED())
728                 return;
729
730         rw = rwlock2rw(c);
731
732         for (;;) {
733                 if (__rw_runlock_try(rw, td, &v))
734                         break;
735
736                 /*
737                  * Ok, we know we have waiters and we think we are the
738                  * last reader, so grab the turnstile lock.
739                  */
740                 turnstile_chain_lock(&rw->lock_object);
741                 v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
742                 MPASS(v & RW_LOCK_WAITERS);
743
744                 /*
745                  * Try to drop our lock, leaving the lock in an unlocked
746                  * state.
747                  *
748                  * If you wanted to do explicit lock handoff you'd have to
749                  * do it here.  You'd also want to use turnstile_signal()
750                  * and you'd have to handle the race where a higher
751                  * priority thread blocks on the write lock before the
752                  * thread you wake up actually runs, letting the new thread
753                  * "steal" the lock.  For now it's a lot simpler to just
754                  * wake up all of the waiters.
755                  *
756                  * As above, if we fail, then another thread might have
757                  * acquired a read lock, so drop the turnstile lock and
758                  * restart.
759                  */
760                 x = RW_UNLOCKED;
761                 if (v & RW_LOCK_WRITE_WAITERS) {
762                         queue = TS_EXCLUSIVE_QUEUE;
763                         x |= (v & RW_LOCK_READ_WAITERS);
764                 } else
765                         queue = TS_SHARED_QUEUE;
766                 if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
767                     x)) {
768                         turnstile_chain_unlock(&rw->lock_object);
769                         v = RW_READ_VALUE(rw);
770                         continue;
771                 }
772                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
773                         CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
774                             __func__, rw);
775
776                 /*
777                  * Ok.  The lock is released and all that's left is to
778                  * wake up the waiters.  Note that the lock might not be
779                  * free anymore, but in that case the writers will just
780                  * block again if they run before the new lock holder(s)
781                  * release the lock.
782                  */
783                 ts = turnstile_lookup(&rw->lock_object);
784                 MPASS(ts != NULL);
785                 turnstile_broadcast(ts, queue);
786                 turnstile_unpend(ts, TS_SHARED_LOCK);
787                 turnstile_chain_unlock(&rw->lock_object);
788                 td->td_rw_rlocks--;
789                 break;
790         }
791         LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
792 }
793
794 void
795 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
796 {
797         struct rwlock *rw;
798         struct thread *td;
799         uintptr_t v;
800
801         rw = rwlock2rw(c);
802
803         KASSERT(rw->rw_lock != RW_DESTROYED,
804             ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
805         __rw_assert(c, RA_RLOCKED, file, line);
806         WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
807         LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
808
809         td = curthread;
810         v = RW_READ_VALUE(rw);
811
812         if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
813             !__rw_runlock_try(rw, td, &v)))
814                 __rw_runlock_hard(c, td, v, file, line);
815
816         TD_LOCKS_DEC(curthread);
817 }
818
819
820 /*
821  * This function is called when we are unable to obtain a write lock on the
822  * first try.  This means that at least one other thread holds either a
823  * read or write lock.
824  */
825 void
826 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
827     const char *file, int line)
828 {
829         struct rwlock *rw;
830         struct turnstile *ts;
831 #ifdef ADAPTIVE_RWLOCKS
832         volatile struct thread *owner;
833         int spintries = 0;
834         int i;
835 #endif
836         uintptr_t x;
837 #ifdef LOCK_PROFILING
838         uint64_t waittime = 0;
839         int contested = 0;
840 #endif
841 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
842         struct lock_delay_arg lda;
843 #endif
844 #ifdef KDTRACE_HOOKS
845         uintptr_t state;
846         u_int sleep_cnt = 0;
847         int64_t sleep_time = 0;
848         int64_t all_time = 0;
849 #endif
850
851         if (SCHEDULER_STOPPED())
852                 return;
853
854 #if defined(ADAPTIVE_RWLOCKS)
855         lock_delay_arg_init(&lda, &rw_delay);
856 #elif defined(KDTRACE_HOOKS)
857         lock_delay_arg_init(&lda, NULL);
858 #endif
859         rw = rwlock2rw(c);
860         if (__predict_false(v == RW_UNLOCKED))
861                 v = RW_READ_VALUE(rw);
862
863         if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
864                 KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
865                     ("%s: recursing but non-recursive rw %s @ %s:%d\n",
866                     __func__, rw->lock_object.lo_name, file, line));
867                 rw->rw_recurse++;
868                 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
869                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
870                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
871                 return;
872         }
873
874         if (LOCK_LOG_TEST(&rw->lock_object, 0))
875                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
876                     rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
877
878 #ifdef KDTRACE_HOOKS
879         all_time -= lockstat_nsecs(&rw->lock_object);
880         state = v;
881 #endif
882         for (;;) {
883                 if (v == RW_UNLOCKED) {
884                         if (_rw_write_lock_fetch(rw, &v, tid))
885                                 break;
886                         continue;
887                 }
888 #ifdef KDTRACE_HOOKS
889                 lda.spin_cnt++;
890 #endif
891 #ifdef HWPMC_HOOKS
892                 PMC_SOFT_CALL( , , lock, failed);
893 #endif
894                 lock_profile_obtain_lock_failed(&rw->lock_object,
895                     &contested, &waittime);
896 #ifdef ADAPTIVE_RWLOCKS
897                 /*
898                  * If the lock is write locked and the owner is
899                  * running on another CPU, spin until the owner stops
900                  * running or the state of the lock changes.
901                  */
902                 owner = lv_rw_wowner(v);
903                 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
904                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
905                                 CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
906                                     __func__, rw, owner);
907                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
908                             "spinning", "lockname:\"%s\"",
909                             rw->lock_object.lo_name);
910                         do {
911                                 lock_delay(&lda);
912                                 v = RW_READ_VALUE(rw);
913                                 owner = lv_rw_wowner(v);
914                         } while (owner != NULL && TD_IS_RUNNING(owner));
915                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
916                             "running");
917                         continue;
918                 }
919                 if ((v & RW_LOCK_READ) && RW_READERS(v) &&
920                     spintries < rowner_retries) {
921                         if (!(v & RW_LOCK_WRITE_SPINNER)) {
922                                 if (!atomic_cmpset_ptr(&rw->rw_lock, v,
923                                     v | RW_LOCK_WRITE_SPINNER)) {
924                                         v = RW_READ_VALUE(rw);
925                                         continue;
926                                 }
927                         }
928                         spintries++;
929                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
930                             "spinning", "lockname:\"%s\"",
931                             rw->lock_object.lo_name);
932                         for (i = 0; i < rowner_loops; i++) {
933                                 if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
934                                         break;
935                                 cpu_spinwait();
936                         }
937                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
938                             "running");
939                         v = RW_READ_VALUE(rw);
940 #ifdef KDTRACE_HOOKS
941                         lda.spin_cnt += rowner_loops - i;
942 #endif
943                         if (i != rowner_loops)
944                                 continue;
945                 }
946 #endif
947                 ts = turnstile_trywait(&rw->lock_object);
948                 v = RW_READ_VALUE(rw);
949
950 #ifdef ADAPTIVE_RWLOCKS
951                 /*
952                  * The current lock owner might have started executing
953                  * on another CPU (or the lock could have changed
954                  * owners) while we were waiting on the turnstile
955                  * chain lock.  If so, drop the turnstile lock and try
956                  * again.
957                  */
958                 if (!(v & RW_LOCK_READ)) {
959                         owner = (struct thread *)RW_OWNER(v);
960                         if (TD_IS_RUNNING(owner)) {
961                                 turnstile_cancel(ts);
962                                 continue;
963                         }
964                 }
965 #endif
966                 /*
967                  * Check the waiter flags on this rwlock.
968                  * If the lock was released without leaving any waiters
969                  * queued, simply try to acquire it.
970                  * If waiters are queued, claim the lock ownership and
971                  * preserve the waiter flags.
972                  */
973                 x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
974                 if ((v & ~x) == RW_UNLOCKED) {
975                         x &= ~RW_LOCK_WRITE_SPINNER;
976                         if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
977                                 if (x)
978                                         turnstile_claim(ts);
979                                 else
980                                         turnstile_cancel(ts);
981                                 break;
982                         }
983                         turnstile_cancel(ts);
984                         v = RW_READ_VALUE(rw);
985                         continue;
986                 }
987                 /*
988                  * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
989                  * set it.  If we fail to set it, then loop back and try
990                  * again.
991                  */
992                 if (!(v & RW_LOCK_WRITE_WAITERS)) {
993                         if (!atomic_cmpset_ptr(&rw->rw_lock, v,
994                             v | RW_LOCK_WRITE_WAITERS)) {
995                                 turnstile_cancel(ts);
996                                 v = RW_READ_VALUE(rw);
997                                 continue;
998                         }
999                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1000                                 CTR2(KTR_LOCK, "%s: %p set write waiters flag",
1001                                     __func__, rw);
1002                 }
1003                 /*
1004                  * We were unable to acquire the lock and the write waiters
1005                  * flag is set, so we must block on the turnstile.
1006                  */
1007                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1008                         CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
1009                             rw);
1010 #ifdef KDTRACE_HOOKS
1011                 sleep_time -= lockstat_nsecs(&rw->lock_object);
1012 #endif
1013                 turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
1014 #ifdef KDTRACE_HOOKS
1015                 sleep_time += lockstat_nsecs(&rw->lock_object);
1016                 sleep_cnt++;
1017 #endif
1018                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1019                         CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
1020                             __func__, rw);
1021 #ifdef ADAPTIVE_RWLOCKS
1022                 spintries = 0;
1023 #endif
1024                 v = RW_READ_VALUE(rw);
1025         }
1026 #ifdef KDTRACE_HOOKS
1027         all_time += lockstat_nsecs(&rw->lock_object);
1028         if (sleep_time)
1029                 LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
1030                     LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
1031                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
1032
1033         /* Record only the loops spinning and not sleeping. */
1034         if (lda.spin_cnt > sleep_cnt)
1035                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
1036                     LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
1037                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
1038 #endif
1039         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
1040             waittime, file, line, LOCKSTAT_WRITER);
1041 }
1042
1043 /*
1044  * This function is called if lockstat is active or the first try at releasing
1045  * a write lock failed.  The latter means that the lock is recursed or one of
1046  * the two waiter bits is set, indicating that at least one thread is waiting
1047  * on this lock.
1048  */
1049 void
1050 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
1051     int line)
1052 {
1053         struct rwlock *rw;
1054         struct turnstile *ts;
1055         uintptr_t v;
1056         int queue;
1057
1058         if (SCHEDULER_STOPPED())
1059                 return;
1060
1061         rw = rwlock2rw(c);
1062         v = RW_READ_VALUE(rw);
1063         if (v & RW_LOCK_WRITER_RECURSED) {
1064                 if (--(rw->rw_recurse) == 0)
1065                         atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
1066                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1067                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
1068                 return;
1069         }
1070
1071         LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
1072         if (v == tid && _rw_write_unlock(rw, tid))
1073                 return;
1074
1075         KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
1076             ("%s: neither of the waiter flags are set", __func__));
1077
1078         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1079                 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
1080
1081         turnstile_chain_lock(&rw->lock_object);
1082         ts = turnstile_lookup(&rw->lock_object);
1083         MPASS(ts != NULL);
1084
1085         /*
1086          * Use the same algorithm as sx locks for now.  Prefer waking up shared
1087          * waiters, if there are any, over writers.  This is probably not ideal.
1088          *
1089          * 'v' is the value we are going to write back to rw_lock.  If we
1090          * have waiters on both queues, we need to preserve the state of
1091          * the waiter flag for the queue we don't wake up.  For now this is
1092          * hardcoded for the algorithm mentioned above.
1093          *
1094          * In the case of both readers and writers waiting we wakeup the
1095          * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
1096          * new writer comes in before a reader it will claim the lock up
1097          * above.  There is probably a potential priority inversion in
1098          * there that could be worked around either by waking both queues
1099          * of waiters or doing some complicated lock handoff gymnastics.
1100          */
1101         v = RW_UNLOCKED;
1102         if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
1103                 queue = TS_EXCLUSIVE_QUEUE;
1104                 v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
1105         } else
1106                 queue = TS_SHARED_QUEUE;
1107
1108         /* Wake up all waiters for the specific queue. */
1109         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1110                 CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
1111                     queue == TS_SHARED_QUEUE ? "read" : "write");
1112         turnstile_broadcast(ts, queue);
1113         atomic_store_rel_ptr(&rw->rw_lock, v);
1114         turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1115         turnstile_chain_unlock(&rw->lock_object);
1116 }
1117
1118 /*
1119  * Attempt to do a non-blocking upgrade from a read lock to a write
1120  * lock.  This will only succeed if this thread holds a single read
1121  * lock.  Returns true if the upgrade succeeded and false otherwise.
1122  */
1123 int
1124 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
1125 {
1126         struct rwlock *rw;
1127         uintptr_t v, x, tid;
1128         struct turnstile *ts;
1129         int success;
1130
1131         if (SCHEDULER_STOPPED())
1132                 return (1);
1133
1134         rw = rwlock2rw(c);
1135
1136         KASSERT(rw->rw_lock != RW_DESTROYED,
1137             ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
1138         __rw_assert(c, RA_RLOCKED, file, line);
1139
1140         /*
1141          * Attempt to switch from one reader to a writer.  If there
1142          * are any write waiters, then we will have to lock the
1143          * turnstile first to prevent races with another writer
1144          * calling turnstile_wait() before we have claimed this
1145          * turnstile.  So, do the simple case of no waiters first.
1146          */
1147         tid = (uintptr_t)curthread;
1148         success = 0;
1149         for (;;) {
1150                 v = rw->rw_lock;
1151                 if (RW_READERS(v) > 1)
1152                         break;
1153                 if (!(v & RW_LOCK_WAITERS)) {
1154                         success = atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid);
1155                         if (!success)
1156                                 continue;
1157                         break;
1158                 }
1159
1160                 /*
1161                  * Ok, we think we have waiters, so lock the turnstile.
1162                  */
1163                 ts = turnstile_trywait(&rw->lock_object);
1164                 v = rw->rw_lock;
1165                 if (RW_READERS(v) > 1) {
1166                         turnstile_cancel(ts);
1167                         break;
1168                 }
1169                 /*
1170                  * Try to switch from one reader to a writer again.  This time
1171                  * we honor the current state of the waiters flags.
1172                  * If we obtain the lock with the flags set, then claim
1173                  * ownership of the turnstile.
1174                  */
1175                 x = rw->rw_lock & RW_LOCK_WAITERS;
1176                 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
1177                 if (success) {
1178                         if (x)
1179                                 turnstile_claim(ts);
1180                         else
1181                                 turnstile_cancel(ts);
1182                         break;
1183                 }
1184                 turnstile_cancel(ts);
1185         }
1186         LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
1187         if (success) {
1188                 curthread->td_rw_rlocks--;
1189                 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
1190                     file, line);
1191                 LOCKSTAT_RECORD0(rw__upgrade, rw);
1192         }
1193         return (success);
1194 }
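/*
 * A rough sketch of the usual consumer pattern for the upgrade path above
 * (names are illustrative only):
 *
 *	rw_rlock(&example_lock);
 *	if (need_to_modify && !rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		re-validate: the protected state may have changed while unlocked
 *	}
 *	rw_wunlock(&example_lock);
 */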
1195
1196 /*
1197  * Downgrade a write lock into a single read lock.
1198  */
1199 void
1200 __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
1201 {
1202         struct rwlock *rw;
1203         struct turnstile *ts;
1204         uintptr_t tid, v;
1205         int rwait, wwait;
1206
1207         if (SCHEDULER_STOPPED())
1208                 return;
1209
1210         rw = rwlock2rw(c);
1211
1212         KASSERT(rw->rw_lock != RW_DESTROYED,
1213             ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
1214         __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
1215 #ifndef INVARIANTS
1216         if (rw_recursed(rw))
1217                 panic("downgrade of a recursed lock");
1218 #endif
1219
1220         WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
1221
1222         /*
1223          * Convert from a writer to a single reader.  First we handle
1224          * the easy case with no waiters.  If there are any waiters, we
1225          * lock the turnstile and "disown" the lock.
1226          */
1227         tid = (uintptr_t)curthread;
1228         if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1229                 goto out;
1230
1231         /*
1232          * Ok, we think we have waiters, so lock the turnstile so we can
1233          * read the waiter flags without any races.
1234          */
1235         turnstile_chain_lock(&rw->lock_object);
1236         v = rw->rw_lock & RW_LOCK_WAITERS;
1237         rwait = v & RW_LOCK_READ_WAITERS;
1238         wwait = v & RW_LOCK_WRITE_WAITERS;
1239         MPASS(rwait | wwait);
1240
1241         /*
1242          * Downgrade from a write lock while preserving waiters flag
1243          * and give up ownership of the turnstile.
1244          */
1245         ts = turnstile_lookup(&rw->lock_object);
1246         MPASS(ts != NULL);
1247         if (!wwait)
1248                 v &= ~RW_LOCK_READ_WAITERS;
1249         atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1250         /*
1251          * Wake other readers if there are no writers pending.  Otherwise they
1252          * won't be able to acquire the lock anyway.
1253          */
1254         if (rwait && !wwait) {
1255                 turnstile_broadcast(ts, TS_SHARED_QUEUE);
1256                 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1257         } else
1258                 turnstile_disown(ts);
1259         turnstile_chain_unlock(&rw->lock_object);
1260 out:
1261         curthread->td_rw_rlocks++;
1262         LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
1263         LOCKSTAT_RECORD0(rw__downgrade, rw);
1264 }
1265
1266 #ifdef INVARIANT_SUPPORT
1267 #ifndef INVARIANTS
1268 #undef __rw_assert
1269 #endif
1270
1271 /*
1272  * In the non-WITNESS case, rw_assert() can only detect that at least
1273  * *some* thread owns an rlock, but it cannot guarantee that *this*
1274  * thread owns an rlock.
1275  */
1276 void
1277 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1278 {
1279         const struct rwlock *rw;
1280
1281         if (panicstr != NULL)
1282                 return;
1283
1284         rw = rwlock2rw(c);
1285
1286         switch (what) {
1287         case RA_LOCKED:
1288         case RA_LOCKED | RA_RECURSED:
1289         case RA_LOCKED | RA_NOTRECURSED:
1290         case RA_RLOCKED:
1291         case RA_RLOCKED | RA_RECURSED:
1292         case RA_RLOCKED | RA_NOTRECURSED:
1293 #ifdef WITNESS
1294                 witness_assert(&rw->lock_object, what, file, line);
1295 #else
1296                 /*
1297                  * If some other thread has a write lock or we have one
1298                  * and are asserting a read lock, fail.  Also, if no one
1299                  * has a lock at all, fail.
1300                  */
1301                 if (rw->rw_lock == RW_UNLOCKED ||
1302                     (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1303                     rw_wowner(rw) != curthread)))
1304                         panic("Lock %s not %slocked @ %s:%d\n",
1305                             rw->lock_object.lo_name, (what & RA_RLOCKED) ?
1306                             "read " : "", file, line);
1307
1308                 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1309                         if (rw_recursed(rw)) {
1310                                 if (what & RA_NOTRECURSED)
1311                                         panic("Lock %s recursed @ %s:%d\n",
1312                                             rw->lock_object.lo_name, file,
1313                                             line);
1314                         } else if (what & RA_RECURSED)
1315                                 panic("Lock %s not recursed @ %s:%d\n",
1316                                     rw->lock_object.lo_name, file, line);
1317                 }
1318 #endif
1319                 break;
1320         case RA_WLOCKED:
1321         case RA_WLOCKED | RA_RECURSED:
1322         case RA_WLOCKED | RA_NOTRECURSED:
1323                 if (rw_wowner(rw) != curthread)
1324                         panic("Lock %s not exclusively locked @ %s:%d\n",
1325                             rw->lock_object.lo_name, file, line);
1326                 if (rw_recursed(rw)) {
1327                         if (what & RA_NOTRECURSED)
1328                                 panic("Lock %s recursed @ %s:%d\n",
1329                                     rw->lock_object.lo_name, file, line);
1330                 } else if (what & RA_RECURSED)
1331                         panic("Lock %s not recursed @ %s:%d\n",
1332                             rw->lock_object.lo_name, file, line);
1333                 break;
1334         case RA_UNLOCKED:
1335 #ifdef WITNESS
1336                 witness_assert(&rw->lock_object, what, file, line);
1337 #else
1338                 /*
1339                  * If we hold a write lock, fail.  We can't reliably check
1340                  * to see if we hold a read lock or not.
1341                  */
1342                 if (rw_wowner(rw) == curthread)
1343                         panic("Lock %s exclusively locked @ %s:%d\n",
1344                             rw->lock_object.lo_name, file, line);
1345 #endif
1346                 break;
1347         default:
1348                 panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
1349                     line);
1350         }
1351 }
1352 #endif /* INVARIANT_SUPPORT */
1353
1354 #ifdef DDB
1355 void
1356 db_show_rwlock(const struct lock_object *lock)
1357 {
1358         const struct rwlock *rw;
1359         struct thread *td;
1360
1361         rw = (const struct rwlock *)lock;
1362
1363         db_printf(" state: ");
1364         if (rw->rw_lock == RW_UNLOCKED)
1365                 db_printf("UNLOCKED\n");
1366         else if (rw->rw_lock == RW_DESTROYED) {
1367                 db_printf("DESTROYED\n");
1368                 return;
1369         } else if (rw->rw_lock & RW_LOCK_READ)
1370                 db_printf("RLOCK: %ju locks\n",
1371                     (uintmax_t)(RW_READERS(rw->rw_lock)));
1372         else {
1373                 td = rw_wowner(rw);
1374                 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1375                     td->td_tid, td->td_proc->p_pid, td->td_name);
1376                 if (rw_recursed(rw))
1377                         db_printf(" recursed: %u\n", rw->rw_recurse);
1378         }
1379         db_printf(" waiters: ");
1380         switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
1381         case RW_LOCK_READ_WAITERS:
1382                 db_printf("readers\n");
1383                 break;
1384         case RW_LOCK_WRITE_WAITERS:
1385                 db_printf("writers\n");
1386                 break;
1387         case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
1388                 db_printf("readers and writers\n");
1389                 break;
1390         default:
1391                 db_printf("none\n");
1392                 break;
1393         }
1394 }
1395
1396 #endif