1 /*-
2  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /*
28  * Machine independent bits of reader/writer lock implementation.
29  */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include "opt_ddb.h"
35 #include "opt_hwpmc_hooks.h"
36 #include "opt_no_adaptive_rwlocks.h"
37
38 #include <sys/param.h>
39 #include <sys/kdb.h>
40 #include <sys/ktr.h>
41 #include <sys/kernel.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/proc.h>
45 #include <sys/rwlock.h>
46 #include <sys/sched.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/turnstile.h>
51
52 #include <machine/cpu.h>
53
54 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
55 #define ADAPTIVE_RWLOCKS
56 #endif
57
58 #ifdef HWPMC_HOOKS
59 #include <sys/pmckern.h>
60 PMC_SOFT_DECLARE( , , lock, failed);
61 #endif
62
63 /*
64  * Return the rwlock address when the lock cookie address is provided.
65  * This functionality assumes that struct rwlock has a member named rw_lock.
66  */
67 #define rwlock2rw(c)    (__containerof(c, struct rwlock, rw_lock))
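/*
 * Illustrative sketch, not part of the implementation: the cookie passed to
 * the _rw_*_cookie() functions below is the address of the rw_lock word, so
 * the containing lock can always be recovered from it.  The lock name here
 * is a placeholder.
 *
 *	static struct rwlock example_rw;
 *	volatile uintptr_t *c = &example_rw.rw_lock;
 *	MPASS(rwlock2rw(c) == &example_rw);
 */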
68
69 #ifdef DDB
70 #include <ddb/ddb.h>
71
72 static void     db_show_rwlock(const struct lock_object *lock);
73 #endif
74 static void     assert_rw(const struct lock_object *lock, int what);
75 static void     lock_rw(struct lock_object *lock, uintptr_t how);
76 #ifdef KDTRACE_HOOKS
77 static int      owner_rw(const struct lock_object *lock, struct thread **owner);
78 #endif
79 static uintptr_t unlock_rw(struct lock_object *lock);
80
81 struct lock_class lock_class_rw = {
82         .lc_name = "rw",
83         .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
84         .lc_assert = assert_rw,
85 #ifdef DDB
86         .lc_ddb_show = db_show_rwlock,
87 #endif
88         .lc_lock = lock_rw,
89         .lc_unlock = unlock_rw,
90 #ifdef KDTRACE_HOOKS
91         .lc_owner = owner_rw,
92 #endif
93 };
94
95 #ifdef ADAPTIVE_RWLOCKS
96 static int __read_frequently rowner_retries = 10;
97 static int __read_frequently rowner_loops = 10000;
98 static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
99     "rwlock debugging");
100 SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
101 SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
102
103 static struct lock_delay_config __read_frequently rw_delay;
104
105 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
106     0, "");
107 SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
108     0, "");
109
110 LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
111 #endif
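/*
 * When ADAPTIVE_RWLOCKS is in effect, the adaptive spinning behaviour can be
 * inspected and tuned at run time through the debug.rwlock sysctl tree
 * declared above.  Illustrative sysctl(8) usage; the values are examples
 * only, not recommendations:
 *
 *	# sysctl debug.rwlock
 *	# sysctl debug.rwlock.retry=20
 *	# sysctl debug.rwlock.loops=20000
 */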
112
113 /*
114  * Return a pointer to the owning thread if the lock is write-locked or
115  * NULL if the lock is unlocked or read-locked.
116  */
117
118 #define lv_rw_wowner(v)                                                 \
119         ((v) & RW_LOCK_READ ? NULL :                                    \
120          (struct thread *)RW_OWNER((v)))
121
122 #define rw_wowner(rw)   lv_rw_wowner(RW_READ_VALUE(rw))
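/*
 * A minimal sketch of how the lock word decodes under the macros above (the
 * authoritative encoding lives in sys/rwlock.h; "rw" is a placeholder
 * pointer): when RW_LOCK_READ is set the word carries a reader count,
 * otherwise it carries the owning thread pointer plus flag bits.
 *
 *	uintptr_t v = RW_READ_VALUE(rw);
 *	if (v & RW_LOCK_READ)
 *		printf("read-locked, %ju reader(s)\n", (uintmax_t)RW_READERS(v));
 *	else if (lv_rw_wowner(v) != NULL)
 *		printf("write-locked by %p\n", lv_rw_wowner(v));
 */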
123
124 /*
125  * Returns true if the write owner is recursed.  Write ownership is not
126  * assured here and should be checked beforehand.
127  */
128 #define rw_recursed(rw)         ((rw)->rw_recurse != 0)
129
130 /*
131  * Return true if curthread holds the lock.
132  */
133 #define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)
134
135 /*
136  * Return a pointer to the owning thread for this lock who should receive
137  * any priority lent by threads that block on this lock.  Currently this
138  * is identical to rw_wowner().
139  */
140 #define rw_owner(rw)            rw_wowner(rw)
141
142 #ifndef INVARIANTS
143 #define __rw_assert(c, what, file, line)
144 #endif
145
146 void
147 assert_rw(const struct lock_object *lock, int what)
148 {
149
150         rw_assert((const struct rwlock *)lock, what);
151 }
152
153 void
154 lock_rw(struct lock_object *lock, uintptr_t how)
155 {
156         struct rwlock *rw;
157
158         rw = (struct rwlock *)lock;
159         if (how)
160                 rw_rlock(rw);
161         else
162                 rw_wlock(rw);
163 }
164
165 uintptr_t
166 unlock_rw(struct lock_object *lock)
167 {
168         struct rwlock *rw;
169
170         rw = (struct rwlock *)lock;
171         rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
172         if (rw->rw_lock & RW_LOCK_READ) {
173                 rw_runlock(rw);
174                 return (1);
175         } else {
176                 rw_wunlock(rw);
177                 return (0);
178         }
179 }
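/*
 * lock_rw() and unlock_rw() above are the lc_lock/lc_unlock methods installed
 * in lock_class_rw.  A sketch of the intended pairing: generic code that must
 * temporarily drop a held rwlock feeds the value returned by unlock_rw() back
 * into lock_rw() to restore the original mode (the lock name is a
 * placeholder):
 *
 *	uintptr_t how;
 *
 *	how = unlock_rw(&example_rw.lock_object);	(returns 1 if read-locked)
 *	...run or sleep without the lock held...
 *	lock_rw(&example_rw.lock_object, how);		(reacquires the same mode)
 */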
180
181 #ifdef KDTRACE_HOOKS
182 int
183 owner_rw(const struct lock_object *lock, struct thread **owner)
184 {
185         const struct rwlock *rw = (const struct rwlock *)lock;
186         uintptr_t x = rw->rw_lock;
187
188         *owner = rw_wowner(rw);
189         return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
190             (*owner != NULL));
191 }
192 #endif
193
194 void
195 _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
196 {
197         struct rwlock *rw;
198         int flags;
199
200         rw = rwlock2rw(c);
201
202         MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
203             RW_RECURSE | RW_NEW)) == 0);
204         ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
205             ("%s: rw_lock not aligned for %s: %p", __func__, name,
206             &rw->rw_lock));
207
208         flags = LO_UPGRADABLE;
209         if (opts & RW_DUPOK)
210                 flags |= LO_DUPOK;
211         if (opts & RW_NOPROFILE)
212                 flags |= LO_NOPROFILE;
213         if (!(opts & RW_NOWITNESS))
214                 flags |= LO_WITNESS;
215         if (opts & RW_RECURSE)
216                 flags |= LO_RECURSABLE;
217         if (opts & RW_QUIET)
218                 flags |= LO_QUIET;
219         if (opts & RW_NEW)
220                 flags |= LO_NEW;
221
222         lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
223         rw->rw_lock = RW_UNLOCKED;
224         rw->rw_recurse = 0;
225 }
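/*
 * A minimal sketch of typical consumer usage of the rwlock(9) interface that
 * the routines in this file implement (the lock and data names are
 * placeholders):
 *
 *	static struct rwlock foo_lock;
 *
 *	rw_init(&foo_lock, "foo data");
 *
 *	rw_rlock(&foo_lock);
 *	...read the shared data...
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);
 *	...modify the shared data...
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 */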
226
227 void
228 _rw_destroy(volatile uintptr_t *c)
229 {
230         struct rwlock *rw;
231
232         rw = rwlock2rw(c);
233
234         KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
235         KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
236         rw->rw_lock = RW_DESTROYED;
237         lock_destroy(&rw->lock_object);
238 }
239
240 void
241 rw_sysinit(void *arg)
242 {
243         struct rw_args *args = arg;
244
245         rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
246 }
247
248 void
249 rw_sysinit_flags(void *arg)
250 {
251         struct rw_args_flags *args = arg;
252
253         rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
254             args->ra_flags);
255 }
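/*
 * These handlers back the RW_SYSINIT() and RW_SYSINIT_FLAGS() macros from
 * sys/rwlock.h, which arrange for a lock to be initialized automatically
 * during boot.  Illustrative use (the names are placeholders):
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */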
256
257 int
258 _rw_wowned(const volatile uintptr_t *c)
259 {
260
261         return (rw_wowner(rwlock2rw(c)) == curthread);
262 }
263
264 void
265 _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
266 {
267         struct rwlock *rw;
268         uintptr_t tid, v;
269
270         rw = rwlock2rw(c);
271
272         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
273             !TD_IS_IDLETHREAD(curthread),
274             ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
275             curthread, rw->lock_object.lo_name, file, line));
276         KASSERT(rw->rw_lock != RW_DESTROYED,
277             ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
278         WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
279             line, NULL);
280         tid = (uintptr_t)curthread;
281         v = RW_UNLOCKED;
282         if (!_rw_write_lock_fetch(rw, &v, tid))
283                 _rw_wlock_hard(rw, v, tid, file, line);
284         else
285                 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
286                     0, 0, file, line, LOCKSTAT_WRITER);
287
288         LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
289         WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
290         TD_LOCKS_INC(curthread);
291 }
292
293 int
294 __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
295 {
296         struct rwlock *rw;
297         struct thread *td;
298         uintptr_t tid, v;
299         int rval;
300         bool recursed;
301
302         td = curthread;
303         tid = (uintptr_t)td;
304         if (SCHEDULER_STOPPED_TD(td))
305                 return (1);
306
307         rw = rwlock2rw(c);
308
309         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
310             ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
311             curthread, rw->lock_object.lo_name, file, line));
312         KASSERT(rw->rw_lock != RW_DESTROYED,
313             ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
314
315         rval = 1;
316         recursed = false;
317         v = RW_UNLOCKED;
318         for (;;) {
319                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
320                         break;
321                 if (v == RW_UNLOCKED)
322                         continue;
323                 if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) {
324                         rw->rw_recurse++;
325                         atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
326                         break;
327                 }
328                 rval = 0;
329                 break;
330         }
331
332         LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
333         if (rval) {
334                 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
335                     file, line);
336                 if (!recursed)
337                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
338                             rw, 0, 0, file, line, LOCKSTAT_WRITER);
339                 TD_LOCKS_INC(curthread);
340         }
341         return (rval);
342 }
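/*
 * A sketch of the non-blocking acquire pattern served by the function above,
 * via its rw_try_wlock() wrapper (the lock name is a placeholder):
 *
 *	if (rw_try_wlock(&foo_lock)) {
 *		...the write lock is held...
 *		rw_wunlock(&foo_lock);
 *	} else {
 *		...the lock was busy; fall back without blocking...
 *	}
 */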
343
344 void
345 _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
346 {
347         struct rwlock *rw;
348
349         rw = rwlock2rw(c);
350
351         KASSERT(rw->rw_lock != RW_DESTROYED,
352             ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
353         __rw_assert(c, RA_WLOCKED, file, line);
354         WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
355         LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
356             line);
357
358 #ifdef LOCK_PROFILING
359         _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);
360 #else
361         __rw_wunlock(rw, curthread, file, line);
362 #endif
363
364         TD_LOCKS_DEC(curthread);
365 }
366
367 /*
368  * Determines whether a new reader can acquire a lock.  Succeeds if the
369  * reader already owns a read lock and the lock is locked for read, to
370  * prevent deadlock from reader recursion.  Also succeeds if the lock
371  * is unlocked or read-locked and has no write waiters or spinners.
372  * Otherwise it fails, which gives waiting writers priority over new readers.
373  */
374 #define RW_CAN_READ(td, _rw)                                            \
375     (((_rw) & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==\
376     RW_LOCK_READ || ((td)->td_rw_rlocks && (_rw) & RW_LOCK_READ))
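/*
 * A few illustrative evaluations of RW_CAN_READ() (assumed values, for
 * exposition only):
 *
 *	_rw = RW_READERS_LOCK(2)			 -> true (read-locked,
 *							    no write waiters/spinner)
 *	_rw = RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS -> true only if td
 *							    already holds read locks
 *	_rw = (uintptr_t)owning thread (write-locked)	 -> false
 */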
377
378 static bool __always_inline
379 __rw_rlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp,
380     const char *file, int line)
381 {
382
383         /*
384          * Handle the easy case.  If no other thread has a write
385          * lock, then try to bump up the count of read locks.  Note
386          * that we have to preserve the current state of the
387          * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
388          * read lock, then rw_lock must have changed, so restart
389          * the loop.  Note that this handles the case of a
390          * completely unlocked rwlock since such a lock is encoded
391          * as a read lock with no waiters.
392          */
393         while (RW_CAN_READ(td, *vp)) {
394                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp,
395                         *vp + RW_ONE_READER)) {
396                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
397                                 CTR4(KTR_LOCK,
398                                     "%s: %p succeed %p -> %p", __func__,
399                                     rw, (void *)*vp,
400                                     (void *)(*vp + RW_ONE_READER));
401                         td->td_rw_rlocks++;
402                         return (true);
403                 }
404         }
405         return (false);
406 }
407
408 static void __noinline
409 __rw_rlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
410     const char *file, int line)
411 {
412         struct rwlock *rw;
413         struct turnstile *ts;
414 #ifdef ADAPTIVE_RWLOCKS
415         volatile struct thread *owner;
416         int spintries = 0;
417         int i, n;
418 #endif
419 #ifdef LOCK_PROFILING
420         uint64_t waittime = 0;
421         int contested = 0;
422 #endif
423 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
424         struct lock_delay_arg lda;
425 #endif
426 #ifdef KDTRACE_HOOKS
427         uintptr_t state;
428         u_int sleep_cnt = 0;
429         int64_t sleep_time = 0;
430         int64_t all_time = 0;
431 #endif
432
433         if (SCHEDULER_STOPPED())
434                 return;
435
436 #if defined(ADAPTIVE_RWLOCKS)
437         lock_delay_arg_init(&lda, &rw_delay);
438 #elif defined(KDTRACE_HOOKS)
439         lock_delay_arg_init(&lda, NULL);
440 #endif
441         rw = rwlock2rw(c);
442
443 #ifdef KDTRACE_HOOKS
444         all_time -= lockstat_nsecs(&rw->lock_object);
445 #endif
446 #ifdef KDTRACE_HOOKS
447         state = v;
448 #endif
449         for (;;) {
450                 if (__rw_rlock_try(rw, td, &v, file, line))
451                         break;
452 #ifdef KDTRACE_HOOKS
453                 lda.spin_cnt++;
454 #endif
455 #ifdef HWPMC_HOOKS
456                 PMC_SOFT_CALL( , , lock, failed);
457 #endif
458                 lock_profile_obtain_lock_failed(&rw->lock_object,
459                     &contested, &waittime);
460
461 #ifdef ADAPTIVE_RWLOCKS
462                 /*
463                  * If the owner is running on another CPU, spin until
464                  * the owner stops running or the state of the lock
465                  * changes.
466                  */
467                 if ((v & RW_LOCK_READ) == 0) {
468                         owner = (struct thread *)RW_OWNER(v);
469                         if (TD_IS_RUNNING(owner)) {
470                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
471                                         CTR3(KTR_LOCK,
472                                             "%s: spinning on %p held by %p",
473                                             __func__, rw, owner);
474                                 KTR_STATE1(KTR_SCHED, "thread",
475                                     sched_tdname(curthread), "spinning",
476                                     "lockname:\"%s\"", rw->lock_object.lo_name);
477                                 do {
478                                         lock_delay(&lda);
479                                         v = RW_READ_VALUE(rw);
480                                         owner = lv_rw_wowner(v);
481                                 } while (owner != NULL && TD_IS_RUNNING(owner));
482                                 KTR_STATE0(KTR_SCHED, "thread",
483                                     sched_tdname(curthread), "running");
484                                 continue;
485                         }
486                 } else if (spintries < rowner_retries) {
487                         spintries++;
488                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
489                             "spinning", "lockname:\"%s\"",
490                             rw->lock_object.lo_name);
491                         for (i = 0; i < rowner_loops; i += n) {
492                                 n = RW_READERS(v);
493                                 lock_delay_spin(n);
494                                 v = RW_READ_VALUE(rw);
495                                 if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(td, v))
496                                         break;
497                         }
498 #ifdef KDTRACE_HOOKS
499                         lda.spin_cnt += rowner_loops - i;
500 #endif
501                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
502                             "running");
503                         if (i != rowner_loops)
504                                 continue;
505                 }
506 #endif
507
508                 /*
509                  * Okay, now it's the hard case.  Some other thread already
510                  * has a write lock or there are write waiters present,
511                  * acquire the turnstile lock so we can begin the process
512                  * of blocking.
513                  */
514                 ts = turnstile_trywait(&rw->lock_object);
515
516                 /*
517                  * The lock might have been released while we spun, so
518                  * recheck its state and restart the loop if needed.
519                  */
520                 v = RW_READ_VALUE(rw);
521                 if (RW_CAN_READ(td, v)) {
522                         turnstile_cancel(ts);
523                         continue;
524                 }
525
526 #ifdef ADAPTIVE_RWLOCKS
527                 /*
528                  * The current lock owner might have started executing
529                  * on another CPU (or the lock could have changed
530                  * owners) while we were waiting on the turnstile
531                  * chain lock.  If so, drop the turnstile lock and try
532                  * again.
533                  */
534                 if ((v & RW_LOCK_READ) == 0) {
535                         owner = (struct thread *)RW_OWNER(v);
536                         if (TD_IS_RUNNING(owner)) {
537                                 turnstile_cancel(ts);
538                                 continue;
539                         }
540                 }
541 #endif
542
543                 /*
544                  * The lock is held in write mode or it already has waiters.
545                  */
546                 MPASS(!RW_CAN_READ(td, v));
547
548                 /*
549                  * If the RW_LOCK_READ_WAITERS flag is already set, then
550                  * we can go ahead and block.  If it is not set then try
551                  * to set it.  If we fail to set it drop the turnstile
552                  * lock and restart the loop.
553                  */
554                 if (!(v & RW_LOCK_READ_WAITERS)) {
555                         if (!atomic_cmpset_ptr(&rw->rw_lock, v,
556                             v | RW_LOCK_READ_WAITERS)) {
557                                 turnstile_cancel(ts);
558                                 v = RW_READ_VALUE(rw);
559                                 continue;
560                         }
561                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
562                                 CTR2(KTR_LOCK, "%s: %p set read waiters flag",
563                                     __func__, rw);
564                 }
565
566                 /*
567                  * We were unable to acquire the lock and the read waiters
568                  * flag is set, so we must block on the turnstile.
569                  */
570                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
571                         CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
572                             rw);
573 #ifdef KDTRACE_HOOKS
574                 sleep_time -= lockstat_nsecs(&rw->lock_object);
575 #endif
576                 turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
577 #ifdef KDTRACE_HOOKS
578                 sleep_time += lockstat_nsecs(&rw->lock_object);
579                 sleep_cnt++;
580 #endif
581                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
582                         CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
583                             __func__, rw);
584                 v = RW_READ_VALUE(rw);
585         }
586 #ifdef KDTRACE_HOOKS
587         all_time += lockstat_nsecs(&rw->lock_object);
588         if (sleep_time)
589                 LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
590                     LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
591                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
592
593         /* Record only the loops spinning and not sleeping. */
594         if (lda.spin_cnt > sleep_cnt)
595                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
596                     LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
597                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
598 #endif
599         /*
600          * TODO: acquire "owner of record" here.  Here be turnstile dragons
601          * however.  turnstiles don't like owners changing between calls to
602          * turnstile_wait() currently.
603          */
604         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
605             waittime, file, line, LOCKSTAT_READER);
606 }
607
608 void
609 __rw_rlock(volatile uintptr_t *c, const char *file, int line)
610 {
611         struct rwlock *rw;
612         struct thread *td;
613         uintptr_t v;
614
615         td = curthread;
616         rw = rwlock2rw(c);
617
618         KASSERT(kdb_active != 0 || SCHEDULER_STOPPED_TD(td) ||
619             !TD_IS_IDLETHREAD(td),
620             ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
621             td, rw->lock_object.lo_name, file, line));
622         KASSERT(rw->rw_lock != RW_DESTROYED,
623             ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
624         KASSERT(rw_wowner(rw) != td,
625             ("rw_rlock: wlock already held for %s @ %s:%d",
626             rw->lock_object.lo_name, file, line));
627         WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
628
629         v = RW_READ_VALUE(rw);
630         if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__acquire) ||
631             !__rw_rlock_try(rw, td, &v, file, line)))
632                 __rw_rlock_hard(c, td, v, file, line);
633
634         LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
635         WITNESS_LOCK(&rw->lock_object, 0, file, line);
636         TD_LOCKS_INC(curthread);
637 }
638
639 int
640 __rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
641 {
642         struct rwlock *rw;
643         uintptr_t x;
644
645         if (SCHEDULER_STOPPED())
646                 return (1);
647
648         rw = rwlock2rw(c);
649
650         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
651             ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
652             curthread, rw->lock_object.lo_name, file, line));
653
654         x = rw->rw_lock;
655         for (;;) {
656                 KASSERT(rw->rw_lock != RW_DESTROYED,
657                     ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
658                 if (!(x & RW_LOCK_READ))
659                         break;
660                 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) {
661                         LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
662                             line);
663                         WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
664                         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
665                             rw, 0, 0, file, line, LOCKSTAT_READER);
666                         TD_LOCKS_INC(curthread);
667                         curthread->td_rw_rlocks++;
668                         return (1);
669                 }
670         }
671
672         LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
673         return (0);
674 }
675
676 static bool __always_inline
677 __rw_runlock_try(struct rwlock *rw, struct thread *td, uintptr_t *vp)
678 {
679
680         for (;;) {
681                 /*
682                  * See if there is more than one read lock held.  If so,
683                  * just drop one and return.
684                  */
685                 if (RW_READERS(*vp) > 1) {
686                         if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
687                             *vp - RW_ONE_READER)) {
688                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
689                                         CTR4(KTR_LOCK,
690                                             "%s: %p succeeded %p -> %p",
691                                             __func__, rw, (void *)*vp,
692                                             (void *)(*vp - RW_ONE_READER));
693                                 td->td_rw_rlocks--;
694                                 return (true);
695                         }
696                         continue;
697                 }
698                 /*
699                  * If there aren't any waiters for a write lock, then try
700                  * to drop it quickly.
701                  */
702                 if (!(*vp & RW_LOCK_WAITERS)) {
703                         MPASS((*vp & ~RW_LOCK_WRITE_SPINNER) ==
704                             RW_READERS_LOCK(1));
705                         if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp,
706                             RW_UNLOCKED)) {
707                                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
708                                         CTR2(KTR_LOCK, "%s: %p last succeeded",
709                                             __func__, rw);
710                                 td->td_rw_rlocks--;
711                                 return (true);
712                         }
713                         continue;
714                 }
715                 break;
716         }
717         return (false);
718 }
719
720 static void __noinline
721 __rw_runlock_hard(volatile uintptr_t *c, struct thread *td, uintptr_t v,
722     const char *file, int line)
723 {
724         struct rwlock *rw;
725         struct turnstile *ts;
726         uintptr_t x, queue;
727
728         if (SCHEDULER_STOPPED())
729                 return;
730
731         rw = rwlock2rw(c);
732
733         for (;;) {
734                 if (__rw_runlock_try(rw, td, &v))
735                         break;
736
737                 /*
738                  * Ok, we know we have waiters and we think we are the
739                  * last reader, so grab the turnstile lock.
740                  */
741                 turnstile_chain_lock(&rw->lock_object);
742                 v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
743                 MPASS(v & RW_LOCK_WAITERS);
744
745                 /*
746                  * Try to drop our lock, leaving the lock in an unlocked
747                  * state.
748                  *
749                  * If you wanted to do explicit lock handoff you'd have to
750                  * do it here.  You'd also want to use turnstile_signal()
751                  * and you'd have to handle the race where a higher
752                  * priority thread blocks on the write lock before the
753                  * thread you wakeup actually runs and have the new thread
754                  * "steal" the lock.  For now it's a lot simpler to just
755                  * wakeup all of the waiters.
756                  *
757                  * As above, if we fail, then another thread might have
758                  * acquired a read lock, so drop the turnstile lock and
759                  * restart.
760                  */
761                 x = RW_UNLOCKED;
762                 if (v & RW_LOCK_WRITE_WAITERS) {
763                         queue = TS_EXCLUSIVE_QUEUE;
764                         x |= (v & RW_LOCK_READ_WAITERS);
765                 } else
766                         queue = TS_SHARED_QUEUE;
767                 if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
768                     x)) {
769                         turnstile_chain_unlock(&rw->lock_object);
770                         v = RW_READ_VALUE(rw);
771                         continue;
772                 }
773                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
774                         CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
775                             __func__, rw);
776
777                 /*
778                  * Ok.  The lock is released and all that's left is to
779                  * wake up the waiters.  Note that the lock might not be
780                  * free anymore, but in that case the writers will just
781                  * block again if they run before the new lock holder(s)
782                  * release the lock.
783                  */
784                 ts = turnstile_lookup(&rw->lock_object);
785                 MPASS(ts != NULL);
786                 turnstile_broadcast(ts, queue);
787                 turnstile_unpend(ts, TS_SHARED_LOCK);
788                 turnstile_chain_unlock(&rw->lock_object);
789                 td->td_rw_rlocks--;
790                 break;
791         }
792         LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
793 }
794
795 void
796 _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
797 {
798         struct rwlock *rw;
799         struct thread *td;
800         uintptr_t v;
801
802         rw = rwlock2rw(c);
803
804         KASSERT(rw->rw_lock != RW_DESTROYED,
805             ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
806         __rw_assert(c, RA_RLOCKED, file, line);
807         WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
808         LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
809
810         td = curthread;
811         v = RW_READ_VALUE(rw);
812
813         if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(rw__release) ||
814             !__rw_runlock_try(rw, td, &v)))
815                 __rw_runlock_hard(c, td, v, file, line);
816
817         TD_LOCKS_DEC(curthread);
818 }
819
820 /*
821  * This function is called when we are unable to obtain a write lock on the
822  * first try.  This means that at least one other thread holds either a
823  * read or write lock.
824  */
825 void
826 __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
827     const char *file, int line)
828 {
829         struct rwlock *rw;
830         struct turnstile *ts;
831 #ifdef ADAPTIVE_RWLOCKS
832         volatile struct thread *owner;
833         int spintries = 0;
834         int i, n;
835 #endif
836         uintptr_t x;
837 #ifdef LOCK_PROFILING
838         uint64_t waittime = 0;
839         int contested = 0;
840 #endif
841 #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
842         struct lock_delay_arg lda;
843 #endif
844 #ifdef KDTRACE_HOOKS
845         uintptr_t state;
846         u_int sleep_cnt = 0;
847         int64_t sleep_time = 0;
848         int64_t all_time = 0;
849 #endif
850
851         if (SCHEDULER_STOPPED())
852                 return;
853
854 #if defined(ADAPTIVE_RWLOCKS)
855         lock_delay_arg_init(&lda, &rw_delay);
856 #elif defined(KDTRACE_HOOKS)
857         lock_delay_arg_init(&lda, NULL);
858 #endif
859         rw = rwlock2rw(c);
860         if (__predict_false(v == RW_UNLOCKED))
861                 v = RW_READ_VALUE(rw);
862
863         if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
864                 KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
865                     ("%s: recursing but non-recursive rw %s @ %s:%d\n",
866                     __func__, rw->lock_object.lo_name, file, line));
867                 rw->rw_recurse++;
868                 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
869                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
870                         CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
871                 return;
872         }
873
874         if (LOCK_LOG_TEST(&rw->lock_object, 0))
875                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
876                     rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
877
878 #ifdef KDTRACE_HOOKS
879         all_time -= lockstat_nsecs(&rw->lock_object);
880         state = v;
881 #endif
882         for (;;) {
883                 if (v == RW_UNLOCKED) {
884                         if (_rw_write_lock_fetch(rw, &v, tid))
885                                 break;
886                         continue;
887                 }
888 #ifdef KDTRACE_HOOKS
889                 lda.spin_cnt++;
890 #endif
891 #ifdef HWPMC_HOOKS
892                 PMC_SOFT_CALL( , , lock, failed);
893 #endif
894                 lock_profile_obtain_lock_failed(&rw->lock_object,
895                     &contested, &waittime);
896 #ifdef ADAPTIVE_RWLOCKS
897                 /*
898                  * If the lock is write locked and the owner is
899                  * running on another CPU, spin until the owner stops
900                  * running or the state of the lock changes.
901                  */
902                 owner = lv_rw_wowner(v);
903                 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
904                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
905                                 CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
906                                     __func__, rw, owner);
907                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
908                             "spinning", "lockname:\"%s\"",
909                             rw->lock_object.lo_name);
910                         do {
911                                 lock_delay(&lda);
912                                 v = RW_READ_VALUE(rw);
913                                 owner = lv_rw_wowner(v);
914                         } while (owner != NULL && TD_IS_RUNNING(owner));
915                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
916                             "running");
917                         continue;
918                 }
919                 if ((v & RW_LOCK_READ) && RW_READERS(v) &&
920                     spintries < rowner_retries) {
921                         if (!(v & RW_LOCK_WRITE_SPINNER)) {
922                                 if (!atomic_cmpset_ptr(&rw->rw_lock, v,
923                                     v | RW_LOCK_WRITE_SPINNER)) {
924                                         v = RW_READ_VALUE(rw);
925                                         continue;
926                                 }
927                         }
928                         spintries++;
929                         KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
930                             "spinning", "lockname:\"%s\"",
931                             rw->lock_object.lo_name);
932                         for (i = 0; i < rowner_loops; i += n) {
933                                 n = RW_READERS(v);
934                                 lock_delay_spin(n);
935                                 v = RW_READ_VALUE(rw);
936                                 if ((v & RW_LOCK_WRITE_SPINNER) == 0)
937                                         break;
938                         }
939                         KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
940                             "running");
941 #ifdef KDTRACE_HOOKS
942                         lda.spin_cnt += rowner_loops - i;
943 #endif
944                         if (i != rowner_loops)
945                                 continue;
946                 }
947 #endif
948                 ts = turnstile_trywait(&rw->lock_object);
949                 v = RW_READ_VALUE(rw);
950
951 #ifdef ADAPTIVE_RWLOCKS
952                 /*
953                  * The current lock owner might have started executing
954                  * on another CPU (or the lock could have changed
955                  * owners) while we were waiting on the turnstile
956                  * chain lock.  If so, drop the turnstile lock and try
957                  * again.
958                  */
959                 if (!(v & RW_LOCK_READ)) {
960                         owner = (struct thread *)RW_OWNER(v);
961                         if (TD_IS_RUNNING(owner)) {
962                                 turnstile_cancel(ts);
963                                 continue;
964                         }
965                 }
966 #endif
967                 /*
968                  * Check the waiters flags on this rwlock.
969                  * If the lock was released without leaving any pending
970                  * waiters queue, simply try to acquire it.
971                  * If a pending waiters queue is present, claim lock
972                  * ownership and preserve the pending queue.
973                  */
974                 x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
975                 if ((v & ~x) == RW_UNLOCKED) {
976                         x &= ~RW_LOCK_WRITE_SPINNER;
977                         if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
978                                 if (x)
979                                         turnstile_claim(ts);
980                                 else
981                                         turnstile_cancel(ts);
982                                 break;
983                         }
984                         turnstile_cancel(ts);
985                         v = RW_READ_VALUE(rw);
986                         continue;
987                 }
988                 /*
989                  * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
990                  * set it.  If we fail to set it, then loop back and try
991                  * again.
992                  */
993                 if (!(v & RW_LOCK_WRITE_WAITERS)) {
994                         if (!atomic_cmpset_ptr(&rw->rw_lock, v,
995                             v | RW_LOCK_WRITE_WAITERS)) {
996                                 turnstile_cancel(ts);
997                                 v = RW_READ_VALUE(rw);
998                                 continue;
999                         }
1000                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1001                                 CTR2(KTR_LOCK, "%s: %p set write waiters flag",
1002                                     __func__, rw);
1003                 }
1004                 /*
1005                  * We were unable to acquire the lock and the write waiters
1006                  * flag is set, so we must block on the turnstile.
1007                  */
1008                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1009                         CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
1010                             rw);
1011 #ifdef KDTRACE_HOOKS
1012                 sleep_time -= lockstat_nsecs(&rw->lock_object);
1013 #endif
1014                 turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
1015 #ifdef KDTRACE_HOOKS
1016                 sleep_time += lockstat_nsecs(&rw->lock_object);
1017                 sleep_cnt++;
1018 #endif
1019                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1020                         CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
1021                             __func__, rw);
1022 #ifdef ADAPTIVE_RWLOCKS
1023                 spintries = 0;
1024 #endif
1025                 v = RW_READ_VALUE(rw);
1026         }
1027 #ifdef KDTRACE_HOOKS
1028         all_time += lockstat_nsecs(&rw->lock_object);
1029         if (sleep_time)
1030                 LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
1031                     LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
1032                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
1033
1034         /* Record only the loops spinning and not sleeping. */
1035         if (lda.spin_cnt > sleep_cnt)
1036                 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
1037                     LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
1038                     (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
1039 #endif
1040         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
1041             waittime, file, line, LOCKSTAT_WRITER);
1042 }
1043
1044 /*
1045  * This function is called if lockstat is active or the first try at releasing
1046  * a write lock failed.  The latter means that the lock is recursed or one of
1047  * the two waiter bits is set, indicating that at least one thread is waiting
1048  * on this lock.
1049  */
1050 void
1051 __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
1052     int line)
1053 {
1054         struct rwlock *rw;
1055         struct turnstile *ts;
1056         uintptr_t v;
1057         int queue;
1058
1059         if (SCHEDULER_STOPPED())
1060                 return;
1061
1062         rw = rwlock2rw(c);
1063         v = RW_READ_VALUE(rw);
1064         if (v & RW_LOCK_WRITER_RECURSED) {
1065                 if (--(rw->rw_recurse) == 0)
1066                         atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
1067                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
1068                         CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
1069                 return;
1070         }
1071
1072         LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_WRITER);
1073         if (v == tid && _rw_write_unlock(rw, tid))
1074                 return;
1075
1076         KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
1077             ("%s: neither of the waiter flags are set", __func__));
1078
1079         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1080                 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
1081
1082         turnstile_chain_lock(&rw->lock_object);
1083         ts = turnstile_lookup(&rw->lock_object);
1084         MPASS(ts != NULL);
1085
1086         /*
1087          * Use the same algorithm as sx locks for now.  Prefer waking up shared
1088          * waiters, if there are any, over writers.  This is probably not ideal.
1089          *
1090          * 'v' is the value we are going to write back to rw_lock.  If we
1091          * have waiters on both queues, we need to preserve the state of
1092          * the waiter flag for the queue we don't wake up.  For now this is
1093          * hardcoded for the algorithm mentioned above.
1094          *
1095          * In the case of both readers and writers waiting we wakeup the
1096          * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
1097          * new writer comes in before a reader it will claim the lock up
1098          * above.  There is probably a potential priority inversion in
1099          * there that could be worked around either by waking both queues
1100          * of waiters or doing some complicated lock handoff gymnastics.
1101          */
1102         v = RW_UNLOCKED;
1103         if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
1104                 queue = TS_EXCLUSIVE_QUEUE;
1105                 v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
1106         } else
1107                 queue = TS_SHARED_QUEUE;
1108
1109         /* Wake up all waiters for the specific queue. */
1110         if (LOCK_LOG_TEST(&rw->lock_object, 0))
1111                 CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
1112                     queue == TS_SHARED_QUEUE ? "read" : "write");
1113         turnstile_broadcast(ts, queue);
1114         atomic_store_rel_ptr(&rw->rw_lock, v);
1115         turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1116         turnstile_chain_unlock(&rw->lock_object);
1117 }
1118
1119 /*
1120  * Attempt to do a non-blocking upgrade from a read lock to a write
1121  * lock.  This will only succeed if this thread holds a single read
1122  * lock.  Returns true if the upgrade succeeded and false otherwise.
1123  */
1124 int
1125 __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
1126 {
1127         struct rwlock *rw;
1128         uintptr_t v, x, tid;
1129         struct turnstile *ts;
1130         int success;
1131
1132         if (SCHEDULER_STOPPED())
1133                 return (1);
1134
1135         rw = rwlock2rw(c);
1136
1137         KASSERT(rw->rw_lock != RW_DESTROYED,
1138             ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
1139         __rw_assert(c, RA_RLOCKED, file, line);
1140
1141         /*
1142          * Attempt to switch from one reader to a writer.  If there
1143          * are any write waiters, then we will have to lock the
1144          * turnstile first to prevent races with another writer
1145          * calling turnstile_wait() before we have claimed this
1146          * turnstile.  So, do the simple case of no waiters first.
1147          */
1148         tid = (uintptr_t)curthread;
1149         success = 0;
1150         for (;;) {
1151                 v = rw->rw_lock;
1152                 if (RW_READERS(v) > 1)
1153                         break;
1154                 if (!(v & RW_LOCK_WAITERS)) {
1155                         success = atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid);
1156                         if (!success)
1157                                 continue;
1158                         break;
1159                 }
1160
1161                 /*
1162                  * Ok, we think we have waiters, so lock the turnstile.
1163                  */
1164                 ts = turnstile_trywait(&rw->lock_object);
1165                 v = rw->rw_lock;
1166                 if (RW_READERS(v) > 1) {
1167                         turnstile_cancel(ts);
1168                         break;
1169                 }
1170                 /*
1171                  * Try to switch from one reader to a writer again.  This time
1172                  * we honor the current state of the waiters flags.
1173                  * If we obtain the lock with the flags set, then claim
1174                  * ownership of the turnstile.
1175                  */
1176                 x = rw->rw_lock & RW_LOCK_WAITERS;
1177                 success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
1178                 if (success) {
1179                         if (x)
1180                                 turnstile_claim(ts);
1181                         else
1182                                 turnstile_cancel(ts);
1183                         break;
1184                 }
1185                 turnstile_cancel(ts);
1186         }
1187         LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
1188         if (success) {
1189                 curthread->td_rw_rlocks--;
1190                 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
1191                     file, line);
1192                 LOCKSTAT_RECORD0(rw__upgrade, rw);
1193         }
1194         return (success);
1195 }
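/*
 * A sketch of the upgrade pattern served by the function above, via its
 * rw_try_upgrade() wrapper (the lock name is a placeholder):
 *
 *	rw_rlock(&foo_lock);
 *	...decide the data needs to be modified...
 *	if (rw_try_upgrade(&foo_lock)) {
 *		...the write lock is now held...
 *		rw_wunlock(&foo_lock);
 *	} else {
 *		...still read-locked; drop it and take the write lock,
 *		   then revalidate, since the data may have changed...
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		...
 *		rw_wunlock(&foo_lock);
 *	}
 */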
1196
1197 /*
1198  * Downgrade a write lock into a single read lock.
1199  */
1200 void
1201 __rw_downgrade(volatile uintptr_t *c, const char *file, int line)
1202 {
1203         struct rwlock *rw;
1204         struct turnstile *ts;
1205         uintptr_t tid, v;
1206         int rwait, wwait;
1207
1208         if (SCHEDULER_STOPPED())
1209                 return;
1210
1211         rw = rwlock2rw(c);
1212
1213         KASSERT(rw->rw_lock != RW_DESTROYED,
1214             ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
1215         __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
1216 #ifndef INVARIANTS
1217         if (rw_recursed(rw))
1218                 panic("downgrade of a recursed lock");
1219 #endif
1220
1221         WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);
1222
1223         /*
1224          * Convert from a writer to a single reader.  First we handle
1225          * the easy case with no waiters.  If there are any waiters, we
1226          * lock the turnstile and "disown" the lock.
1227          */
1228         tid = (uintptr_t)curthread;
1229         if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
1230                 goto out;
1231
1232         /*
1233          * Ok, we think we have waiters, so lock the turnstile so we can
1234          * read the waiter flags without any races.
1235          */
1236         turnstile_chain_lock(&rw->lock_object);
1237         v = rw->rw_lock & RW_LOCK_WAITERS;
1238         rwait = v & RW_LOCK_READ_WAITERS;
1239         wwait = v & RW_LOCK_WRITE_WAITERS;
1240         MPASS(rwait | wwait);
1241
1242         /*
1243          * Downgrade from a write lock while preserving waiters flag
1244          * and give up ownership of the turnstile.
1245          */
1246         ts = turnstile_lookup(&rw->lock_object);
1247         MPASS(ts != NULL);
1248         if (!wwait)
1249                 v &= ~RW_LOCK_READ_WAITERS;
1250         atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
1251         /*
1252          * Wake other readers if there are no writers pending.  Otherwise they
1253          * won't be able to acquire the lock anyway.
1254          */
1255         if (rwait && !wwait) {
1256                 turnstile_broadcast(ts, TS_SHARED_QUEUE);
1257                 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
1258         } else
1259                 turnstile_disown(ts);
1260         turnstile_chain_unlock(&rw->lock_object);
1261 out:
1262         curthread->td_rw_rlocks++;
1263         LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
1264         LOCKSTAT_RECORD0(rw__downgrade, rw);
1265 }
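/*
 * A sketch of the downgrade pattern served by the function above, via its
 * rw_downgrade() wrapper (the lock name is a placeholder):
 *
 *	rw_wlock(&foo_lock);
 *	...perform the update...
 *	rw_downgrade(&foo_lock);
 *	...keep reading; other readers may now share the lock...
 *	rw_runlock(&foo_lock);
 */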
1266
1267 #ifdef INVARIANT_SUPPORT
1268 #ifndef INVARIANTS
1269 #undef __rw_assert
1270 #endif
1271
1272 /*
1273  * In the non-WITNESS case, rw_assert() can only detect that at least
1274  * *some* thread owns an rlock, but it cannot guarantee that *this*
1275  * thread owns an rlock.
1276  */
1277 void
1278 __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
1279 {
1280         const struct rwlock *rw;
1281
1282         if (panicstr != NULL)
1283                 return;
1284
1285         rw = rwlock2rw(c);
1286
1287         switch (what) {
1288         case RA_LOCKED:
1289         case RA_LOCKED | RA_RECURSED:
1290         case RA_LOCKED | RA_NOTRECURSED:
1291         case RA_RLOCKED:
1292         case RA_RLOCKED | RA_RECURSED:
1293         case RA_RLOCKED | RA_NOTRECURSED:
1294 #ifdef WITNESS
1295                 witness_assert(&rw->lock_object, what, file, line);
1296 #else
1297                 /*
1298                  * If some other thread has a write lock or we have one
1299                  * and are asserting a read lock, fail.  Also, if no one
1300                  * has a lock at all, fail.
1301                  */
1302                 if (rw->rw_lock == RW_UNLOCKED ||
1303                     (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
1304                     rw_wowner(rw) != curthread)))
1305                         panic("Lock %s not %slocked @ %s:%d\n",
1306                             rw->lock_object.lo_name, (what & RA_RLOCKED) ?
1307                             "read " : "", file, line);
1308
1309                 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
1310                         if (rw_recursed(rw)) {
1311                                 if (what & RA_NOTRECURSED)
1312                                         panic("Lock %s recursed @ %s:%d\n",
1313                                             rw->lock_object.lo_name, file,
1314                                             line);
1315                         } else if (what & RA_RECURSED)
1316                                 panic("Lock %s not recursed @ %s:%d\n",
1317                                     rw->lock_object.lo_name, file, line);
1318                 }
1319 #endif
1320                 break;
1321         case RA_WLOCKED:
1322         case RA_WLOCKED | RA_RECURSED:
1323         case RA_WLOCKED | RA_NOTRECURSED:
1324                 if (rw_wowner(rw) != curthread)
1325                         panic("Lock %s not exclusively locked @ %s:%d\n",
1326                             rw->lock_object.lo_name, file, line);
1327                 if (rw_recursed(rw)) {
1328                         if (what & RA_NOTRECURSED)
1329                                 panic("Lock %s recursed @ %s:%d\n",
1330                                     rw->lock_object.lo_name, file, line);
1331                 } else if (what & RA_RECURSED)
1332                         panic("Lock %s not recursed @ %s:%d\n",
1333                             rw->lock_object.lo_name, file, line);
1334                 break;
1335         case RA_UNLOCKED:
1336 #ifdef WITNESS
1337                 witness_assert(&rw->lock_object, what, file, line);
1338 #else
1339                 /*
1340                  * If we hold a write lock, fail.  We can't reliably check
1341                  * to see if we hold a read lock or not.
1342                  */
1343                 if (rw_wowner(rw) == curthread)
1344                         panic("Lock %s exclusively locked @ %s:%d\n",
1345                             rw->lock_object.lo_name, file, line);
1346 #endif
1347                 break;
1348         default:
1349                 panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
1350                     line);
1351         }
1352 }
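/*
 * Illustrative rw_assert() calls handled by the function above (the lock
 * name is a placeholder):
 *
 *	rw_assert(&foo_lock, RA_WLOCKED);	(curthread holds the write lock)
 *	rw_assert(&foo_lock, RA_RLOCKED);	(a read lock is held; see the
 *						 non-WITNESS caveat above)
 *	rw_assert(&foo_lock, RA_UNLOCKED);	(curthread holds no write lock)
 */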
1353 #endif /* INVARIANT_SUPPORT */
1354
1355 #ifdef DDB
1356 void
1357 db_show_rwlock(const struct lock_object *lock)
1358 {
1359         const struct rwlock *rw;
1360         struct thread *td;
1361
1362         rw = (const struct rwlock *)lock;
1363
1364         db_printf(" state: ");
1365         if (rw->rw_lock == RW_UNLOCKED)
1366                 db_printf("UNLOCKED\n");
1367         else if (rw->rw_lock == RW_DESTROYED) {
1368                 db_printf("DESTROYED\n");
1369                 return;
1370         } else if (rw->rw_lock & RW_LOCK_READ)
1371                 db_printf("RLOCK: %ju locks\n",
1372                     (uintmax_t)(RW_READERS(rw->rw_lock)));
1373         else {
1374                 td = rw_wowner(rw);
1375                 db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1376                     td->td_tid, td->td_proc->p_pid, td->td_name);
1377                 if (rw_recursed(rw))
1378                         db_printf(" recursed: %u\n", rw->rw_recurse);
1379         }
1380         db_printf(" waiters: ");
1381         switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
1382         case RW_LOCK_READ_WAITERS:
1383                 db_printf("readers\n");
1384                 break;
1385         case RW_LOCK_WRITE_WAITERS:
1386                 db_printf("writers\n");
1387                 break;
1388         case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
1389                 db_printf("readers and writers\n");
1390                 break;
1391         default:
1392                 db_printf("none\n");
1393                 break;
1394         }
1395 }
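/*
 * db_show_rwlock() is reached through lock_class_rw when a lock is examined
 * from the debugger, e.g. with DDB's "show lock" command.  An illustrative
 * session (address and output are placeholders):
 *
 *	db> show lock <address of an rwlock>
 *	 ...
 *	 state: RLOCK: 2 locks
 *	 waiters: writers
 */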
1396
1397 #endif