/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

#ifdef ADAPTIVE_RWLOCKS
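/*
 * Tunables for adaptive spinning: a contending thread makes up to
 * ROWNER_RETRIES attempts to spin-wait on a read-held lock, each
 * bounded by ROWNER_LOOPS cpu_spinwait() iterations, before it
 * falls back to blocking on the turnstile.
 */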
#define ROWNER_RETRIES  10
#define ROWNER_LOOPS    10000
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void     db_show_rwlock(struct lock_object *lock);
#endif
static void     assert_rw(struct lock_object *lock, int what);
static void     lock_rw(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int      owner_rw(struct lock_object *lock, struct thread **owner);
#endif
static int      unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
        .lc_name = "rw",
        .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
        .lc_assert = assert_rw,
#ifdef DDB
        .lc_ddb_show = db_show_rwlock,
#endif
        .lc_lock = lock_rw,
        .lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_rw,
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define rw_wowner(rw)                                                   \
        ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
            (struct thread *)RW_OWNER((rw)->rw_lock))
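
/*
 * A note on the lock word: rw_lock holds either the owning thread
 * pointer with flag bits OR'd into its low bits, or, when
 * RW_LOCK_READ is set, a reader count that is adjusted in units of
 * RW_ONE_READER and extracted with RW_READERS().
 */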

/*
 * Return true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define rw_recursed(rw)         ((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define rw_wlocked(rw)          (rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define rw_owner(rw)            rw_wowner(rw)

#ifndef INVARIANTS
#define _rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

        rw_assert((struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, int how)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        if (how)
                rw_wlock(rw);
        else
                rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
        struct rwlock *rw;

        rw = (struct rwlock *)lock;
        rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
        if (rw->rw_lock & RW_LOCK_READ) {
                rw_runlock(rw);
                return (0);
        } else {
                rw_wunlock(rw);
                return (1);
        }
}
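
/*
 * Note on the lock-class contract: the value unlock_rw() returns is
 * handed back to lock_rw() as 'how' by generic code that must drop
 * and later reacquire this lock, so the lock is restored in the
 * same mode: 1 reacquires the write lock, 0 the read lock.
 */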

#ifdef KDTRACE_HOOKS
int
owner_rw(struct lock_object *lock, struct thread **owner)
{
        struct rwlock *rw = (struct rwlock *)lock;
        uintptr_t x = rw->rw_lock;

        *owner = rw_wowner(rw);
        return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
            (*owner != NULL));
}
#endif

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
        int flags;

        MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
            RW_RECURSE)) == 0);
        ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
            ("%s: rw_lock not aligned for %s: %p", __func__, name,
            &rw->rw_lock));

        flags = LO_UPGRADABLE;
        if (opts & RW_DUPOK)
                flags |= LO_DUPOK;
        if (opts & RW_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & RW_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & RW_RECURSE)
                flags |= LO_RECURSABLE;
        if (opts & RW_QUIET)
                flags |= LO_QUIET;

        rw->rw_lock = RW_UNLOCKED;
        rw->rw_recurse = 0;
        lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}
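
/*
 * Example (an illustrative sketch, not part of this file; "foo" is
 * a hypothetical consumer): typical life cycle of an rwlock using
 * the public rwlock(9) interface:
 *
 *      static struct rwlock foo_lock;
 *
 *      rw_init_flags(&foo_lock, "foo lock", RW_RECURSE);
 *      rw_rlock(&foo_lock);
 *      ... read shared foo state ...
 *      rw_runlock(&foo_lock);
 *      rw_wlock(&foo_lock);
 *      ... modify shared foo state ...
 *      rw_wunlock(&foo_lock);
 *      rw_destroy(&foo_lock);
 */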

void
rw_destroy(struct rwlock *rw)
{

        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
        KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
        rw->rw_lock = RW_DESTROYED;
        lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
        struct rw_args *args = arg;

        rw_init(args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
        struct rw_args_flags *args = arg;

        rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
}

int
rw_wowned(struct rwlock *rw)
{

        return (rw_wowner(rw) == curthread);
}
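
/*
 * Example (illustrative; foo_lock is hypothetical): rw_wowned() is
 * typically used by consumers in their own assertions, e.g.:
 *
 *      KASSERT(rw_wowned(&foo_lock), ("foo_lock not write-held"));
 */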

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line, NULL);
        __rw_wlock(rw, curthread, file, line);
        LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
        WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
        int rval;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

        if (rw_wlocked(rw) &&
            (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
                rw->rw_recurse++;
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
                    (uintptr_t)curthread);

        LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }
        return (rval);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

        if (SCHEDULER_STOPPED())
                return;
        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
            line);
        if (!rw_recursed(rw))
                LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
        __rw_wunlock(rw, curthread, file, line);
}
/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * gives writers priority over readers.
 */
#define RW_CAN_READ(_rw)                                                \
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &      \
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==  \
    RW_LOCK_READ)
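
/*
 * For example, walking through the predicate above: with rw_lock ==
 * RW_READERS_LOCK(2), RW_LOCK_READ is set and no waiter or spinner
 * bits are, so any thread may become a third reader.  Once
 * RW_LOCK_WRITE_WAITERS (or RW_LOCK_WRITE_SPINNER) is also set, the
 * second clause fails; only a thread whose td_rw_rlocks is non-zero,
 * i.e. one that already holds read locks and might be recursing, is
 * still admitted by the first clause.
 */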

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
        int spintries = 0;
        int i;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        uintptr_t v;
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
        KASSERT(rw_wowner(rw) != curthread,
            ("rw_rlock: wlock already held for %s @ %s:%d",
            rw->lock_object.lo_name, file, line));
        WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

        for (;;) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
                /*
                 * Handle the easy case.  If no other thread has a write
                 * lock, then try to bump up the count of read locks.  Note
                 * that we have to preserve the current state of the
                 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
                 * read lock, then rw_lock must have changed, so restart
                 * the loop.  Note that this handles the case of a
                 * completely unlocked rwlock since such a lock is encoded
                 * as a read lock with no waiters.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        /*
                         * The RW_LOCK_READ_WAITERS flag should only be set
                         * if the lock has been unlocked and write waiters
                         * were present.
                         */
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
                            v + RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeed %p -> %p", __func__,
                                            rw, (void *)v,
                                            (void *)(v + RW_ONE_READER));
                                break;
                        }
                        continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the owner is running on another CPU, spin until
                 * the owner stops running or the state of the lock
                 * changes.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, rw, owner);
                                while ((struct thread*)RW_OWNER(rw->rw_lock) ==
                                    owner && TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                        spin_cnt++;
#endif
                                }
                                continue;
                        }
                } else if (spintries < ROWNER_RETRIES) {
                        spintries++;
                        for (i = 0; i < ROWNER_LOOPS; i++) {
                                v = rw->rw_lock;
                                if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                                        break;
                                cpu_spinwait();
                        }
                        if (i != ROWNER_LOOPS)
                                continue;
                }
#endif

                /*
                 * Okay, now it's the hard case.  Some other thread already
                 * has a write lock or there are write waiters present,
                 * acquire the turnstile lock so we can begin the process
                 * of blocking.
                 */
                ts = turnstile_trywait(&rw->lock_object);

                /*
                 * The lock might have been released while we spun, so
                 * recheck its state and restart the loop if needed.
                 */
                v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        turnstile_cancel(ts);
                        continue;
                }

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                if ((v & RW_LOCK_READ) == 0) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                }
#endif

                /*
                 * The lock is held in write mode or it already has waiters.
                 */
                MPASS(!RW_CAN_READ(v));

                /*
                 * If the RW_LOCK_READ_WAITERS flag is already set, then
                 * we can go ahead and block.  If it is not set then try
                 * to set it.  If we fail to set it drop the turnstile
                 * lock and restart the loop.
                 */
                if (!(v & RW_LOCK_READ_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_READ_WAITERS)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                    __func__, rw);
                }

                /*
                 * We were unable to acquire the lock and the read waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
        }

        /*
         * TODO: acquire "owner of record" here.  Here be turnstile dragons
         * however.  turnstiles don't like owners changing between calls to
         * turnstile_wait() currently.
         */
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_RLOCK_ACQUIRE, rw, contested,
            waittime, file, line);
        LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&rw->lock_object, 0, file, line);
        curthread->td_locks++;
        curthread->td_rw_rlocks++;
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_RW_RLOCK_BLOCK, rw, sleep_time);

        /*
         * Record only the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_RW_RLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
        uintptr_t x;

        if (SCHEDULER_STOPPED())
                return (1);

        for (;;) {
                x = rw->rw_lock;
                KASSERT(rw->rw_lock != RW_DESTROYED,
                    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
                if (!(x & RW_LOCK_READ))
                        break;
                if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
                        LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
                            line);
                        WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
                        curthread->td_locks++;
                        curthread->td_rw_rlocks++;
                        return (1);
                }
        }

        LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
        return (0);
}
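
/*
 * Example (illustrative; foo_lock and the fallback are
 * hypothetical): an opportunistic read that never blocks:
 *
 *      if (rw_try_rlock(&foo_lock)) {
 *              ... fast path: read shared state ...
 *              rw_runlock(&foo_lock);
 *      } else {
 *              ... defer the work or retry without sleeping ...
 *      }
 */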

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t x, v, queue;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);
        curthread->td_locks--;
        curthread->td_rw_rlocks--;
        WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

        /* TODO: drop "owner of record" here. */

        for (;;) {
                /*
                 * See if there is more than one read lock held.  If so,
                 * just drop one and return.
                 */
                x = rw->rw_lock;
                if (RW_READERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
                            x - RW_ONE_READER)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, rw, (void *)x,
                                            (void *)(x - RW_ONE_READER));
                                break;
                        }
                        continue;
                }
                /*
                 * If there aren't any waiters for a write lock, then try
                 * to drop it quickly.
                 */
                if (!(x & RW_LOCK_WAITERS)) {
                        MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
                            RW_READERS_LOCK(1));
                        if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
                            RW_UNLOCKED)) {
                                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, rw);
                                break;
                        }
                        continue;
                }
                /*
                 * Ok, we know we have waiters and we think we are the
                 * last reader, so grab the turnstile lock.
                 */
                turnstile_chain_lock(&rw->lock_object);
                v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                MPASS(v & RW_LOCK_WAITERS);

                /*
                 * Try to drop our lock, leaving the lock in an unlocked
                 * state.
                 *
                 * If you wanted to do explicit lock handoff you'd have to
                 * do it here.  You'd also want to use turnstile_signal()
                 * and you'd have to handle the race where a higher
                 * priority thread blocks on the write lock before the
                 * thread you wake up actually runs and have the new thread
                 * "steal" the lock.  For now it's a lot simpler to just
                 * wake up all of the waiters.
                 *
                 * As above, if we fail, then another thread might have
                 * acquired a read lock, so drop the turnstile lock and
                 * restart.
                 */
                x = RW_UNLOCKED;
                if (v & RW_LOCK_WRITE_WAITERS) {
                        queue = TS_EXCLUSIVE_QUEUE;
                        x |= (v & RW_LOCK_READ_WAITERS);
                } else
                        queue = TS_SHARED_QUEUE;
                if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
                    x)) {
                        turnstile_chain_unlock(&rw->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
                            __func__, rw);

                /*
                 * Ok.  The lock is released and all that's left is to
                 * wake up the waiters.  Note that the lock might not be
                 * free anymore, but in that case the writers will just
                 * block again if they run before the new lock holder(s)
                 * release the lock.
                 */
                ts = turnstile_lookup(&rw->lock_object);
                MPASS(ts != NULL);
                turnstile_broadcast(ts, queue);
                turnstile_unpend(ts, TS_SHARED_LOCK);
                turnstile_chain_unlock(&rw->lock_object);
                break;
        }
        LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
        volatile struct thread *owner;
        int spintries = 0;
        int i;
#endif
        uintptr_t v, x;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
        uint64_t spin_cnt = 0;
        uint64_t sleep_cnt = 0;
        int64_t sleep_time = 0;
#endif

        if (SCHEDULER_STOPPED())
                return;

        if (rw_wlocked(rw)) {
                KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
                rw->rw_recurse++;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
                return;
        }

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

        while (!_rw_write_lock(rw, tid)) {
#ifdef KDTRACE_HOOKS
                spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&rw->lock_object,
                    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                v = rw->rw_lock;
                owner = (struct thread *)RW_OWNER(v);
                if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
                        while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
#ifdef KDTRACE_HOOKS
                                spin_cnt++;
#endif
                        }
                        continue;
                }
                if ((v & RW_LOCK_READ) && RW_READERS(v) &&
                    spintries < ROWNER_RETRIES) {
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
                                        continue;
                                }
                        }
                        spintries++;
                        for (i = 0; i < ROWNER_LOOPS; i++) {
                                if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                                        break;
                                cpu_spinwait();
                        }
#ifdef KDTRACE_HOOKS
                        spin_cnt += ROWNER_LOOPS - i;
#endif
                        if (i != ROWNER_LOOPS)
                                continue;
                }
#endif
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the turnstile
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
                if (!(v & RW_LOCK_READ)) {
                        owner = (struct thread *)RW_OWNER(v);
                        if (TD_IS_RUNNING(owner)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                }
#endif
                /*
                 * Check the waiters flags on this rwlock.  If the lock
                 * was released without leaving any pending waiters
                 * queue, simply try to acquire it.  If a pending
                 * waiters queue is present, claim lock ownership and
                 * maintain the pending queue.
                 */
                x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
                if ((v & ~x) == RW_UNLOCKED) {
                        x &= ~RW_LOCK_WRITE_SPINNER;
                        if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
                                if (x)
                                        turnstile_claim(ts);
                                else
                                        turnstile_cancel(ts);
                                break;
                        }
                        turnstile_cancel(ts);
                        continue;
                }
                /*
                 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
                 * set it.  If we fail to set it, then loop back and try
                 * again.
                 */
                if (!(v & RW_LOCK_WRITE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_cancel(ts);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                    __func__, rw);
                }
                /*
                 * We were unable to acquire the lock and the write waiters
                 * flag is set, so we must block on the turnstile.
                 */
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
                            rw);
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs();
#endif
                turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs();
                sleep_cnt++;
#endif
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
                spintries = 0;
#endif
        }
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_RW_WLOCK_ACQUIRE, rw, contested,
            waittime, file, line);
#ifdef KDTRACE_HOOKS
        if (sleep_time)
                LOCKSTAT_RECORD1(LS_RW_WLOCK_BLOCK, rw, sleep_time);

        /*
         * Record only the loops spinning and not sleeping.
         */
        if (spin_cnt > sleep_cnt)
                LOCKSTAT_RECORD1(LS_RW_WLOCK_SPIN, rw, (spin_cnt - sleep_cnt));
#endif
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t v;
        int queue;

        if (SCHEDULER_STOPPED())
                return;

        if (rw_wlocked(rw) && rw_recursed(rw)) {
                rw->rw_recurse--;
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
                return;
        }

        KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
            ("%s: neither of the waiter flags are set", __func__));

        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

        turnstile_chain_lock(&rw->lock_object);
        ts = turnstile_lookup(&rw->lock_object);
        MPASS(ts != NULL);

        /*
         * Use the same algo as sx locks for now: prefer waking up the
         * write waiters, if there are any, over the readers.  This is
         * probably not ideal.
         *
         * 'v' is the value we are going to write back to rw_lock.  If we
         * have waiters on both queues, we need to preserve the state of
         * the waiter flag for the queue we don't wake up.  For now this is
         * hardcoded for the algorithm mentioned above.
         *
         * In the case of both readers and writers waiting we wake up the
         * writers but leave the RW_LOCK_READ_WAITERS flag set, so the
         * still-blocked readers are not forgotten.  A new reader can
         * still slip in before a woken writer runs, in which case the
         * writer simply blocks again.  There is probably a potential
         * priority inversion in there that could be worked around either
         * by waking both queues of waiters or doing some complicated lock
         * handoff gymnastics.
         */
        v = RW_UNLOCKED;
        if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
                queue = TS_EXCLUSIVE_QUEUE;
                v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
        } else
                queue = TS_SHARED_QUEUE;

        /* Wake up all waiters for the specific queue. */
        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        turnstile_broadcast(ts, queue);
        atomic_store_rel_ptr(&rw->rw_lock, v);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
        uintptr_t v, x, tid;
        struct turnstile *ts;
        int success;

        if (SCHEDULER_STOPPED())
                return (1);

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_RLOCKED, file, line);

        /*
         * Attempt to switch from one reader to a writer.  If there
         * are any write waiters, then we will have to lock the
         * turnstile first to prevent races with another writer
         * calling turnstile_wait() before we have claimed this
         * turnstile.  So, do the simple case of no waiters first.
         */
        tid = (uintptr_t)curthread;
        success = 0;
        for (;;) {
                v = rw->rw_lock;
                if (RW_READERS(v) > 1)
                        break;
                if (!(v & RW_LOCK_WAITERS)) {
                        success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
                        if (!success)
                                continue;
                        break;
                }

                /*
                 * Ok, we think we have waiters, so lock the turnstile.
                 */
                ts = turnstile_trywait(&rw->lock_object);
                v = rw->rw_lock;
                if (RW_READERS(v) > 1) {
                        turnstile_cancel(ts);
                        break;
                }
                /*
                 * Try to switch from one reader to a writer again.  This time
                 * we honor the current state of the waiters flags.
                 * If we obtain the lock with the flags set, then claim
                 * ownership of the turnstile.
                 */
                x = rw->rw_lock & RW_LOCK_WAITERS;
                success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
                if (success) {
                        if (x)
                                turnstile_claim(ts);
                        else
                                turnstile_cancel(ts);
                        break;
                }
                turnstile_cancel(ts);
        }
        LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
        if (success) {
                curthread->td_rw_rlocks--;
                WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                LOCKSTAT_RECORD0(LS_RW_TRYUPGRADE_UPGRADE, rw);
        }
        return (success);
}
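
/*
 * Example (illustrative; foo_lock and the lookup are hypothetical):
 * the classic lookup-then-modify pattern rw_try_upgrade() supports.
 * If the upgrade fails, the lock must be dropped and retaken for
 * write, and the lookup revalidated, since other threads may have
 * run in between:
 *
 *      rw_rlock(&foo_lock);
 *      ... find the entry ...
 *      if (!rw_try_upgrade(&foo_lock)) {
 *              rw_runlock(&foo_lock);
 *              rw_wlock(&foo_lock);
 *              ... revalidate: the entry may have changed ...
 *      }
 *      ... modify the entry ...
 *      rw_wunlock(&foo_lock);
 */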

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t tid, v;
        int rwait, wwait;

        if (SCHEDULER_STOPPED())
                return;

        KASSERT(rw->rw_lock != RW_DESTROYED,
            ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
        _rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (rw_recursed(rw))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

        /*
         * Convert from a writer to a single reader.  First we handle
         * the easy case with no waiters.  If there are any waiters, we
         * lock the turnstile and "disown" the lock.
         */
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
                goto out;

        /*
         * Ok, we think we have waiters, so lock the turnstile so we can
         * read the waiter flags without any races.
         */
        turnstile_chain_lock(&rw->lock_object);
        v = rw->rw_lock & RW_LOCK_WAITERS;
        rwait = v & RW_LOCK_READ_WAITERS;
        wwait = v & RW_LOCK_WRITE_WAITERS;
        MPASS(rwait | wwait);

        /*
         * Downgrade from a write lock while preserving waiters flag
         * and give up ownership of the turnstile.
         */
        ts = turnstile_lookup(&rw->lock_object);
        MPASS(ts != NULL);
        if (!wwait)
                v &= ~RW_LOCK_READ_WAITERS;
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
        /*
         * Wake other readers if there are no writers pending.  Otherwise they
         * won't be able to acquire the lock anyway.
         */
        if (rwait && !wwait) {
                turnstile_broadcast(ts, TS_SHARED_QUEUE);
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        } else
                turnstile_disown(ts);
        turnstile_chain_unlock(&rw->lock_object);
out:
        curthread->td_rw_rlocks++;
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
        LOCKSTAT_RECORD0(LS_RW_DOWNGRADE_DOWNGRADE, rw);
}
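
/*
 * Example (illustrative; foo_lock is hypothetical): rw_downgrade()
 * lets a writer keep reading, and lets other readers in, without a
 * window in which the lock is released:
 *
 *      rw_wlock(&foo_lock);
 *      ... insert or update an entry ...
 *      rw_downgrade(&foo_lock);
 *      ... keep using the entry; concurrent readers may enter ...
 *      rw_runlock(&foo_lock);
 */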

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

        if (panicstr != NULL)
                return;
        switch (what) {
        case RA_LOCKED:
        case RA_LOCKED | RA_RECURSED:
        case RA_LOCKED | RA_NOTRECURSED:
        case RA_RLOCKED:
        case RA_RLOCKED | RA_RECURSED:
        case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If some other thread has a write lock or we have one
                 * and are asserting a read lock, fail.  Also, if no one
                 * has a lock at all, fail.
                 */
                if (rw->rw_lock == RW_UNLOCKED ||
                    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
                    rw_wowner(rw) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            rw->lock_object.lo_name, (what & RA_RLOCKED) ?
                            "read " : "", file, line);

                if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
                        if (rw_recursed(rw)) {
                                if (what & RA_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            rw->lock_object.lo_name, file,
                                            line);
                        } else if (what & RA_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                }
#endif
                break;
        case RA_WLOCKED:
        case RA_WLOCKED | RA_RECURSED:
        case RA_WLOCKED | RA_NOTRECURSED:
                if (rw_wowner(rw) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                if (rw_recursed(rw)) {
                        if (what & RA_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    rw->lock_object.lo_name, file, line);
                } else if (what & RA_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
                break;
        case RA_UNLOCKED:
#ifdef WITNESS
                witness_assert(&rw->lock_object, what, file, line);
#else
                /*
                 * If we hold a write lock, fail.  We can't reliably check
                 * whether we hold a read lock or not.
                 */
                if (rw_wowner(rw) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            rw->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */
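
/*
 * Example (illustrative; foo_lock is hypothetical): a consumer
 * function that requires the write lock documents and enforces
 * that with:
 *
 *      rw_assert(&foo_lock, RA_WLOCKED);
 */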

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
        struct rwlock *rw;
        struct thread *td;

        rw = (struct rwlock *)lock;

        db_printf(" state: ");
        if (rw->rw_lock == RW_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (rw->rw_lock == RW_DESTROYED) {
                db_printf("DESTROYED\n");
                return;
        } else if (rw->rw_lock & RW_LOCK_READ)
                db_printf("RLOCK: %ju locks\n",
                    (uintmax_t)(RW_READERS(rw->rw_lock)));
        else {
                td = rw_wowner(rw);
                db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_name);
                if (rw_recursed(rw))
                        db_printf(" recursed: %u\n", rw->rw_recurse);
        }
        db_printf(" waiters: ");
        switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
        case RW_LOCK_READ_WAITERS:
                db_printf("readers\n");
                break;
        case RW_LOCK_WRITE_WAITERS:
                db_printf("writers\n");
                break;
        case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
                db_printf("readers and writers\n");
                break;
        default:
                db_printf("none\n");
                break;
        }
}
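
/*
 * Example output (illustrative values, following the db_printf()
 * calls above) for a recursed write lock with readers waiting:
 *
 *       state: WLOCK: 0xfffff80002a1c000 (tid 100045, pid 12, "foo")
 *       recursed: 2
 *       waiters: readers
 */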

#endif