 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads who have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to be able to
 * then grab a write lock without first dropping all read locks, and
 * vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed.  Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'.  Nodes on the
 * tsd list can represent different rrwlock_ts.  This allows a thread
 * to hold read locks on multiple distinct rrwlock_ts at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting.  If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * data is needed.  Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know if its enter is re-entrant.  But since it may be,
 * we allow the read to proceed (otherwise it could deadlock).  Once
 * writers are waiting, readers no longer bump the anonymous count, so
 * the anonymous readers will eventually flush themselves out.  At that
 * point, readers can tell whether their enter is re-entrant (they have
 * a rrw_node_t entry for the lock) or not.  If it is, we must let them
 * proceed.  If it is not, the reader blocks for the waiting writers.
 * Hence, we do not starve writers.
 */
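
/*
 * Minimal usage sketch (illustrative only, not part of the original file;
 * 'lock' is a hypothetical rrwlock_t and FTAG is assumed to be the usual
 * ZFS caller tag):
 *
 *	rrwlock_t lock;
 *
 *	rrw_init(&lock);
 *	rrw_enter(&lock, RW_READER, FTAG);	- first read hold
 *	rrw_enter(&lock, RW_READER, FTAG);	- re-entrant read; succeeds
 *						  even with writers waiting
 *	rrw_exit(&lock, FTAG);
 *	rrw_exit(&lock, FTAG);			- last read hold dropped
 *	rrw_enter(&lock, RW_WRITER, FTAG);	- exclusive access
 *	rrw_exit(&lock, FTAG);
 *	rrw_destroy(&lock);
 */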

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node	*rn_next;
	rrwlock_t	*rn_rrl;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl) {
			if (prev != NULL)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key,
				    rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}
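
/*
 * Illustrative note (not part of the original file): a thread holding
 * re-entrant reads on lock 'a' twice and lock 'b' once carries three
 * nodes on its tsd list, newest first:
 *
 *	tsd_get(rrw_tsd_key) -> [rn_rrl=&b] -> [rn_rrl=&a] -> [rn_rrl=&a]
 *
 * rrn_find_and_remove() unlinks only the first node matching 'rrl', so
 * each rrw_exit() releases exactly one read hold on that lock.
 */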

void
rrw_init(rrwlock_t *rrl)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && !rrl->rr_writer_wanted) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}
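
/*
 * Added commentary: the rrn_find() check in the wait loop above is what
 * prevents the classic re-entrancy deadlock.  With a plain rwlock, if
 * thread T held a read lock and writer W was waiting, T's second read
 * would queue behind W while W waited for T to drop its first read.
 * Here T's second enter finds its rrw_node_t and proceeds.
 */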

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;

		if (rrn_find_and_remove(rrl))
			count = refcount_remove(&rrl->rr_linked_rcount, tag);
		else
			count = refcount_remove(&rrl->rr_anon_rcount, tag);
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
		    refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
		    !refcount_is_zero(&rrl->rr_linked_rcount));
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}
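
/*
 * Usage sketch (hypothetical caller): rrw_held() is typically used in
 * assertions, e.g.:
 *
 *	ASSERT(rrw_held(&lock, RW_READER));
 *
 * Note that for RW_READER it reports whether any thread holds a read
 * lock, not specifically the calling thread, since anonymous read
 * holds cannot be attributed to a particular thread.
 */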

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;

	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}