/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/systm.h>

#include <vm/uma.h>

struct rl_q_entry {
	TAILQ_ENTRY(rl_q_entry) rl_q_link;
	off_t		rl_q_start, rl_q_end;
	int		rl_q_flags;
};

static uma_zone_t rl_entry_zone;

static void
rangelock_sys_init(void)
{

	rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);

static struct rl_q_entry *
rlqentry_alloc(void)
{

	return (uma_zalloc(rl_entry_zone, M_WAITOK));
}

void
rlqentry_free(struct rl_q_entry *rleq)
{

	uma_zfree(rl_entry_zone, rleq);
}

void
rangelock_init(struct rangelock *lock)
{

	TAILQ_INIT(&lock->rl_waiters);
	lock->rl_currdep = NULL;
}

void
rangelock_destroy(struct rangelock *lock)
{

	KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
}

/*
 * Two entries are compatible if their ranges do not overlap, or both
 * entries are for read.
 */
static int
ranges_overlap(const struct rl_q_entry *e1,
    const struct rl_q_entry *e2)
{

	if (e1->rl_q_start < e2->rl_q_end && e1->rl_q_end > e2->rl_q_start)
		return (1);
	return (0);
}

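/*
 * Note on semantics, for illustration: the test above treats ranges as
 * half-open intervals [start, end), so ranges that merely touch are
 * compatible.  A minimal sketch (the entries below are hypothetical,
 * constructed only for this example):
 *
 *	struct rl_q_entry a = { .rl_q_start = 0,  .rl_q_end = 10 };
 *	struct rl_q_entry b = { .rl_q_start = 10, .rl_q_end = 20 };
 *	struct rl_q_entry c = { .rl_q_start = 5,  .rl_q_end = 15 };
 *
 *	ranges_overlap(&a, &b);		0: [0, 10) and [10, 20) only
 *					touch at offset 10
 *	ranges_overlap(&a, &c);		1: the ranges share [5, 10)
 */
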
/*
 * Recalculate the lock->rl_currdep after an unlock.
 */
static void
rangelock_calc_block(struct rangelock *lock)
{
	struct rl_q_entry *entry, *nextentry, *entry1;

	for (entry = lock->rl_currdep; entry != NULL; entry = nextentry) {
		nextentry = TAILQ_NEXT(entry, rl_q_link);
		if (entry->rl_q_flags & RL_LOCK_READ) {
			/* Reads must not overlap with granted writes. */
			for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
			    !(entry1->rl_q_flags & RL_LOCK_READ);
			    entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
				if (ranges_overlap(entry, entry1))
					goto out;
			}
		} else {
			/* Write must not overlap with any granted locks. */
			for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
			    entry1 != entry;
			    entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
				if (ranges_overlap(entry, entry1))
					goto out;
			}

			/* Move grantable write locks to the front. */
			TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
			TAILQ_INSERT_HEAD(&lock->rl_waiters, entry, rl_q_link);
		}

		/* Grant this lock. */
		entry->rl_q_flags |= RL_LOCK_GRANTED;
		wakeup(entry);
	}
out:
	lock->rl_currdep = entry;
}

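/*
 * A sketch of the queue discipline implemented above, assuming the
 * invariant that granted entries precede waiters on rl_waiters and
 * that rl_currdep points to the first ungranted entry:
 *
 *	[W 0-10 granted] -> [R 5-15 waiting] -> [R 20-30 waiting]
 *	                     ^ rl_currdep
 *
 * Once the write lock on [0, 10) is dropped, the pass above grants
 * both reads (neither overlaps a granted write any longer) and leaves
 * rl_currdep == NULL, i.e. no request remains blocked.
 */
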
static void
rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry,
    struct mtx *ilk, bool do_calc_block)
{

	MPASS(lock != NULL && entry != NULL && ilk != NULL);
	mtx_assert(ilk, MA_OWNED);

	if (!do_calc_block) {
		/*
		 * This is the case where rangelock_enqueue() has been called
		 * with trylock == true and just inserted this entry in the
		 * queue.
		 * If rl_currdep is this entry, rl_currdep needs to
		 * be set to the next entry in the rl_waiters list.
		 * However, since this entry is the last entry in the
		 * list, the next entry is NULL.
		 */
		if (lock->rl_currdep == entry) {
			KASSERT(TAILQ_NEXT(lock->rl_currdep, rl_q_link) == NULL,
			    ("rangelock_enqueue: next entry not NULL"));
			lock->rl_currdep = NULL;
		}
	} else
		KASSERT(entry != lock->rl_currdep, ("stuck currdep"));

	TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
	if (do_calc_block)
		rangelock_calc_block(lock);
	mtx_unlock(ilk);
	if (curthread->td_rlqe == NULL)
		curthread->td_rlqe = entry;
	else
		rlqentry_free(entry);
}

void
rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk)
{

	MPASS(lock != NULL && cookie != NULL && ilk != NULL);

	mtx_lock(ilk);
	rangelock_unlock_locked(lock, cookie, ilk, true);
}

/*
 * Unlock the sub-range of granted lock.
 */
void
rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start,
    off_t end, struct mtx *ilk)
{
	struct rl_q_entry *entry;

	MPASS(lock != NULL && cookie != NULL && ilk != NULL);
	entry = cookie;
	KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED,
	    ("Unlocking non-granted lock"));
	KASSERT(entry->rl_q_start == start, ("wrong start"));
	KASSERT(entry->rl_q_end >= end, ("wrong end"));

	mtx_lock(ilk);
	if (entry->rl_q_end == end) {
		rangelock_unlock_locked(lock, cookie, ilk, true);
		return;
	}
	entry->rl_q_end = end;
	rangelock_calc_block(lock);
	mtx_unlock(ilk);
}

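/*
 * Usage sketch for partial unlock (the caller below is hypothetical,
 * shown only for illustration): a thread holding a write lock on
 * [0, 100) that is done with the tail of the range may trim the lock
 * to [0, 50), unblocking waiters on [50, 100), and drop the remainder
 * later through the normal path:
 *
 *	rangelock_unlock_range(lock, cookie, 0, 50, ilk);
 *	...
 *	rangelock_unlock(lock, cookie, ilk);
 */
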
/*
 * Add the lock request to the queue of the pending requests for
 * rangelock.  Sleep until the request can be granted unless trylock == true.
 */
static void *
rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
    struct mtx *ilk, bool trylock)
{
	struct rl_q_entry *entry;
	struct thread *td;

	MPASS(lock != NULL && ilk != NULL);

	td = curthread;
	if (td->td_rlqe != NULL) {
		entry = td->td_rlqe;
		td->td_rlqe = NULL;
	} else
		entry = rlqentry_alloc();
	MPASS(entry != NULL);
	entry->rl_q_flags = mode;
	entry->rl_q_start = start;
	entry->rl_q_end = end;

	mtx_lock(ilk);
	/*
	 * XXXKIB TODO. Check that a thread does not try to enqueue a
	 * lock that is incompatible with another request from the same
	 * thread.
	 */

	TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
	/*
	 * If rl_currdep == NULL, there is no entry waiting for a conflicting
	 * range to be resolved, so set rl_currdep to this entry.  If there is
	 * no conflicting entry for this entry, rl_currdep will be set back to
	 * NULL by rangelock_calc_block().
	 */
	if (lock->rl_currdep == NULL)
		lock->rl_currdep = entry;
	rangelock_calc_block(lock);
	while (!(entry->rl_q_flags & RL_LOCK_GRANTED)) {
		if (trylock) {
			/*
			 * For this case, the range is not actually locked
			 * yet, but removal from the list requires the same
			 * steps, except for not doing a rangelock_calc_block()
			 * call, since rangelock_calc_block() was called above.
			 */
			rangelock_unlock_locked(lock, entry, ilk, false);
			return (NULL);
		}
		msleep(entry, ilk, 0, "range", 0);
	}
	mtx_unlock(ilk);
	return (entry);
}

void *
rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, false));
}

void *
rangelock_tryrlock(struct rangelock *lock, off_t start, off_t end,
    struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk, true));
}

void *
rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, false));
}

void *
rangelock_trywlock(struct rangelock *lock, off_t start, off_t end,
    struct mtx *ilk)
{

	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, true));
}

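/*
 * Typical usage of the public interface, sketched for illustration
 * only.  The vnode-based caller below is an assumption of this
 * example; the only hard requirement is that the same interlock
 * mutex is passed to the lock and unlock calls:
 *
 *	void *cookie;
 *
 *	cookie = rangelock_wlock(&vp->v_rl, off, off + len, VI_MTX(vp));
 *	... modify the byte range [off, off + len) ...
 *	rangelock_unlock(&vp->v_rl, cookie, VI_MTX(vp));
 *
 * The trylock variants return NULL instead of sleeping when the
 * requested range cannot be granted immediately.
 */
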
#ifdef INVARIANT_SUPPORT
void
_rangelock_cookie_assert(void *cookie, int what, const char *file, int line)
{
	struct rl_q_entry *entry;
	int flags;

	MPASS(cookie != NULL);
	entry = cookie;
	flags = entry->rl_q_flags;
	switch (what) {
	case RCA_LOCKED:
		if ((flags & RL_LOCK_GRANTED) == 0)
			panic("rangelock not held @ %s:%d\n", file, line);
		break;
	case RCA_RLOCKED:
		if ((flags & (RL_LOCK_GRANTED | RL_LOCK_READ)) !=
		    (RL_LOCK_GRANTED | RL_LOCK_READ))
			panic("rangelock not rlocked @ %s:%d\n", file, line);
		break;
	case RCA_WLOCKED:
		if ((flags & (RL_LOCK_GRANTED | RL_LOCK_WRITE)) !=
		    (RL_LOCK_GRANTED | RL_LOCK_WRITE))
			panic("rangelock not wlocked @ %s:%d\n", file, line);
		break;
	default:
		panic("Unknown rangelock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */