/*-
 * Copyright (c) 2009 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/systm.h>

#include <vm/uma.h>
41 TAILQ_ENTRY(rl_q_entry) rl_q_link;
42 off_t rl_q_start, rl_q_end;
46 static uma_zone_t rl_entry_zone;
49 rangelock_sys_init(void)
52 rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
53 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
55 SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);
57 static struct rl_q_entry *
61 return (uma_zalloc(rl_entry_zone, M_WAITOK));
65 rlqentry_free(struct rl_q_entry *rleq)
68 uma_zfree(rl_entry_zone, rleq);
72 rangelock_init(struct rangelock *lock)
75 TAILQ_INIT(&lock->rl_waiters);
76 lock->rl_currdep = NULL;
80 rangelock_destroy(struct rangelock *lock)
83 KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
87 * Verifies the supplied rl_q_entries for compatibility. Returns true
88 * if the rangelock queue entries are not compatible, false if they are.
90 * Two entries are compatible if their ranges do not overlap, or both
91 * entries are for read.
94 rangelock_incompatible(const struct rl_q_entry *e1,
95 const struct rl_q_entry *e2)
98 if ((e1->rl_q_flags & RL_LOCK_TYPE_MASK) == RL_LOCK_READ &&
99 (e2->rl_q_flags & RL_LOCK_TYPE_MASK) == RL_LOCK_READ)
101 if (e1->rl_q_start < e2->rl_q_end && e1->rl_q_end > e2->rl_q_start)
107 * Recalculate the lock->rl_currdep after an unlock.
110 rangelock_calc_block(struct rangelock *lock)
112 struct rl_q_entry *entry, *entry1, *whead;
114 if (lock->rl_currdep == TAILQ_FIRST(&lock->rl_waiters) &&
115 lock->rl_currdep != NULL)
116 lock->rl_currdep = TAILQ_NEXT(lock->rl_currdep, rl_q_link);
117 for (entry = lock->rl_currdep; entry != NULL;
118 entry = TAILQ_NEXT(entry, rl_q_link)) {
119 TAILQ_FOREACH(entry1, &lock->rl_waiters, rl_q_link) {
120 if (rangelock_incompatible(entry, entry1))
127 lock->rl_currdep = entry;
128 TAILQ_FOREACH(whead, &lock->rl_waiters, rl_q_link) {
129 if (whead == lock->rl_currdep)
131 if (!(whead->rl_q_flags & RL_LOCK_GRANTED)) {
132 whead->rl_q_flags |= RL_LOCK_GRANTED;
139 rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry,
143 MPASS(lock != NULL && entry != NULL && ilk != NULL);
144 mtx_assert(ilk, MA_OWNED);
145 KASSERT(entry != lock->rl_currdep, ("stuck currdep"));
147 TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
148 rangelock_calc_block(lock);
150 if (curthread->td_rlqe == NULL)
151 curthread->td_rlqe = entry;
153 rlqentry_free(entry);
157 rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk)
160 MPASS(lock != NULL && cookie != NULL && ilk != NULL);
163 rangelock_unlock_locked(lock, cookie, ilk);
167 * Unlock the sub-range of granted lock.
170 rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start,
171 off_t end, struct mtx *ilk)
173 struct rl_q_entry *entry;
175 MPASS(lock != NULL && cookie != NULL && ilk != NULL);
177 KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED,
178 ("Unlocking non-granted lock"));
179 KASSERT(entry->rl_q_start == start, ("wrong start"));
180 KASSERT(entry->rl_q_end >= end, ("wrong end"));
183 if (entry->rl_q_end == end) {
184 rangelock_unlock_locked(lock, cookie, ilk);
187 entry->rl_q_end = end;
188 rangelock_calc_block(lock);
194 * Add the lock request to the queue of the pending requests for
195 * rangelock. Sleep until the request can be granted.
198 rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
201 struct rl_q_entry *entry;
204 MPASS(lock != NULL && ilk != NULL);
207 if (td->td_rlqe != NULL) {
211 entry = rlqentry_alloc();
212 MPASS(entry != NULL);
213 entry->rl_q_flags = mode;
214 entry->rl_q_start = start;
215 entry->rl_q_end = end;
219 * XXXKIB TODO. Check that a thread does not try to enqueue a
220 * lock that is incompatible with another request from the same
224 TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
225 if (lock->rl_currdep == NULL)
226 lock->rl_currdep = entry;
227 rangelock_calc_block(lock);
228 while (!(entry->rl_q_flags & RL_LOCK_GRANTED))
229 msleep(entry, ilk, 0, "range", 0);
235 rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
238 return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk));
242 rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
245 return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk));