/*
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#define LCK_ASSERT(e)   assert(e)

/* Iterations to spin before falling back to the wait handler. */
#define MAX_SPINS       500

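/*
 * Overview (added commentary): this is a queue-based spin/sleep lock.
 * Every lockuser permanently carries one lock request (lu_myreq).  To
 * acquire the lock, a user atomically swaps its own request with the
 * request at the head of the lock; the request it receives in exchange
 * (lu_watchreq) belongs to its predecessor, and the user spins or
 * sleeps until that request's lr_locked field is cleared.  Releasing
 * the lock clears the watched request of exactly one successor.  The
 * scheme is comparable to an MCS-style queue lock; the LCK_PRIORITY
 * variant grants the lock to the highest-priority waiter rather than
 * in strict FIFO order.
 */
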
void
_lock_destroy(struct lock *lck)
{
        if ((lck != NULL) && (lck->l_head != NULL)) {
                free(lck->l_head);
                lck->l_head = NULL;
        }
}

int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
    void *(calloc_cb)(size_t, size_t))
{
        if (lck == NULL)
                return (-1);
        else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
                return (-1);
        else {
                lck->l_type = ltype;
                lck->l_wait = waitfunc;
                lck->l_wakeup = wakeupfunc;
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

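/*
 * Example (illustrative sketch, not part of the original sources): a
 * plain FIFO spin lock needs no wait/wakeup handlers and can use the
 * libc calloc as its allocator.  LCK_DEFAULT is assumed to be the
 * basic lock type from lock.h.
 *
 *      static struct lock lck;
 *
 *      if (_lock_init(&lck, LCK_DEFAULT, NULL, NULL, calloc) != 0)
 *              abort();
 *
 * A failure here means the head lockreq could not be allocated.
 */
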
int
_lock_reinit(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{
        if (lck == NULL)
                return (-1);
        else if (lck->l_head == NULL)
                return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
        else {
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

int
_lockuser_init(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        else if ((lu->lu_myreq == NULL) &&
            ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
                return (-1);
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

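/*
 * Example (illustrative sketch): each thread that will contend for a
 * lock needs its own lockuser, zero-initialized before first use so
 * that lu_myreq starts out NULL and a request gets allocated.  The
 * priv pointer is simply stored (in lu_private) for later retrieval;
 * "curthread" below is a hypothetical per-thread descriptor.
 *
 *      static struct lockuser lu;
 *
 *      if (_lockuser_init(&lu, curthread) != 0)
 *              abort();
 */
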
int
_lockuser_reinit(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);

        if (lu->lu_watchreq != NULL) {
                /*
                 * In this case the lock is active.  All lockusers
                 * keep their watch request and drop their own
                 * (lu_myreq) request.  Their own request is either
                 * some other lockuser's watch request or is the
                 * head of the lock.
                 */
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
        }
        if (lu->lu_myreq == NULL)
                /*
                 * Oops, something isn't quite right.  Try to
                 * allocate one.
                 */
                return (_lockuser_init(lu, priv));
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

void
_lockuser_destroy(struct lockuser *lu)
{
        if ((lu != NULL) && (lu->lu_myreq != NULL))
                free(lu->lu_myreq);
}

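/*
 * Note (added commentary): because _lockuser_reinit and the
 * acquire/release protocol trade requests between users, the lu_myreq
 * freed here is not necessarily the request this lockuser originally
 * allocated.  Requests migrate between owners, but each one is freed
 * exactly once provided every lockuser and the lock itself are
 * destroyed.
 */
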
/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
        int i;
        int lval;

        /*
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fail, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if (lck == NULL || lu == NULL || lck->l_head == NULL)
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
                LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
                LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
                LCK_ASSERT(lu->lu_watchreq == NULL);

                lu->lu_priority = prio;
        }
        /*
         * Atomically swap the head of the lock request with
         * this request.
         */
        atomic_swap_ptr((void *)&lck->l_head, lu->lu_myreq,
            (void *)&lu->lu_watchreq);

        if (lu->lu_watchreq->lr_locked != 0) {
                atomic_store_rel_ptr
                    ((volatile uintptr_t *)(void *)&lu->lu_watchreq->lr_watcher,
                    (uintptr_t)lu);
                if ((lck->l_wait == NULL) ||
                    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
                        while (lu->lu_watchreq->lr_locked != 0)
                                ;       /* spin */
                } else {
                        /*
                         * Spin for a bit before invoking the wait function.
                         *
                         * We should be a little smarter here.  If we're
                         * running on a single processor, then the lock
                         * owner got preempted and spinning will accomplish
                         * nothing but waste time.  If we're running on
                         * multiple processors, the owner could be running
                         * on another CPU and we might acquire the lock if
                         * we spin for a bit.
                         *
                         * The other thing to keep in mind is that threads
                         * acquiring these locks are considered to be in
                         * critical regions; they will not be preempted by
                         * the _UTS_ until they release the lock.  It is
                         * therefore safe to assume that if a lock can't
                         * be acquired, it is currently held by a thread
                         * running in another KSE.
                         */
                        for (i = 0; i < MAX_SPINS; i++) {
                                if (lu->lu_watchreq->lr_locked == 0) {
                                        /* Got the lock while spinning. */
                                        lu->lu_myreq->lr_active = 1;
                                        return;
                                }
                                if (lu->lu_watchreq->lr_active == 0)
                                        break;
                        }
                        atomic_swap_int(&lu->lu_watchreq->lr_locked,
                            2, &lval);
                        if (lval == 0)
                                lu->lu_watchreq->lr_locked = 0;
                        else
                                lck->l_wait(lck, lu);
                }
        }
        lu->lu_myreq->lr_active = 1;
}

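/*
 * Note on lr_locked values (inferred from this file): 0 means the
 * request is granted and its watcher may proceed; 1 is the initial
 * state of a queued request; 2 is set by a waiter just before calling
 * the l_wait handler, telling the releaser that l_wakeup must be
 * called; 3 is stored by _lock_grant below when the lock is handed
 * directly to a waiter.  The precise meaning of 2 and 3 is a contract
 * between the caller-supplied wait/wakeup handlers.
 */
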
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
        struct lockuser *lu_tmp, *lu_h;
        struct lockreq *myreq;
        int prio_h;
        int lval;

        /*
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fail, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if ((lck == NULL) || (lu == NULL))
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                prio_h = 0;
                lu_h = NULL;

                /* Update tail if our request is last. */
                if (lu->lu_watchreq->lr_owner == NULL) {
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lck->l_tail,
                            (uintptr_t)lu->lu_myreq);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)NULL);
                } else {
                        /* Remove ourselves from the list. */
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)lu->lu_watchreq->lr_owner);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_watchreq->lr_owner->lu_myreq,
                            (uintptr_t)lu->lu_myreq);
                }
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_watcher = NULL;
                /*
                 * Traverse the list of lock requests in reverse order
                 * looking for the user with the highest priority.
                 */
                for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
                     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
                        if (lu_tmp->lu_priority > prio_h) {
                                lu_h = lu_tmp;
                                prio_h = lu_tmp->lu_priority;
                        }
                }
                if (lu_h != NULL) {
                        /* Give the lock to the highest priority user. */
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(
                                    &lu_h->lu_watchreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck,
                                            lu_h->lu_myreq->lr_watcher);
                        }
                        else
                                atomic_store_rel_int(
                                    &lu_h->lu_watchreq->lr_locked, 0);
                } else {
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(&myreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck, myreq->lr_watcher);
                        }
                        else
                                /* Give the lock to the previous request. */
                                atomic_store_rel_int(&myreq->lr_locked, 0);
                }
        } else {
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                if (lck->l_wakeup != NULL) {
                        atomic_swap_int(&myreq->lr_locked, 0, &lval);
                        if (lval == 2)
                                /* Notify the sleeper */
                                lck->l_wakeup(lck, myreq->lr_watcher);
                }
                else
                        /* Give the lock to the previous request. */
                        atomic_store_rel_int(&myreq->lr_locked, 0);
        }
        lu->lu_myreq->lr_active = 0;
}

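/*
 * Example (illustrative sketch): a critical section using the lck and
 * lu objects from the sketches above.  The priority argument is only
 * consulted for LCK_PRIORITY locks, so 0 is fine for a FIFO lock.
 *
 *      _lock_acquire(&lck, &lu, 0);
 *      ... critical section ...
 *      _lock_release(&lck, &lu);
 */
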
void
_lock_grant(struct lock *lck __unused, struct lockuser *lu)
{
        atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}

void
_lockuser_setactive(struct lockuser *lu, int active)
{
        lu->lu_myreq->lr_active = active;
}