2 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/types.h>
30 #include <machine/atomic.h>
34 #include "atomic_ops.h"
38 #define LCK_ASSERT(e) assert(e)
/*
 * _lock_destroy(lck):
 *   Tear down a lock previously set up with _lock_init(), releasing the
 *   head request it allocated.  Safe no-op for a NULL or never-initialized
 *   lock.
 *   NOTE(review): the statements inside the if (presumably free(lck->l_head)
 *   and clearing the pointers) are elided from this view — confirm upstream.
 */
46 _lock_destroy(struct lock *lck)
48 if ((lck != NULL) && (lck->l_head != NULL)) {
/*
 * _lock_init(lck, ltype, waitfunc, wakeupfunc, calloc_cb):
 *   Initialize a lock of type `ltype`.  The initial (already-granted)
 *   request at the head of the queue is allocated through the
 *   caller-supplied calloc_cb, letting early-bootstrap callers substitute
 *   their own allocator.  waitfunc/wakeupfunc are optional handlers used
 *   by _lock_acquire()/_lock_release() to sleep and wake instead of
 *   spinning.
 *   NOTE(review): the leading NULL-argument branch and the return
 *   statements are elided from this view; presumably returns 0 on
 *   success, -1 on failure — confirm upstream.
 */
56 _lock_init(struct lock *lck, enum lock_type ltype,
57 lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
58 void *(calloc_cb)(size_t, size_t))
62 else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
66 lck->l_wait = waitfunc;
67 lck->l_wakeup = wakeupfunc;
/* The head request starts out granted (lr_locked == 0) and active. */
68 lck->l_head->lr_locked = 0;
69 lck->l_head->lr_watcher = NULL;
70 lck->l_head->lr_owner = NULL;
71 lck->l_head->lr_active = 1;
/* Single-entry queue: tail and head are the same request. */
72 lck->l_tail = lck->l_head;
/*
 * _lock_reinit(lck, ltype, waitfunc, wakeupfunc):
 *   Re-initialize an existing lock in place, reusing the head request
 *   already allocated by a prior _lock_init().  If the lock was never
 *   initialized (l_head == NULL), falls back to a full _lock_init()
 *   using the standard calloc.
 *   NOTE(review): the leading argument-validation branch is elided from
 *   this view — confirm upstream.
 */
78 _lock_reinit(struct lock *lck, enum lock_type ltype,
79 lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
83 else if (lck->l_head == NULL)
84 return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
/* Reset the head request to the granted/active state (mirrors _lock_init). */
86 lck->l_head->lr_locked = 0;
87 lck->l_head->lr_watcher = NULL;
88 lck->l_head->lr_owner = NULL;
89 lck->l_head->lr_active = 1;
90 lck->l_tail = lck->l_head;
/*
 * _lockuser_init(lu, priv):
 *   Initialize a per-thread lock user.  Allocates lu->lu_myreq on first
 *   use (reused across reinitializations).  The request starts out
 *   "locked" (lr_locked == 1) and inactive: it only enters a lock's
 *   queue when _lock_acquire() swaps it in.  `priv` is opaque caller
 *   data stashed in lu_private.
 *   NOTE(review): the leading NULL check and return statements are
 *   elided from this view — confirm upstream.
 */
96 _lockuser_init(struct lockuser *lu, void *priv)
100 else if ((lu->lu_myreq == NULL) &&
101 ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
104 lu->lu_myreq->lr_locked = 1;
105 lu->lu_myreq->lr_watcher = NULL;
106 lu->lu_myreq->lr_owner = lu;
107 lu->lu_myreq->lr_active = 0;
/* Not watching any request until we try to acquire a lock. */
108 lu->lu_watchreq = NULL;
110 lu->lu_private = priv;
111 lu->lu_private2 = NULL;
/*
 * _lockuser_reinit(lu, priv):
 *   Re-initialize a lock user (e.g. after fork).  If the user was
 *   mid-acquire (has a watch request), ownership of request structures
 *   is rotated: the watch request becomes the user's own request, since
 *   the previously-owned request was traded into the lock's queue and
 *   now belongs to someone else (or is the lock's head).
 */
117 _lockuser_reinit(struct lockuser *lu, void *priv)
122 if (lu->lu_watchreq != NULL) {
124 * In this case the lock is active. All lockusers
125 * keep their watch request and drop their own
126 * (lu_myreq) request. Their own request is either
127 * some other lockuser's watch request or is the
130 lu->lu_myreq = lu->lu_watchreq;
131 lu->lu_watchreq = NULL;
133 if (lu->lu_myreq == NULL)
135 * Oops, something isn't quite right. Try to
138 return (_lockuser_init(lu, priv));
/* Reset the (now-owned) request to its idle state, as in _lockuser_init. */
140 lu->lu_myreq->lr_locked = 1;
141 lu->lu_myreq->lr_watcher = NULL;
142 lu->lu_myreq->lr_owner = lu;
143 lu->lu_myreq->lr_active = 0;
144 lu->lu_watchreq = NULL;
146 lu->lu_private = priv;
147 lu->lu_private2 = NULL;
/*
 * _lockuser_destroy(lu):
 *   Free the lock user's private request structure.
 *   NOTE(review): the free(lu->lu_myreq) statement is elided from this
 *   view — confirm upstream.
 */
153 _lockuser_destroy(struct lockuser *lu)
155 if ((lu != NULL) && (lu->lu_myreq != NULL))
160 * Acquire a lock waiting (spin or sleep) for it to become available.
/*
 * _lock_acquire(lck, lu, prio):
 *   Queue-based acquire: atomically swap lu's own request into the
 *   queue head; the previous head becomes lu's "watch" request, and the
 *   lock is granted once that watched request's lr_locked drops to 0.
 *   Non-adaptive locks (or locks without a wait handler) spin; adaptive
 *   locks spin up to MAX_SPINS and then sleep via lck->l_wait.
 *   `prio` is recorded for LCK_PRIORITY locks so that _lock_release()
 *   can hand the lock to the highest-priority waiter.
 */
163 _lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
169 * XXX - We probably want to remove these checks to optimize
170 * performance. It is also a bug if any one of the
171 * checks fail, so it's probably better to just let it
175 if (lck == NULL || lu == NULL || lck->l_head == NULL)
/* For priority locks, sanity-check that our request is idle. */
178 if ((lck->l_type & LCK_PRIORITY) != 0) {
179 LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
180 LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
181 LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
182 LCK_ASSERT(lu->lu_watchreq == NULL);
184 lu->lu_priority = prio;
187 * Atomically swap the head of the lock request with
190 atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);
/* Nonzero lr_locked on the old head means the lock is currently held. */
192 if (lu->lu_watchreq->lr_locked != 0) {
194 ((volatile uintptr_t *)&lu->lu_watchreq->lr_watcher,
196 if ((lck->l_wait == NULL) ||
197 ((lck->l_type & LCK_ADAPTIVE) == 0)) {
198 while (lu->lu_watchreq->lr_locked != 0)
199 ; /* spin, then yield? */
202 * Spin for a bit before invoking the wait function.
204 * We should be a little smarter here. If we're
205 * running on a single processor, then the lock
206 * owner got preempted and spinning will accomplish
207 * nothing but waste time. If we're running on
208 * multiple processors, the owner could be running
209 * on another CPU and we might acquire the lock if
212 * The other thing to keep in mind is that threads
213 * acquiring these locks are considered to be in
214 * critical regions; they will not be preempted by
215 * the _UTS_ until they release the lock. It is
216 * therefore safe to assume that if a lock can't
217 * be acquired, it is currently held by a thread
218 * running in another KSE.
220 for (i = 0; i < MAX_SPINS; i++) {
221 if (lu->lu_watchreq->lr_locked == 0)
223 if (lu->lu_watchreq->lr_active == 0)
226 atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
229 lu->lu_watchreq->lr_locked = 0;
231 lck->l_wait(lck, lu);
/* Lock granted: our queued request now marks the running lock holder. */
235 lu->lu_myreq->lr_active = 1;
/*
 * _lock_release(lck, lu):
 *   Release `lck` held by `lu`.  For LCK_PRIORITY locks the request
 *   list is walked (from the tail, via lr_watcher links) to find the
 *   highest-priority waiter, and the lock is handed directly to that
 *   user; plain locks grant FIFO to the previous request.  In both
 *   paths `lu` adopts its watch request as its own request for the
 *   next acquire, and the old own-request (`myreq`) is the one whose
 *   lr_locked is cleared to grant the lock.  If a wakeup handler is
 *   installed, sleeping waiters are notified through lck->l_wakeup.
 *   NOTE(review): several continuation lines and closing braces are
 *   elided from this view — confirm exact atomic-call arguments
 *   upstream.
 */
242 _lock_release(struct lock *lck, struct lockuser *lu)
244 struct lockuser *lu_tmp, *lu_h;
245 struct lockreq *myreq;
250 * XXX - We probably want to remove these checks to optimize
251 * performance. It is also a bug if any one of the
252 * checks fail, so it's probably better to just let it
256 if ((lck == NULL) || (lu == NULL))
259 if ((lck->l_type & LCK_PRIORITY) != 0) {
263 /* Update tail if our request is last. */
264 if (lu->lu_watchreq->lr_owner == NULL) {
265 atomic_store_rel_ptr((volatile uintptr_t *)&lck->l_tail,
266 (uintptr_t)lu->lu_myreq);
268 ((volatile uintptr_t *)&lu->lu_myreq->lr_owner,
271 /* Remove ourselves from the list. */
272 atomic_store_rel_ptr((volatile uintptr_t *)
273 &lu->lu_myreq->lr_owner,
274 (uintptr_t)lu->lu_watchreq->lr_owner);
275 atomic_store_rel_ptr((volatile uintptr_t *)
276 &lu->lu_watchreq->lr_owner->lu_myreq,
277 (uintptr_t)lu->lu_myreq);
280 * The watch request now becomes our own because we've
281 * traded away our previous request. Save our previous
282 * request so that we can grant the lock.
284 myreq = lu->lu_myreq;
285 lu->lu_myreq = lu->lu_watchreq;
286 lu->lu_watchreq = NULL;
287 lu->lu_myreq->lr_locked = 1;
288 lu->lu_myreq->lr_owner = lu;
289 lu->lu_myreq->lr_watcher = NULL;
291 * Traverse the list of lock requests in reverse order
292 * looking for the user with the highest priority.
294 for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
295 lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
296 if (lu_tmp->lu_priority > prio_h) {
298 prio_h = lu_tmp->lu_priority;
302 /* Give the lock to the highest priority user. */
303 if (lck->l_wakeup != NULL) {
305 (int *)&lu_h->lu_watchreq->lr_locked,
308 /* Notify the sleeper */
310 lu_h->lu_myreq->lr_watcher);
313 atomic_store_rel_int(
314 &lu_h->lu_watchreq->lr_locked, 0);
316 if (lck->l_wakeup != NULL) {
317 atomic_swap_int((int *)&myreq->lr_locked,
320 /* Notify the sleeper */
321 lck->l_wakeup(lck, myreq->lr_watcher);
324 /* Give the lock to the previous request. */
325 atomic_store_rel_int(&myreq->lr_locked, 0);
329 * The watch request now becomes our own because we've
330 * traded away our previous request. Save our previous
331 * request so that we can grant the lock.
333 myreq = lu->lu_myreq;
334 lu->lu_myreq = lu->lu_watchreq;
335 lu->lu_watchreq = NULL;
336 lu->lu_myreq->lr_locked = 1;
338 atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
340 /* Notify the sleeper */
341 lck->l_wakeup(lck, myreq->lr_watcher);
344 /* Give the lock to the previous request. */
345 atomic_store_rel_int(&myreq->lr_locked, 0);
/* We no longer hold the lock; our (new) own request is idle again. */
347 lu->lu_myreq->lr_active = 0;
/*
 * _lock_grant(lck, lu):
 *   Hand the lock directly to `lu` by storing into its watched request.
 *   The value 3 (rather than 0) presumably encodes "granted via wakeup"
 *   so the woken side can distinguish it from a plain release — confirm
 *   against the l_wait/l_wakeup handler implementations.  `lck` is
 *   unused.
 */
351 _lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
353 atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
/*
 * _lockuser_setactive(lu, active):
 *   Set the active flag on the user's own request.  Spinning acquirers
 *   in _lock_acquire() poll lr_active to decide whether the current
 *   holder is still running or has been suspended.
 */
357 _lockuser_setactive(struct lockuser *lu, int active)
359 lu->lu_myreq->lr_active = active;