lib/libkse/sys/lock.c
/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#ifdef _LOCK_DEBUG
#define LCK_ASSERT(e)   assert(e)
#else
#define LCK_ASSERT(e)
#endif

#define MAX_SPINS       500

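/*
 * Free the request currently at the head of the lock's queue and clear
 * the queue pointers.  Safe to call on a lock whose queue was never
 * allocated.
 */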
void
_lock_destroy(struct lock *lck)
{
        if ((lck != NULL) && (lck->l_head != NULL)) {
                free(lck->l_head);
                lck->l_head = NULL;
                lck->l_tail = NULL;
        }
}

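/*
 * Initialize a lock.  The queue starts out with a single unlocked,
 * unowned request; it is allocated through the caller-supplied
 * calloc_cb so the caller controls where the memory comes from.
 */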
int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
    void *(calloc_cb)(size_t, size_t))
{
        if (lck == NULL)
                return (-1);
        else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
                return (-1);
        else {
                lck->l_type = ltype;
                lck->l_wait = waitfunc;
                lck->l_wakeup = wakeupfunc;
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

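/*
 * Reset an already-initialized lock to its unlocked state, reusing the
 * existing head request when there is one.
 */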
int
_lock_reinit(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{
        if (lck == NULL)
                return (-1);
        else if (lck->l_head == NULL)
                return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
        else {
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

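/*
 * Initialize a lock user.  Each lock user owns one request (lu_myreq)
 * which it hands to the lock on acquisition in exchange for the
 * previous head request (lu_watchreq), which it then watches.
 */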
int
_lockuser_init(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        else if ((lu->lu_myreq == NULL) &&
            ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
                return (-1);
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

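/*
 * Reset a lock user for reuse.  If it is still watching a request,
 * that request becomes its own (see the hand-off in _lock_release()).
 */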
int
_lockuser_reinit(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        if (lu->lu_watchreq != NULL) {
                /*
                 * In this case the lock is active.  All lockusers
                 * keep their watch request and drop their own
                 * (lu_myreq) request.  Their own request is either
                 * some other lockuser's watch request or is the
                 * head of the lock.
                 */
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
        }
        if (lu->lu_myreq == NULL)
                /*
                 * Oops, something isn't quite right.  Try to
                 * allocate one.
                 */
                return (_lockuser_init(lu, priv));
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

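/*
 * Free whichever request the lock user currently owns.
 */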
void
_lockuser_destroy(struct lockuser *lu)
{
        if ((lu != NULL) && (lu->lu_myreq != NULL))
                free(lu->lu_myreq);
}

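/*
 * Illustrative sketch, not part of the library: how a caller might use
 * this API for a plain (non-priority, non-adaptive) spin lock.  The
 * example_* names are hypothetical, error handling is minimal, and the
 * lock type 0 is assumed to correspond to LCK_DEFAULT in lock.h.
 */
#if 0
static struct lock example_lock;
static struct lockuser example_lu;      /* static => lu_myreq starts NULL */

static void
example_critical_section(void)
{
        /* No wait/wakeup handlers: _lock_acquire() will simply spin. */
        if (_lock_init(&example_lock, (enum lock_type)0, NULL, NULL,
            calloc) != 0)
                abort();
        if (_lockuser_init(&example_lu, NULL) != 0)
                abort();

        _lock_acquire(&example_lock, &example_lu, 0); /* prio: LCK_PRIORITY only */
        /* ... code protected by example_lock ... */
        _lock_release(&example_lock, &example_lu);

        _lockuser_destroy(&example_lu);
        _lock_destroy(&example_lock);
}
#endif
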
/*
 * Acquire a lock, waiting (spinning or sleeping) for it to become
 * available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
        int i;
        int lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if (lck == NULL || lu == NULL || lck->l_head == NULL)
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
                LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
                LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
                LCK_ASSERT(lu->lu_watchreq == NULL);

                lu->lu_priority = prio;
        }
        /*
         * Atomically swap the head of the lock request with
         * this request.
         */
        atomic_swap_ptr((void *)&lck->l_head, lu->lu_myreq,
            (void *)&lu->lu_watchreq);

        if (lu->lu_watchreq->lr_locked != 0) {
                atomic_store_rel_ptr
                    ((volatile uintptr_t *)(void *)&lu->lu_watchreq->lr_watcher,
                    (uintptr_t)lu);
                if ((lck->l_wait == NULL) ||
                    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
                        while (lu->lu_watchreq->lr_locked != 0)
                                ;       /* spin, then yield? */
                } else {
                        /*
                         * Spin for a bit before invoking the wait function.
                         *
                         * We should be a little smarter here.  If we're
                         * running on a single processor, then the lock
                         * owner got preempted and spinning will accomplish
                         * nothing but waste time.  If we're running on
                         * multiple processors, the owner could be running
                         * on another CPU and we might acquire the lock if
                         * we spin for a bit.
                         *
                         * The other thing to keep in mind is that threads
                         * acquiring these locks are considered to be in
                         * critical regions; they will not be preempted by
                         * the _UTS_ until they release the lock.  It is
                         * therefore safe to assume that if a lock can't
                         * be acquired, it is currently held by a thread
                         * running in another KSE.
                         */
                        for (i = 0; i < MAX_SPINS; i++) {
                                if (lu->lu_watchreq->lr_locked == 0)
                                        return;
                                if (lu->lu_watchreq->lr_active == 0)
                                        break;
                        }
                        atomic_swap_int(&lu->lu_watchreq->lr_locked,
                            2, &lval);
                        if (lval == 0)
                                lu->lu_watchreq->lr_locked = 0;
                        else
                                lck->l_wait(lck, lu);
                }
        }
        lu->lu_myreq->lr_active = 1;
}

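/*
 * Illustrative sketch, not part of the library: a minimal wait/wakeup
 * handler pair for an LCK_ADAPTIVE lock.  The real handlers live in the
 * threads library proper; here the waiter just yields until the lock is
 * granted, and the wakeup side grants it with _lock_grant(), which marks
 * the watched request with the value 3.  The example_* names and the use
 * of sched_yield() are assumptions made for the sketch.
 */
#if 0
#include <sched.h>

static void
example_lock_wait(struct lock *lck __unused, struct lockuser *lu)
{
        /* Sleep (here: just yield) until the releaser grants the lock. */
        while (lu->lu_watchreq->lr_locked != 3)
                sched_yield();
}

static void
example_lock_wakeup(struct lock *lck, struct lockuser *lu)
{
        /*
         * Mark the watcher's request as granted.  A real handler would
         * also make the sleeping thread or KSE runnable again.
         */
        _lock_grant(lck, lu);
}
#endif
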
/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
        struct lockuser *lu_tmp, *lu_h;
        struct lockreq *myreq;
        int prio_h;
        int lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if ((lck == NULL) || (lu == NULL))
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                prio_h = 0;
                lu_h = NULL;

                /* Update tail if our request is last. */
                if (lu->lu_watchreq->lr_owner == NULL) {
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lck->l_tail,
                            (uintptr_t)lu->lu_myreq);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)NULL);
                } else {
                        /* Remove ourselves from the list. */
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)lu->lu_watchreq->lr_owner);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            (void *)&lu->lu_watchreq->lr_owner->lu_myreq,
                            (uintptr_t)lu->lu_myreq);
                }
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_watcher = NULL;
                /*
                 * Traverse the list of lock requests in reverse order
                 * looking for the user with the highest priority.
                 */
                for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
                     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
                        if (lu_tmp->lu_priority > prio_h) {
                                lu_h = lu_tmp;
                                prio_h = lu_tmp->lu_priority;
                        }
                }
                if (lu_h != NULL) {
                        /* Give the lock to the highest priority user. */
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(
                                    &lu_h->lu_watchreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck,
                                            lu_h->lu_myreq->lr_watcher);
                        }
                        else
                                atomic_store_rel_int(
                                    &lu_h->lu_watchreq->lr_locked, 0);
                } else {
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(&myreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck, myreq->lr_watcher);
                        }
                        else
                                /* Give the lock to the previous request. */
                                atomic_store_rel_int(&myreq->lr_locked, 0);
                }
        } else {
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                if (lck->l_wakeup) {
                        atomic_swap_int(&myreq->lr_locked, 0, &lval);
                        if (lval == 2)
                                /* Notify the sleeper */
                                lck->l_wakeup(lck, myreq->lr_watcher);
                }
                else
                        /* Give the lock to the previous request. */
                        atomic_store_rel_int(&myreq->lr_locked, 0);
        }
        lu->lu_myreq->lr_active = 0;
}

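/*
 * Mark the request the lock user is watching as granted (lr_locked = 3)
 * so a sleeping waiter can see that it now owns the lock.  Intended to
 * be called from a wakeup handler.
 */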
void
_lock_grant(struct lock *lck __unused, struct lockuser *lu)
{
        atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}

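/*
 * Record whether the owner of this lock user's request is currently
 * running; the adaptive spin loop in _lock_acquire() stops spinning and
 * sleeps once the request it is watching goes inactive.
 */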
void
_lockuser_setactive(struct lockuser *lu, int active)
{
        lu->lu_myreq->lr_active = active;
}