/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#ifdef _LOCK_DEBUG
#define LCK_ASSERT(e)   assert(e)
#else
#define LCK_ASSERT(e)
#endif

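/*
 * Upper bound on the number of times _lock_acquire() spins on a
 * contested adaptive lock before falling back to the lock's wait
 * handler.
 */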
#define MAX_SPINS       500

void
_lock_destroy(struct lock *lck)
{
        if ((lck != NULL) && (lck->l_head != NULL)) {
                free(lck->l_head);
                lck->l_head = NULL;
                lck->l_tail = NULL;
        }
}

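/*
 * Initialize a lock.  The caller supplies the allocator used for the
 * lock's initial request; _lock_reinit() passes plain calloc when it
 * has to allocate a fresh request.
 */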
int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc,
    void *(calloc_cb)(size_t, size_t))
{
        if (lck == NULL)
                return (-1);
        else if ((lck->l_head = calloc_cb(1, sizeof(struct lockreq))) == NULL)
                return (-1);
        else {
                lck->l_type = ltype;
                lck->l_wait = waitfunc;
                lck->l_wakeup = wakeupfunc;
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

int
_lock_reinit(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{
        if (lck == NULL)
                return (-1);
        else if (lck->l_head == NULL)
                return (_lock_init(lck, ltype, waitfunc, wakeupfunc, calloc));
        else {
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

int
_lockuser_init(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        else if ((lu->lu_myreq == NULL) &&
            ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
                return (-1);
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

int
_lockuser_reinit(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);

        if (lu->lu_watchreq != NULL) {
                /*
                 * In this case the lock is active.  All lockusers
                 * keep their watch request and drop their own
                 * (lu_myreq) request.  Their own request is either
                 * some other lockuser's watch request or is the
                 * head of the lock.
                 */
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
        }
        if (lu->lu_myreq == NULL)
                /*
                 * Oops, something isn't quite right.  Try to
                 * allocate one.
                 */
                return (_lockuser_init(lu, priv));
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

void
_lockuser_destroy(struct lockuser *lu)
{
        if ((lu != NULL) && (lu->lu_myreq != NULL))
                free(lu->lu_myreq);
}

/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
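/*
 * Each lockuser owns one request structure (lu_myreq).  To acquire a
 * lock it atomically trades that request for the one at the head of
 * the lock, which becomes its watch request (lu_watchreq), and then
 * waits for that request to be cleared by the previous holder.
 * lr_locked values: 0 - the request is unlocked and the lock belongs
 * to its watcher; 1 - the lock is still held; 2 - the waiter has
 * announced that it will sleep, so the releaser must invoke the
 * wakeup handler; 3 - stored by _lock_grant() when a sleeping waiter
 * is handed the lock.
 */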
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
        int i;
        int lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if (lck == NULL || lu == NULL || lck->l_head == NULL)
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
                LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
                LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
                LCK_ASSERT(lu->lu_watchreq == NULL);

                lu->lu_priority = prio;
        }
        /*
         * Atomically swap this request with the request at the
         * head of the lock's request list.
         */
        atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);

        if (lu->lu_watchreq->lr_locked != 0) {
                atomic_store_rel_ptr
                    ((volatile uintptr_t *)&lu->lu_watchreq->lr_watcher,
                    (uintptr_t)lu);
                if ((lck->l_wait == NULL) ||
                    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
                        while (lu->lu_watchreq->lr_locked != 0)
                                ;       /* spin, then yield? */
                } else {
                        /*
                         * Spin for a bit before invoking the wait function.
                         *
                         * We should be a little smarter here.  If we're
                         * running on a single processor, then the lock
                         * owner got preempted and spinning will accomplish
                         * nothing but waste time.  If we're running on
                         * multiple processors, the owner could be running
                         * on another CPU and we might acquire the lock if
                         * we spin for a bit.
                         *
                         * The other thing to keep in mind is that threads
                         * acquiring these locks are considered to be in
                         * critical regions; they will not be preempted by
                         * the _UTS_ until they release the lock.  It is
                         * therefore safe to assume that if a lock can't
                         * be acquired, it is currently held by a thread
                         * running in another KSE.
                         */
                        for (i = 0; i < MAX_SPINS; i++) {
                                if (lu->lu_watchreq->lr_locked == 0)
                                        return;
                                if (lu->lu_watchreq->lr_active == 0)
                                        break;
                        }
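                        /*
                         * Announce that we intend to sleep by marking
                         * the watch request with 2.  If the previous
                         * value was 0, the lock was released while we
                         * were spinning and is now ours; otherwise
                         * block in the lock's wait handler.
                         */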
                        atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
                            2, &lval);
                        if (lval == 0)
                                lu->lu_watchreq->lr_locked = 0;
                        else
                                lck->l_wait(lck, lu);
                }
        }
        lu->lu_myreq->lr_active = 1;
}

/*
 * Release a lock.
 */
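/*
 * For an LCK_PRIORITY lock the releaser first repairs the request
 * chain (updating l_tail, or splicing its old request into its
 * successor's place), then walks the outstanding requests from the
 * tail and grants the lock to the highest-priority waiter.  If there
 * is no such waiter, or for a plain lock, the lock is handed to the
 * request the releaser left in the queue, i.e. the next waiter in
 * FIFO order.
 */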
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
        struct lockuser *lu_tmp, *lu_h;
        struct lockreq *myreq;
        int prio_h;
        int lval;

        /**
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if ((lck == NULL) || (lu == NULL))
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                prio_h = 0;
                lu_h = NULL;

                /* Update tail if our request is last. */
                if (lu->lu_watchreq->lr_owner == NULL) {
                        atomic_store_rel_ptr((volatile uintptr_t *)&lck->l_tail,
                            (uintptr_t)lu->lu_myreq);
                        atomic_store_rel_ptr
                            ((volatile uintptr_t *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)NULL);
                } else {
                        /* Remove ourselves from the list. */
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            &lu->lu_myreq->lr_owner,
                            (uintptr_t)lu->lu_watchreq->lr_owner);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            &lu->lu_watchreq->lr_owner->lu_myreq,
                            (uintptr_t)lu->lu_myreq);
                }
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_watcher = NULL;
                /*
                 * Traverse the list of lock requests in reverse order
                 * looking for the user with the highest priority.
                 */
                for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
                     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
                        if (lu_tmp->lu_priority > prio_h) {
                                lu_h = lu_tmp;
                                prio_h = lu_tmp->lu_priority;
                        }
                }
                if (lu_h != NULL) {
                        /* Give the lock to the highest priority user. */
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(
                                    (int *)&lu_h->lu_watchreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck,
                                            lu_h->lu_myreq->lr_watcher);
                        }
                        else
                                atomic_store_rel_int(
                                    &lu_h->lu_watchreq->lr_locked, 0);
                } else {
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int((int *)&myreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck, myreq->lr_watcher);
                        }
                        else
                                /* Give the lock to the previous request. */
                                atomic_store_rel_int(&myreq->lr_locked, 0);
                }
        } else {
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                if (lck->l_wakeup) {
                        atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
                        if (lval == 2)
                                /* Notify the sleeper */
                                lck->l_wakeup(lck, myreq->lr_watcher);
                }
                else
                        /* Give the lock to the previous request. */
                        atomic_store_rel_int(&myreq->lr_locked, 0);
        }
        lu->lu_myreq->lr_active = 0;
}

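/*
 * Hand the lock to a waiter that blocked in the lock's wait handler.
 * The watch request is marked with 3 rather than 0 (presumably so a
 * grant to a sleeper can be told apart from an ordinary release);
 * the lck argument is unused.
 */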
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
        atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}

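/*
 * Mark the lockuser's own request as active (running) or not;
 * _lock_acquire() stops spinning early on a request that is not
 * marked active.
 */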
void
_lockuser_setactive(struct lockuser *lu, int active)
{
        lu->lu_myreq->lr_active = active;
}