/*-
 * Copyright (c) 2001, 2003 Daniel Eischen <deischen@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <machine/atomic.h>
#include <assert.h>
#include <stdlib.h>

#include "atomic_ops.h"
#include "lock.h"

#ifdef _LOCK_DEBUG
#define LCK_ASSERT(e)   assert(e)
#else
#define LCK_ASSERT(e)
#endif

#define MAX_SPINS       500

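/*
 * Free the lock's remaining (head) request and mark the lock as
 * uninitialized.
 */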
void
_lock_destroy(struct lock *lck)
{
        if ((lck != NULL) && (lck->l_head != NULL)) {
                free(lck->l_head);
                lck->l_head = NULL;
                lck->l_tail = NULL;
        }
}

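/*
 * Initialize a lock.  The lock starts out with one unowned, unlocked
 * request that forms both the head and the tail of its request queue.
 * Returns 0 on success, or -1 if lck is NULL or the initial request
 * cannot be allocated.
 */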
int
_lock_init(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{
        if (lck == NULL)
                return (-1);
        else if ((lck->l_head = malloc(sizeof(struct lockreq))) == NULL)
                return (-1);
        else {
                lck->l_type = ltype;
                lck->l_wait = waitfunc;
                lck->l_wakeup = wakeupfunc;
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

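/*
 * Reinitialize a lock in place, reusing its existing head request, or
 * fall back to _lock_init() if the lock was never initialized.
 */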
int
_lock_reinit(struct lock *lck, enum lock_type ltype,
    lock_handler_t *waitfunc, lock_handler_t *wakeupfunc)
{
        if (lck == NULL)
                return (-1);
        else if (lck->l_head == NULL)
                return (_lock_init(lck, ltype, waitfunc, wakeupfunc));
        else {
                lck->l_head->lr_locked = 0;
                lck->l_head->lr_watcher = NULL;
                lck->l_head->lr_owner = NULL;
                lck->l_head->lr_active = 1;
                lck->l_tail = lck->l_head;
        }
        return (0);
}

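/*
 * Initialize a lock user, allocating its own request (lu_myreq) if it
 * does not already have one, and recording the caller's private data.
 * Returns 0 on success, -1 on failure.
 */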
int
_lockuser_init(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        else if ((lu->lu_myreq == NULL) &&
            ((lu->lu_myreq = malloc(sizeof(struct lockreq))) == NULL))
                return (-1);
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

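/*
 * Reinitialize a lock user, recycling its watch request as its own
 * request (see the comment below), or allocate a fresh one if it has
 * no watch request.
 */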
int
_lockuser_reinit(struct lockuser *lu, void *priv)
{
        if (lu == NULL)
                return (-1);
        /*
         * All lockusers keep their watch request and drop their
         * own (lu_myreq) request.  Their own request is either
         * some other lockuser's watch request or is the head of
         * the lock.
         */
        lu->lu_myreq = lu->lu_watchreq;
        if (lu->lu_myreq == NULL)
                return (_lockuser_init(lu, priv));
        else {
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_watcher = NULL;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_active = 0;
                lu->lu_watchreq = NULL;
                lu->lu_priority = 0;
                lu->lu_private = priv;
                lu->lu_private2 = NULL;
        }
        return (0);
}

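/*
 * Free the lock user's own request.
 */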
void
_lockuser_destroy(struct lockuser *lu)
{
        if ((lu != NULL) && (lu->lu_myreq != NULL))
                free(lu->lu_myreq);
}

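/*
 * The block below is an illustrative usage sketch only and is not part
 * of the library build.  The function name is hypothetical; the enum
 * value LCK_ADAPTIVE and the structure layouts come from lock.h, and a
 * real caller inside libkse would supply KSE wait/wakeup handlers
 * instead of NULL.
 */
#if 0
static int
example_lock_usage(void)
{
        /*
         * Statics are zero-filled, so lu_myreq starts out NULL and
         * _lockuser_init() allocates it.
         */
        static struct lock lck;
        static struct lockuser lu;

        if (_lock_init(&lck, LCK_ADAPTIVE, NULL, NULL) != 0)
                return (-1);
        if (_lockuser_init(&lu, NULL) != 0) {
                _lock_destroy(&lck);
                return (-1);
        }

        /* With no wait handler the lock behaves as a spin lock. */
        _lock_acquire(&lck, &lu, 0);
        /* ... critical section ... */
        _lock_release(&lck, &lu);

        _lockuser_destroy(&lu);
        _lock_destroy(&lck);
        return (0);
}
#endif
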
/*
 * Acquire a lock waiting (spin or sleep) for it to become available.
 */
void
_lock_acquire(struct lock *lck, struct lockuser *lu, int prio)
{
        int i;
        int lval;

        /*
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if (lck == NULL || lu == NULL || lck->l_head == NULL)
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                LCK_ASSERT(lu->lu_myreq->lr_locked == 1);
                LCK_ASSERT(lu->lu_myreq->lr_watcher == NULL);
                LCK_ASSERT(lu->lu_myreq->lr_owner == lu);
                LCK_ASSERT(lu->lu_watchreq == NULL);

                lu->lu_priority = prio;
        }
        /*
         * Atomically swap the head of the lock request with
         * this request.
         */
        atomic_swap_ptr(&lck->l_head, lu->lu_myreq, &lu->lu_watchreq);

        if (lu->lu_watchreq->lr_locked != 0) {
                atomic_store_rel_ptr
                    ((volatile uintptr_t *)&lu->lu_watchreq->lr_watcher,
                    (uintptr_t)lu);
                if ((lck->l_wait == NULL) ||
                    ((lck->l_type & LCK_ADAPTIVE) == 0)) {
                        while (lu->lu_watchreq->lr_locked != 0)
                                ;       /* spin, then yield? */
                } else {
                        /*
                         * Spin for a bit before invoking the wait function.
                         *
                         * We should be a little smarter here.  If we're
                         * running on a single processor, then the lock
                         * owner got preempted and spinning will accomplish
                         * nothing but waste time.  If we're running on
                         * multiple processors, the owner could be running
                         * on another CPU and we might acquire the lock if
                         * we spin for a bit.
                         *
                         * The other thing to keep in mind is that threads
                         * acquiring these locks are considered to be in
                         * critical regions; they will not be preempted by
                         * the _UTS_ until they release the lock.  It is
                         * therefore safe to assume that if a lock can't
                         * be acquired, it is currently held by a thread
                         * running in another KSE.
                         */
                        for (i = 0; i < MAX_SPINS; i++) {
                                if (lu->lu_watchreq->lr_locked == 0)
                                        return;
                                if (lu->lu_watchreq->lr_active == 0)
                                        break;
                        }
                        atomic_swap_int((int *)&lu->lu_watchreq->lr_locked,
                            2, &lval);
                        if (lval == 0)
                                lu->lu_watchreq->lr_locked = 0;
                        else
                                lck->l_wait(lck, lu);

                }
        }
        lu->lu_myreq->lr_active = 1;
}

/*
 * Release a lock.
 */
void
_lock_release(struct lock *lck, struct lockuser *lu)
{
        struct lockuser *lu_tmp, *lu_h;
        struct lockreq *myreq;
        int prio_h;
        int lval;

        /*
         * XXX - We probably want to remove these checks to optimize
         *       performance.  It is also a bug if any one of the
         *       checks fails, so it's probably better to just let it
         *       SEGV and fix it.
         */
#if 0
        if ((lck == NULL) || (lu == NULL))
                return;
#endif
        if ((lck->l_type & LCK_PRIORITY) != 0) {
                prio_h = 0;
                lu_h = NULL;

                /* Update tail if our request is last. */
                if (lu->lu_watchreq->lr_owner == NULL) {
                        atomic_store_rel_ptr((volatile uintptr_t *)&lck->l_tail,
                            (uintptr_t)lu->lu_myreq);
                        atomic_store_rel_ptr
                            ((volatile uintptr_t *)&lu->lu_myreq->lr_owner,
                            (uintptr_t)NULL);
                } else {
                        /* Remove ourselves from the list. */
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            &lu->lu_myreq->lr_owner,
                            (uintptr_t)lu->lu_watchreq->lr_owner);
                        atomic_store_rel_ptr((volatile uintptr_t *)
                            &lu->lu_watchreq->lr_owner->lu_myreq,
                            (uintptr_t)lu->lu_myreq);
                }
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                lu->lu_myreq->lr_owner = lu;
                lu->lu_myreq->lr_watcher = NULL;
                /*
                 * Traverse the list of lock requests in reverse order
                 * looking for the user with the highest priority.
                 */
                for (lu_tmp = lck->l_tail->lr_watcher; lu_tmp != NULL;
                     lu_tmp = lu_tmp->lu_myreq->lr_watcher) {
                        if (lu_tmp->lu_priority > prio_h) {
                                lu_h = lu_tmp;
                                prio_h = lu_tmp->lu_priority;
                        }
                }
                if (lu_h != NULL) {
                        /* Give the lock to the highest priority user. */
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int(
                                    (int *)&lu_h->lu_watchreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck,
                                            lu_h->lu_myreq->lr_watcher);
                        }
                        else
                                atomic_store_rel_int(
                                    &lu_h->lu_watchreq->lr_locked, 0);
                } else {
                        if (lck->l_wakeup != NULL) {
                                atomic_swap_int((int *)&myreq->lr_locked,
                                    0, &lval);
                                if (lval == 2)
                                        /* Notify the sleeper */
                                        lck->l_wakeup(lck, myreq->lr_watcher);
                        }
                        else
                                /* Give the lock to the previous request. */
                                atomic_store_rel_int(&myreq->lr_locked, 0);
                }
        } else {
                /*
                 * The watch request now becomes our own because we've
                 * traded away our previous request.  Save our previous
                 * request so that we can grant the lock.
                 */
                myreq = lu->lu_myreq;
                lu->lu_myreq = lu->lu_watchreq;
                lu->lu_watchreq = NULL;
                lu->lu_myreq->lr_locked = 1;
                if (lck->l_wakeup) {
                        atomic_swap_int((int *)&myreq->lr_locked, 0, &lval);
                        if (lval == 2)
                                /* Notify the sleeper */
                                lck->l_wakeup(lck, myreq->lr_watcher);
                }
                else
                        /* Give the lock to the previous request. */
                        atomic_store_rel_int(&myreq->lr_locked, 0);
        }
        lu->lu_myreq->lr_active = 0;
}

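/*
 * Mark the given lock user's watched request as granted; the lock
 * argument is unused.
 */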
void
_lock_grant(struct lock *lck /* unused */, struct lockuser *lu)
{
        atomic_store_rel_int(&lu->lu_watchreq->lr_locked, 3);
}

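/*
 * Mark the lock user's own request as active or inactive.  The spin
 * loop in _lock_acquire() stops spinning once the request it is
 * watching is no longer active.
 */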
void
_lockuser_setactive(struct lockuser *lu, int active)
{
        lu->lu_myreq->lr_active = active;
}