/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
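
/*
 * Each __weak_reference() above exports an internal _pthread_rwlock_*
 * implementation under its standard pthread_rwlock_* name as a weak
 * alias, so other libraries or applications can interpose their own
 * strong definitions of the public symbols.
 */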

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;

	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
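	/*
	 * calloc() returns zeroed memory; an all-zero urwlock is the
	 * unlocked, no-waiters state, so no further setup is needed.
	 */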
	*rwlock = prwlock;
	return (0);
}

int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		*rwlock = NULL;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}
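
/*
 * Usage sketch (illustrative, not part of the library): because
 * pthread_rwlock_t is a pointer here, a statically allocated lock starts
 * out NULL and is initialized lazily by init_static() on first use, under
 * _rwlock_static_lock so racing first lockers allocate it only once.
 * The variable name below is hypothetical.
 *
 *	static pthread_rwlock_t map_lock;	// NULL: not yet initialized
 *
 *	pthread_rwlock_rdlock(&map_lock);	// first use allocates the lock
 *	...
 *	pthread_rwlock_unlock(&map_lock);
 */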

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}
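
	/*
	 * Illustrative interleaving of the deadlock avoided above; the
	 * threads T1 and T2 are hypothetical:
	 *
	 *	T1: rdlock(L)	- granted, T1's rdlock_count becomes 1
	 *	T2: wrlock(L)	- blocks until all readers drain
	 *	T1: rdlock(L)	- with writer preference this would queue
	 *			  behind T2, which in turn waits on T1
	 *
	 * URWLOCK_PREFER_READER grants T1's second rdlock immediately,
	 * breaking the cycle.
	 */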

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
		if (ret != EINTR)
			break;

		/* If interrupted, retry the lock in userland. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			curthread->rdlock_count++;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}
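
/*
 * Usage sketch (illustrative, not part of the library): the timed
 * variants take an absolute CLOCK_REALTIME deadline rather than a
 * relative timeout, so a caller converts "wait up to two seconds" like
 * this (the lock variable is hypothetical):
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 2;
 *	if (pthread_rwlock_timedrdlock(&map_lock, &deadline) == ETIMEDOUT)
 *		;	// still unavailable after ~2 seconds
 */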

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * See the comment in rwlock_rdlock_common(): a thread
		 * that already holds rdlocks must be allowed to take
		 * another one even if writers are blocked, or a
		 * recursive rdlock could deadlock against a waiting
		 * writer.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}
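
/*
 * Usage sketch (illustrative, not part of the library): the try
 * variants never block; they return EBUSY when the lock cannot be
 * taken immediately, which suits opportunistic updates.  The names
 * below are hypothetical.
 *
 *	if (pthread_rwlock_trywrlock(&map_lock) == 0) {
 *		update_map();		// exclusive access
 *		pthread_rwlock_unlock(&map_lock);
 *	} else {
 *		defer_update();		// a writer or readers hold it
 *	}
 */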

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says that the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/* If interrupted, retry the lock in userland. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	if (__predict_false(prwlock == NULL))
		return (EINVAL);

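	/*
	 * Snapshot the lock word: if URWLOCK_WRITE_OWNER is set we are
	 * releasing a write lock and must be its recorded owner;
	 * otherwise we are dropping one of this thread's read locks.
	 */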
	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
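
/*
 * End-to-end usage sketch (illustrative, not part of the library);
 * shows explicit initialization and a typical reader/writer split.
 * All names below are hypothetical.
 *
 *	#include <pthread.h>
 *
 *	static pthread_rwlock_t tbl_lock;
 *
 *	void
 *	tbl_setup(void)
 *	{
 *		pthread_rwlock_init(&tbl_lock, NULL);
 *	}
 *
 *	int
 *	tbl_lookup(int key)
 *	{
 *		int v;
 *
 *		pthread_rwlock_rdlock(&tbl_lock);	// many readers may hold this
 *		v = tbl_find(key);
 *		pthread_rwlock_unlock(&tbl_lock);
 *		return (v);
 *	}
 *
 *	void
 *	tbl_insert(int key, int v)
 *	{
 *		pthread_rwlock_wrlock(&tbl_lock);	// exclusive; records owner
 *		tbl_store(key, v);
 *		pthread_rwlock_unlock(&tbl_lock);
 *	}
 *
 *	void
 *	tbl_teardown(void)
 *	{
 *		pthread_rwlock_destroy(&tbl_lock);
 *	}
 */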