]> CyberLeo.Net >> Repos - FreeBSD/releng/7.2.git/blob - lib/libkse/thread/thr_rwlock.c
Create releng/7.2 from stable/7 in preparation for 7.2-RELEASE.
[FreeBSD/releng/7.2.git] / lib / libkse / thread / thr_rwlock.c
1 /*-
2  * Copyright (c) 1998 Alex Nash
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28
29 #include <errno.h>
30 #include <limits.h>
31 #include <stdlib.h>
32
33 #include "namespace.h"
34 #include <pthread.h>
35 #include "un-namespace.h"
36 #include "thr_private.h"
37
38 /* maximum number of times a read lock may be obtained */
39 #define MAX_READ_LOCKS          (INT_MAX - 1)
40
41 LT10_COMPAT_PRIVATE(_pthread_rwlock_destroy);
42 LT10_COMPAT_DEFAULT(pthread_rwlock_destroy);
43 LT10_COMPAT_PRIVATE(_pthread_rwlock_init);
44 LT10_COMPAT_DEFAULT(pthread_rwlock_init);
45 LT10_COMPAT_PRIVATE(_pthread_rwlock_rdlock);
46 LT10_COMPAT_DEFAULT(pthread_rwlock_rdlock);
47 LT10_COMPAT_PRIVATE(_pthread_rwlock_timedrdlock);
48 LT10_COMPAT_DEFAULT(pthread_rwlock_timedrdlock);
49 LT10_COMPAT_PRIVATE(_pthread_rwlock_tryrdlock);
50 LT10_COMPAT_DEFAULT(pthread_rwlock_tryrdlock);
51 LT10_COMPAT_PRIVATE(_pthread_rwlock_trywrlock);
52 LT10_COMPAT_DEFAULT(pthread_rwlock_trywrlock);
53 LT10_COMPAT_PRIVATE(_pthread_rwlock_unlock);
54 LT10_COMPAT_DEFAULT(pthread_rwlock_unlock);
55 LT10_COMPAT_PRIVATE(_pthread_rwlock_wrlock);
56 LT10_COMPAT_DEFAULT(pthread_rwlock_wrlock);
57 LT10_COMPAT_PRIVATE(_pthread_rwlock_timedwrlock);
58 LT10_COMPAT_DEFAULT(pthread_rwlock_timedwrlock);
59
60 __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
61 __weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
62 __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
63 __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
64 __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
65 __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
66 __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
67 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
68 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
69
70 /*
71  * Prototypes
72  */
73 static int init_static(pthread_rwlock_t *rwlock);
74
75
76 static int
77 init_static(pthread_rwlock_t *rwlock)
78 {
79         struct pthread *thread = _get_curthread();
80         int ret;
81
82         THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
83
84         if (*rwlock == NULL)
85                 ret = _pthread_rwlock_init(rwlock, NULL);
86         else
87                 ret = 0;
88
89         THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
90         return (ret);
91 }
92
93 int
94 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
95 {
96         int ret;
97
98         if (rwlock == NULL)
99                 ret = EINVAL;
100         else {
101                 pthread_rwlock_t prwlock;
102
103                 prwlock = *rwlock;
104
105                 _pthread_mutex_destroy(&prwlock->lock);
106                 _pthread_cond_destroy(&prwlock->read_signal);
107                 _pthread_cond_destroy(&prwlock->write_signal);
108                 free(prwlock);
109
110                 *rwlock = NULL;
111
112                 ret = 0;
113         }
114         return (ret);
115 }
116
117 int
118 _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
119 {
120         pthread_rwlock_t prwlock;
121         int ret;
122
123         /* allocate rwlock object */
124         prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
125
126         if (prwlock == NULL)
127                 return (ENOMEM);
128
129         /* initialize the lock */
130         if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
131                 free(prwlock);
132         else {
133                 /* initialize the read condition signal */
134                 ret = _pthread_cond_init(&prwlock->read_signal, NULL);
135
136                 if (ret != 0) {
137                         _pthread_mutex_destroy(&prwlock->lock);
138                         free(prwlock);
139                 } else {
140                         /* initialize the write condition signal */
141                         ret = _pthread_cond_init(&prwlock->write_signal, NULL);
142
143                         if (ret != 0) {
144                                 _pthread_cond_destroy(&prwlock->read_signal);
145                                 _pthread_mutex_destroy(&prwlock->lock);
146                                 free(prwlock);
147                         } else {
148                                 /* success */
149                                 prwlock->state = 0;
150                                 prwlock->blocked_writers = 0;
151
152                                 *rwlock = prwlock;
153                         }
154                 }
155         }
156
157         return (ret);
158 }
159
/*
 * Common code for pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock().  If abstime is NULL the caller blocks
 * indefinitely; otherwise the wait is bounded by that absolute
 * timeout, passed through to _pthread_cond_timedwait().
 *
 * The lock state convention: state > 0 is the number of active
 * readers, state == -1 means write-locked, state == 0 means free.
 * Returns 0 on success or an errno value (EINVAL, EAGAIN on reader
 * overflow, or a condition-wait error such as ETIMEDOUT).
 */
static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count: 'state' counts readers and must not overflow */
	if (prwlock->state == MAX_READ_LOCKS) {
		_thr_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime)
				ret = _pthread_cond_timedwait
				    (&prwlock->read_signal,
				    &prwlock->lock, abstime);
			else
				ret = _thr_cond_wait(&prwlock->read_signal,
				    &prwlock->lock);
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_thr_mutex_unlock(&prwlock->lock);
				return (ret);
			}
		}
	}

	/* per-thread read-lock count feeds the recursion check above */
	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}
236
/* Blocking read lock: untimed front end for rwlock_rdlock_common(). */
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}
242
243 __strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock);
244
/*
 * Read lock bounded by an absolute timeout; timeout handling is done
 * by the condition wait inside rwlock_rdlock_common().
 */
int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}
251
252 int
253 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
254 {
255         struct pthread *curthread;
256         pthread_rwlock_t prwlock;
257         int ret;
258
259         if (rwlock == NULL)
260                 return (EINVAL);
261
262         prwlock = *rwlock;
263
264         /* check for static initialization */
265         if (prwlock == NULL) {
266                 if ((ret = init_static(rwlock)) != 0)
267                         return (ret);
268
269                 prwlock = *rwlock;
270         }
271
272         /* grab the monitor lock */
273         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
274                 return (ret);
275
276         curthread = _get_curthread();
277         if (prwlock->state == MAX_READ_LOCKS)
278                 ret = EAGAIN;
279         else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
280                 /* see comment for pthread_rwlock_rdlock() */
281                 curthread->rdlock_count++;
282                 prwlock->state++;
283         }
284         /* give writers priority over readers */
285         else if (prwlock->blocked_writers || prwlock->state < 0)
286                 ret = EBUSY;
287         else {
288                 curthread->rdlock_count++;
289                 prwlock->state++; /* indicate we are locked for reading */
290         }
291
292         /* see the comment on this in pthread_rwlock_rdlock */
293         _pthread_mutex_unlock(&prwlock->lock);
294
295         return (ret);
296 }
297
298 int
299 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
300 {
301         pthread_rwlock_t prwlock;
302         int ret;
303
304         if (rwlock == NULL)
305                 return (EINVAL);
306
307         prwlock = *rwlock;
308
309         /* check for static initialization */
310         if (prwlock == NULL) {
311                 if ((ret = init_static(rwlock)) != 0)
312                         return (ret);
313
314                 prwlock = *rwlock;
315         }
316
317         /* grab the monitor lock */
318         if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
319                 return (ret);
320
321         if (prwlock->state != 0)
322                 ret = EBUSY;
323         else
324                 /* indicate we are locked for writing */
325                 prwlock->state = -1;
326
327         /* see the comment on this in pthread_rwlock_rdlock */
328         _pthread_mutex_unlock(&prwlock->lock);
329
330         return (ret);
331 }
332
/*
 * Release one read lock (state > 0) or the write lock (state == -1).
 * When the last reader leaves and a writer is queued, one writer is
 * signaled.  When the writer leaves, queued writers are preferred
 * (signal one); otherwise all blocked readers are broadcast awake.
 * Returns EINVAL if the rwlock is not locked at all.
 */
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		/* read-locked: drop one reader */
		curthread->rdlock_count--;
		prwlock->state--;
		/* last reader out wakes one waiting writer */
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/* write-locked: release, preferring queued writers */
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
		else
			ret = _thr_cond_broadcast(&prwlock->read_signal);
	} else
		/* not locked */
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}
373
374 __strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock);
375
/*
 * Common code for pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock().  Waits (bounded by abstime when
 * non-NULL) until the rwlock is completely free, then marks it
 * write-locked (state == -1).  'blocked_writers' is incremented
 * around each wait so readers yield priority to queued writers; it
 * is always decremented again, including on wait failure.
 * Returns 0 on success or an errno value.
 */
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		/* advertise a queued writer so readers back off */
		prwlock->blocked_writers++;

		if (abstime != NULL)
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, abstime);
		else
			ret = _thr_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		if (ret != 0) {
			/* e.g. ETIMEDOUT: withdraw from the writer queue */
			prwlock->blocked_writers--;
			_thr_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}
425
/* Blocking write lock: untimed front end for rwlock_wrlock_common(). */
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}
431 __strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock);
432
/*
 * Write lock bounded by an absolute timeout; timeout handling is done
 * by the condition wait inside rwlock_wrlock_common().
 */
int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}