/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

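/*
 * Process-shared rwlocks are stored out of line on a dedicated shared
 * page (see __thr_pshared_offpage()), so the structure must fit within
 * a single page.
 */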
_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

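/*
 * Resolve *rwlock to its backing object before use: map the off-page
 * object for a process-shared lock, lazily initialize a lock that
 * still holds the static initializer, and reject one that has already
 * been destroyed.  The initializer and destroyed markers are small
 * sentinel values rather than real pointers, hence the <= comparison.
 */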
#define CHECK_AND_INIT_RWLOCK                                           \
        if (*rwlock == THR_PSHARED_PTR) {                               \
                prwlock = __thr_pshared_offpage(rwlock, 0);             \
                if (prwlock == NULL)                                    \
                        return (EINVAL);                                \
        } else if (__predict_false((prwlock = (*rwlock)) <=             \
            THR_RWLOCK_DESTROYED)) {                                    \
                if (prwlock == THR_RWLOCK_INITIALIZER) {                \
                        int ret;                                        \
                        ret = init_static(_get_curthread(), rwlock);    \
                        if (ret)                                        \
                                return (ret);                           \
                } else if (prwlock == THR_RWLOCK_DESTROYED) {           \
                        return (EINVAL);                                \
                }                                                       \
                prwlock = *rwlock;                                      \
        }

/*
 * Allocate and initialize the lock object behind *rwlock.  For
 * process-private locks the object is heap-allocated; for
 * process-shared locks it lives on the shared off-page and the
 * caller's handle is set to the THR_PSHARED_PTR sentinel.
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        pthread_rwlock_t prwlock;

        if (attr == NULL || *attr == NULL ||
            (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
                prwlock = calloc(1, sizeof(struct pthread_rwlock));
                if (prwlock == NULL)
                        return (ENOMEM);
                *rwlock = prwlock;
        } else {
                prwlock = __thr_pshared_offpage(rwlock, 1);
                if (prwlock == NULL)
                        return (EFAULT);
                prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
                *rwlock = THR_PSHARED_PTR;
        }
        return (0);
}

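/*
 * Release the storage behind *rwlock and mark the handle destroyed.
 * A lock still holding the static initializer has nothing to free;
 * destroying an already-destroyed lock reports EINVAL.
 */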
int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
        pthread_rwlock_t prwlock;
        int ret;

        prwlock = *rwlock;
        if (prwlock == THR_RWLOCK_INITIALIZER)
                ret = 0;
        else if (prwlock == THR_RWLOCK_DESTROYED)
                ret = EINVAL;
        else if (prwlock == THR_PSHARED_PTR) {
                *rwlock = THR_RWLOCK_DESTROYED;
                __thr_pshared_destroy(rwlock);
                ret = 0;
        } else {
                *rwlock = THR_RWLOCK_DESTROYED;
                free(prwlock);
                ret = 0;
        }
        return (ret);
}

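/*
 * First-use initialization for a statically initialized rwlock.  The
 * global _rwlock_static_lock serializes racing first users, and the
 * handle is re-checked under that lock so only one thread allocates
 * the object.
 */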
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == THR_RWLOCK_INITIALIZER)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

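/*
 * Exported initializer: clear the caller's handle, then let
 * rwlock_init() allocate the object.  Typical use from application
 * code (illustrative sketch only, not part of this file):
 *
 *      pthread_rwlock_t lk;
 *      pthread_rwlock_init(&lk, NULL);
 *      pthread_rwlock_rdlock(&lk);
 *      ...
 *      pthread_rwlock_unlock(&lk);
 *      pthread_rwlock_destroy(&lk);
 */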
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

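/*
 * Common path for rdlock and timedrdlock: try the userland fast path
 * first, then sleep in the kernel, honoring an optional absolute
 * timeout and retrying the fast path after an EINTR wakeup.
 */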
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        CHECK_AND_INIT_RWLOCK

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

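        /*
         * Illustrative sketch of the deadlock avoided above (not part
         * of the library itself):
         *
         *      pthread_rwlock_rdlock(&lk);  // thread A holds a read lock
         *      // thread B: pthread_rwlock_wrlock(&lk) blocks; with
         *      // writer preference, new readers would now block too
         *      pthread_rwlock_rdlock(&lk);  // thread A would queue behind
         *                                   // B, which waits on A: deadlock
         *
         * URWLOCK_PREFER_READER lets the second rdlock succeed instead.
         */
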
        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0) {
                curthread->rdlock_count++;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* goto kernel and lock it */
                ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
                        ret = 0;
                        break;
                }
        }
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

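/*
 * Exported read-lock entry points; both funnel into
 * rwlock_rdlock_common().
 */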
int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        CHECK_AND_INIT_RWLOCK

        if (curthread->rdlock_count) {
                /*
                 * If this thread already holds rdlocks, prefer readers
                 * so that a recursive rdlock cannot deadlock against
                 * blocked writers; see the comment in
                 * rwlock_rdlock_common() for the full reasoning.
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

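/*
 * Non-blocking write lock.  On success, record the owning thread's TID
 * so that unlock can reject a release attempt by a non-owner.
 */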
int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        CHECK_AND_INIT_RWLOCK

        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0)
                prwlock->owner = TID(curthread);
        return (ret);
}

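/*
 * Common path for wrlock and timedwrlock: userland fast path first,
 * then the kernel slow path, retrying the fast path after an EINTR
 * wakeup.  The owner TID is recorded on every successful acquisition.
 */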
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        CHECK_AND_INIT_RWLOCK

        /*
         * POSIX said the validity of the abstimeout parameter need
         * not be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0) {
                prwlock->owner = TID(curthread);
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* goto kernel and lock it */
                ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
                if (ret == 0) {
                        prwlock->owner = TID(curthread);
                        break;
                }

                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
                        ret = 0;
                        prwlock->owner = TID(curthread);
                        break;
                }
        }
        return (ret);
}

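/*
 * Exported write-lock entry points; both funnel into
 * rwlock_wrlock_common().
 */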
int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
        return (rwlock_wrlock_common(rwlock, abstime));
}

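/*
 * Unlock for both read and write holders.  A write unlock requires
 * that the caller be the recorded owner; a successful read unlock
 * decrements this thread's rdlock_count.
 */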
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;
        int32_t state;

        if (*rwlock == THR_PSHARED_PTR) {
                prwlock = __thr_pshared_offpage(rwlock, 0);
                if (prwlock == NULL)
                        return (EINVAL);
        } else {
                prwlock = *rwlock;
        }

        if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
                return (EINVAL);

        state = prwlock->lock.rw_state;
        if (state & URWLOCK_WRITE_OWNER) {
                if (__predict_false(prwlock->owner != TID(curthread)))
                        return (EPERM);
                prwlock->owner = 0;
        }

        ret = _thr_rwlock_unlock(&prwlock->lock);
        if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
                curthread->rdlock_count--;

        return (ret);
}