/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

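/*
 * Process-shared rwlocks are kept on an "off-page" shared between
 * processes (see __thr_pshared_offpage()), so the lock structure must
 * fit within a single page.
 */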
_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_thr_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_destroy, _pthread_rwlock_destroy);
__weak_reference(_thr_rwlock_init, pthread_rwlock_init);
__weak_reference(_thr_rwlock_init, _pthread_rwlock_init);
__weak_reference(_Tthr_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_Tthr_rwlock_rdlock, _pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_tryrdlock, _pthread_rwlock_tryrdlock);
__weak_reference(_Tthr_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_trywrlock, _pthread_rwlock_trywrlock);
__weak_reference(_Tthr_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_unlock, _pthread_rwlock_unlock);
__weak_reference(_Tthr_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_Tthr_rwlock_wrlock, _pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

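/*
 * Fast path: resolve an rwlock handle to the underlying lock object.
 * Process-shared handles (THR_PSHARED_PTR) and handles still holding a
 * static-initializer or destroyed value take the out-of-line slow path
 * in init_rwlock().
 */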
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
        if (__predict_false(*rwlock == THR_PSHARED_PTR ||
            *rwlock <= THR_RWLOCK_DESTROYED))
                return (init_rwlock(rwlock, rwlock_out));
        *rwlock_out = *rwlock;
        return (0);
}

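/*
 * Slow path of check_and_init_rwlock(): look up the off-page object of
 * a process-shared lock, lazily initialize a statically initialized
 * lock, and reject a lock that has already been destroyed.
 */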
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
        pthread_rwlock_t prwlock;
        int ret;

        if (*rwlock == THR_PSHARED_PTR) {
                prwlock = __thr_pshared_offpage(rwlock, 0);
                if (prwlock == NULL)
                        return (EINVAL);
        } else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
                if (prwlock == THR_RWLOCK_INITIALIZER) {
                        ret = init_static(_get_curthread(), rwlock);
                        if (ret != 0)
                                return (ret);
                } else if (prwlock == THR_RWLOCK_DESTROYED) {
                        return (EINVAL);
                }
                prwlock = *rwlock;
        }
        *rwlock_out = prwlock;
        return (0);
}

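/*
 * Allocate the lock object.  A process-private lock is allocated from
 * the heap and the handle points directly at it; a process-shared lock
 * lives on the off-page and the handle is set to THR_PSHARED_PTR.
 */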
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        pthread_rwlock_t prwlock;

        if (attr == NULL || *attr == NULL ||
            (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
                prwlock = calloc(1, sizeof(struct pthread_rwlock));
                if (prwlock == NULL)
                        return (ENOMEM);
                *rwlock = prwlock;
        } else {
                prwlock = __thr_pshared_offpage(rwlock, 1);
                if (prwlock == NULL)
                        return (EFAULT);
                prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
                *rwlock = THR_PSHARED_PTR;
        }
        return (0);
}

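/*
 * Destroying a statically initialized but never used lock requires no
 * cleanup; otherwise the handle is marked destroyed before the lock
 * object is released, so later use fails with EINVAL.
 */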
int
_thr_rwlock_destroy(pthread_rwlock_t *rwlock)
{
        pthread_rwlock_t prwlock;
        int ret;

        prwlock = *rwlock;
        if (prwlock == THR_RWLOCK_INITIALIZER)
                ret = 0;
        else if (prwlock == THR_RWLOCK_DESTROYED)
                ret = EINVAL;
        else if (prwlock == THR_PSHARED_PTR) {
                *rwlock = THR_RWLOCK_DESTROYED;
                __thr_pshared_destroy(rwlock);
                ret = 0;
        } else {
                *rwlock = THR_RWLOCK_DESTROYED;
                free(prwlock);
                ret = 0;
        }
        return (ret);
}

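/*
 * Lazily initialize a lock created with PTHREAD_RWLOCK_INITIALIZER.
 * The global _rwlock_static_lock serializes racing threads, and the
 * re-check of the handle ensures only one of them initializes it.
 */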
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == THR_RWLOCK_INITIALIZER)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

int
_thr_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

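/*
 * Common read-lock path for the blocking and timed entry points; a
 * NULL abstime means wait forever.  The lock is first attempted in
 * userland, and only a contested lock drops into the kernel.
 */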
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        /*
         * POSIX says the validity of the abstime parameter need not
         * be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0) {
                curthread->rdlock_count++;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* Go to the kernel and lock it. */
                ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
                if (ret != EINTR)
                        break;

                /* If interrupted, try to lock it in userland again. */
                if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
                        ret = 0;
                        break;
                }
        }
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

int
_Tthr_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

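/*
 * Non-blocking read lock: the same userland attempt as in
 * rwlock_rdlock_common(), but without falling back to the kernel.
 */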
int
_Tthr_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

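/*
 * Non-blocking write lock.  On success the owner's thread id is
 * recorded so that unlock can detect a caller that does not hold the
 * write lock.
 */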
int
_Tthr_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0)
                prwlock->owner = TID(curthread);
        return (ret);
}

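/*
 * Common write-lock path for the blocking and timed entry points,
 * mirroring rwlock_rdlock_common(); the owner is recorded on every
 * successful acquisition.
 */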
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        /*
         * POSIX says the validity of the abstime parameter need not
         * be checked if the lock can be immediately acquired.
         */
        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0) {
                prwlock->owner = TID(curthread);
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* Go to the kernel and lock it. */
                ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
                if (ret == 0) {
                        prwlock->owner = TID(curthread);
                        break;
                }

                if (ret != EINTR)
                        break;

                /* If interrupted, try to lock it in userland again. */
                if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
                        ret = 0;
                        prwlock->owner = TID(curthread);
                        break;
                }
        }
        return (ret);
}

int
_Tthr_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
        return (rwlock_wrlock_common(rwlock, abstime));
}

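/*
 * Unlock either a read or a write hold.  The URWLOCK_WRITE_OWNER bit
 * sampled before unlocking distinguishes the two: releasing a write
 * hold requires being the recorded owner, while releasing a read hold
 * decrements the per-thread rdlock count used above for deadlock
 * avoidance.
 */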
int
_Tthr_rwlock_unlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;
        int32_t state;

        if (*rwlock == THR_PSHARED_PTR) {
                prwlock = __thr_pshared_offpage(rwlock, 0);
                if (prwlock == NULL)
                        return (EINVAL);
        } else {
                prwlock = *rwlock;
        }

        if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
                return (EINVAL);

        state = prwlock->lock.rw_state;
        if (state & URWLOCK_WRITE_OWNER) {
                if (__predict_false(prwlock->owner != TID(curthread)))
                        return (EPERM);
                prwlock->owner = 0;
        }

        ret = _thr_rwlock_unlock(&prwlock->lock);
        if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
                curthread->rdlock_count--;

        return (ret);
}