FreeBSD/FreeBSD.git: lib/libthr/thread/thr_rwlock.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

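/*
 * Resolve *rwlock to the underlying pthread_rwlock structure.  The
 * common, already-initialized case is handled inline; process-shared
 * and statically initialized (or destroyed) locks fall back to the
 * out-of-line init_rwlock() slow path.
 */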
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
        if (__predict_false(*rwlock == THR_PSHARED_PTR ||
            *rwlock <= THR_RWLOCK_DESTROYED))
                return (init_rwlock(rwlock, rwlock_out));
        *rwlock_out = *rwlock;
        return (0);
}

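/*
 * Slow path of check_and_init_rwlock(): look up the off-page object for
 * a process-shared lock, or finish initialization of a lock created
 * with PTHREAD_RWLOCK_INITIALIZER.  An already-destroyed lock yields
 * EINVAL.
 */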
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
        pthread_rwlock_t prwlock;
        int ret;

        if (*rwlock == THR_PSHARED_PTR) {
                prwlock = __thr_pshared_offpage(rwlock, 0);
                if (prwlock == NULL)
                        return (EINVAL);
        } else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
                if (prwlock == THR_RWLOCK_INITIALIZER) {
                        ret = init_static(_get_curthread(), rwlock);
                        if (ret != 0)
                                return (ret);
                } else if (prwlock == THR_RWLOCK_DESTROYED) {
                        return (EINVAL);
                }
                prwlock = *rwlock;
        }
        *rwlock_out = prwlock;
        return (0);
}

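/*
 * Allocate and initialize the lock storage.  A process-private lock
 * lives in calloc()ed memory that *rwlock points at directly; a
 * process-shared lock is placed on its shared off-page and *rwlock is
 * set to the THR_PSHARED_PTR sentinel instead.
 */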
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
        pthread_rwlock_t prwlock;

        if (attr == NULL || *attr == NULL ||
            (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
                prwlock = calloc(1, sizeof(struct pthread_rwlock));
                if (prwlock == NULL)
                        return (ENOMEM);
                *rwlock = prwlock;
        } else {
                prwlock = __thr_pshared_offpage(rwlock, 1);
                if (prwlock == NULL)
                        return (EFAULT);
                prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
                *rwlock = THR_PSHARED_PTR;
        }
        return (0);
}

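/*
 * Mark the lock destroyed and release its storage.  A lock still in its
 * static-initializer state was never allocated, so there is nothing to
 * free; destroying an already-destroyed lock reports EINVAL.
 */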
int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
        pthread_rwlock_t prwlock;
        int ret;

        prwlock = *rwlock;
        if (prwlock == THR_RWLOCK_INITIALIZER)
                ret = 0;
        else if (prwlock == THR_RWLOCK_DESTROYED)
                ret = EINVAL;
        else if (prwlock == THR_PSHARED_PTR) {
                *rwlock = THR_RWLOCK_DESTROYED;
                __thr_pshared_destroy(rwlock);
                ret = 0;
        } else {
                *rwlock = THR_RWLOCK_DESTROYED;
                free(prwlock);
                ret = 0;
        }
        return (ret);
}

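/*
 * Perform one-time initialization of a statically initialized lock.
 * The check is redone under _rwlock_static_lock so that concurrent
 * first users initialize the lock exactly once.
 */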
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

        if (*rwlock == THR_RWLOCK_INITIALIZER)
                ret = rwlock_init(rwlock, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

        return (ret);
}

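/*
 * Dynamic initialization entry point; a NULL attr selects the default
 * (process-private) attributes.
 */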
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

        *rwlock = NULL;
        return (rwlock_init(rwlock, attr));
}

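/*
 * Common implementation of pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock(); abstime == NULL means wait without a
 * timeout.
 */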
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        /*
         * POSIX says the validity of the abstime parameter need not be
         * checked if the lock can be acquired immediately.
         */
        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0) {
                curthread->rdlock_count++;
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* go to the kernel and lock it */
                ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
                        ret = 0;
                        break;
                }
        }
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
        return (rwlock_rdlock_common(rwlock, abstime));
}

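/*
 * Non-blocking read lock.  The recursive-read heuristic mirrors the one
 * in rwlock_rdlock_common().
 */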
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int flags;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        if (curthread->rdlock_count) {
                /*
                 * To avoid having to track all the rdlocks held by
                 * a thread or all of the threads that hold a rdlock,
                 * we keep a simple count of all the rdlocks held by
                 * a thread.  If a thread holds any rdlocks it is
                 * possible that it is attempting to take a recursive
                 * rdlock.  If there are blocked writers and precedence
                 * is given to them, then that would result in the thread
                 * deadlocking.  So allowing a thread to take the rdlock
                 * when it already has one or more rdlocks avoids the
                 * deadlock.  I hope the reader can follow that logic ;-)
                 */
                flags = URWLOCK_PREFER_READER;
        } else {
                flags = 0;
        }

        ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
        if (ret == 0)
                curthread->rdlock_count++;
        return (ret);
}

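/*
 * Non-blocking write lock; on success record the owning thread id so
 * that unlock can verify ownership.
 */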
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0)
                prwlock->owner = TID(curthread);
        return (ret);
}

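/*
 * Common implementation of pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock(); abstime == NULL means wait without a
 * timeout.
 */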
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;

        ret = check_and_init_rwlock(rwlock, &prwlock);
        if (ret != 0)
                return (ret);

        /*
         * POSIX says the validity of the abstime parameter need not be
         * checked if the lock can be acquired immediately.
         */
        ret = _thr_rwlock_trywrlock(&prwlock->lock);
        if (ret == 0) {
                prwlock->owner = TID(curthread);
                return (ret);
        }

        if (__predict_false(abstime &&
            (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
                return (EINVAL);

        for (;;) {
                /* go to the kernel and lock it */
                ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
                if (ret == 0) {
                        prwlock->owner = TID(curthread);
                        break;
                }

                if (ret != EINTR)
                        break;

                /* if interrupted, try to lock it in userland again. */
                if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
                        ret = 0;
                        prwlock->owner = TID(curthread);
                        break;
                }
        }
        return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
        return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
        return (rwlock_wrlock_common (rwlock, abstime));
}

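/*
 * Release either a read or a write hold.  The pre-unlock state shows
 * which one: a write release must come from the owning thread, while a
 * read release decrements the per-thread rdlock count used by the
 * recursive-read heuristic above.
 */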
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
        struct pthread *curthread = _get_curthread();
        pthread_rwlock_t prwlock;
        int ret;
        int32_t state;

        if (*rwlock == THR_PSHARED_PTR) {
                prwlock = __thr_pshared_offpage(rwlock, 0);
                if (prwlock == NULL)
                        return (EINVAL);
        } else {
                prwlock = *rwlock;
        }

        if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
                return (EINVAL);

        state = prwlock->lock.rw_state;
        if (state & URWLOCK_WRITE_OWNER) {
                if (__predict_false(prwlock->owner != TID(curthread)))
                        return (EPERM);
                prwlock->owner = 0;
        }

        ret = _thr_rwlock_unlock(&prwlock->lock);
        if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
                curthread->rdlock_count--;

        return (ret);
}