/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <stdbool.h>
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS    2000

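/*
 * Illustrative sketch, not part of libthr: how an application requests
 * the adaptive mutex type whose lock path uses the spin count above.
 * These are the standard pthread attribute calls;
 * PTHREAD_MUTEX_ADAPTIVE_NP is the FreeBSD-specific type.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
 *	pthread_mutex_init(&mtx, &attr);
 *	pthread_mutexattr_destroy(&attr);
 */
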
/*
 * Prototypes
 */
int     __pthread_mutex_init(pthread_mutex_t *mutex,
                const pthread_mutexattr_t *mutex_attr);
int     __pthread_mutex_trylock(pthread_mutex_t *mutex);
int     __pthread_mutex_lock(pthread_mutex_t *mutex);
int     __pthread_mutex_timedlock(pthread_mutex_t *mutex,
                const struct timespec *abstime);
int     _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
                void *(calloc_cb)(size_t, size_t));
int     _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int     _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int     __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int     _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int     _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int     __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int      mutex_self_trylock(pthread_mutex_t);
static int      mutex_self_lock(pthread_mutex_t,
                                const struct timespec *abstime);
static int      mutex_unlock_common(struct pthread_mutex *, int, int *);
static int      mutex_lock_sleep(struct pthread *, pthread_mutex_t,
                                const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static void
mutex_init_link(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
        m->m_qe.tqe_prev = NULL;
        m->m_qe.tqe_next = NULL;
        m->m_pqe.tqe_prev = NULL;
        m->m_pqe.tqe_next = NULL;
#endif
}

static void
mutex_assert_is_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(m->m_qe.tqe_prev == NULL)) {
                char msg[128];
                snprintf(msg, sizeof(msg),
                    "mutex %p own %#x %#x is not on list %p %p",
                    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
                    m->m_qe.tqe_next);
                PANIC(msg);
        }
#endif
}

static void
mutex_assert_not_owned(struct pthread_mutex *m)
{

#if defined(_PTHREADS_INVARIANTS)
        if (__predict_false(m->m_qe.tqe_prev != NULL ||
            m->m_qe.tqe_next != NULL)) {
                char msg[128];
                snprintf(msg, sizeof(msg),
                    "mutex %p own %#x %#x is on list %p %p",
                    m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
                    m->m_qe.tqe_next);
                PANIC(msg);
        }
#endif
}

static int
is_pshared_mutex(struct pthread_mutex *m)
{

        return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}

static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{

        if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
            attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
                return (EINVAL);
        if (attr->m_protocol < PTHREAD_PRIO_NONE ||
            attr->m_protocol > PTHREAD_PRIO_PROTECT)
                return (EINVAL);
        return (0);
}

static void
mutex_init_body(struct pthread_mutex *pmutex,
    const struct pthread_mutex_attr *attr)
{

        pmutex->m_flags = attr->m_type;
        pmutex->m_owner = 0;
        pmutex->m_count = 0;
        pmutex->m_spinloops = 0;
        pmutex->m_yieldloops = 0;
        mutex_init_link(pmutex);
        switch (attr->m_protocol) {
        case PTHREAD_PRIO_NONE:
                pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
                pmutex->m_lock.m_flags = 0;
                break;
        case PTHREAD_PRIO_INHERIT:
                pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
                pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
                break;
        case PTHREAD_PRIO_PROTECT:
                pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
                pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
                pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
                break;
        }
        if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
                pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;

        if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
                pmutex->m_spinloops =
                    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
                pmutex->m_yieldloops = _thr_yieldloops;
        }
}
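
/*
 * Illustrative sketch, not part of libthr: the attribute settings that
 * make mutex_init_body() take the PTHREAD_PRIO_PROTECT branch above and
 * seed m_ceilings[0].  The ceiling value 20 is an arbitrary example.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t mtx;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	pthread_mutexattr_setprioceiling(&attr, 20);
 *	pthread_mutex_init(&mtx, &attr);
 */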

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
        const struct pthread_mutex_attr *attr;
        struct pthread_mutex *pmutex;
        int error;

        if (mutex_attr == NULL) {
                attr = &_pthread_mutexattr_default;
        } else {
                attr = mutex_attr;
                error = mutex_check_attr(attr);
                if (error != 0)
                        return (error);
        }
        if ((pmutex = (pthread_mutex_t)
            calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
                return (ENOMEM);
        mutex_init_body(pmutex, attr);
        *mutex = pmutex;
        return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == THR_MUTEX_INITIALIZER)
                ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
        else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
                ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
                    calloc);
        else
                ret = 0;
        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}
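
/*
 * Illustrative sketch, not part of libthr: a statically initialized
 * mutex stays at the THR_MUTEX_INITIALIZER sentinel until first use;
 * init_static() above replaces it with a real allocation on the first
 * lock attempt, so the calloc() happens inside the first
 * pthread_mutex_lock() call below.
 *
 *	static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&mtx);
 *	pthread_mutex_unlock(&mtx);
 */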

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
        struct pthread_mutex *m2;

        m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
        if (m2 != NULL)
                m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
        else
                m->m_lock.m_ceilings[1] = -1;
}

static void
shared_mutex_init(struct pthread_mutex *pmtx, const struct
    pthread_mutex_attr *mutex_attr)
{
        static const struct pthread_mutex_attr foobar_mutex_attr = {
                .m_type = PTHREAD_MUTEX_DEFAULT,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_ceiling = 0,
                .m_pshared = PTHREAD_PROCESS_SHARED
        };
        bool done;

        /*
         * Hack to allow multiple pthread_mutex_init() calls on the
         * same process-shared mutex.  We rely on the kernel allocating
         * a zeroed offpage for the mutex, i.e. the
         * PMUTEX_INITSTAGE_ALLOC value must be zero.
         */
        for (done = false; !done;) {
                switch (pmtx->m_ps) {
                case PMUTEX_INITSTAGE_DONE:
                        atomic_thread_fence_acq();
                        done = true;
                        break;
                case PMUTEX_INITSTAGE_ALLOC:
                        if (atomic_cmpset_int(&pmtx->m_ps,
                            PMUTEX_INITSTAGE_ALLOC, PMUTEX_INITSTAGE_BUSY)) {
                                if (mutex_attr == NULL)
                                        mutex_attr = &foobar_mutex_attr;
                                mutex_init_body(pmtx, mutex_attr);
                                atomic_store_rel_int(&pmtx->m_ps,
                                    PMUTEX_INITSTAGE_DONE);
                                done = true;
                        }
                        break;
                case PMUTEX_INITSTAGE_BUSY:
                        _pthread_yield();
                        break;
                default:
                        PANIC("corrupted offpage");
                        break;
                }
        }
}
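
/*
 * Illustrative sketch, not part of libthr: the racing initialization
 * that the staged-init loop above tolerates.  Two processes may both
 * call pthread_mutex_init() on the same mutex living in MAP_SHARED
 * memory; mp is assumed to point into such a mapping (shm_open()/mmap()
 * setup and error handling omitted).
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t *mp;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_mutex_init(mp, &attr);
 */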

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
        struct pthread_mutex *pmtx;
        int ret;

        if (mutex_attr != NULL) {
                ret = mutex_check_attr(*mutex_attr);
                if (ret != 0)
                        return (ret);
        }
        if (mutex_attr == NULL ||
            (*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
                return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
                    calloc));
        }
        pmtx = __thr_pshared_offpage(mutex, 1);
        if (pmtx == NULL)
                return (EFAULT);
        *mutex = THR_PSHARED_PTR;
        shared_mutex_init(pmtx, *mutex_attr);
        return (0);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
        static const struct pthread_mutex_attr attr = {
                .m_type = PTHREAD_MUTEX_NORMAL,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_ceiling = 0,
                .m_pshared = PTHREAD_PROCESS_PRIVATE,
        };
        int ret;

        ret = mutex_init(mutex, &attr, calloc_cb);
        if (ret == 0)
                (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
        return (ret);
}
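
/*
 * Illustrative sketch, not part of libthr: a malloc implementation
 * passes its own bootstrap allocator here so that creating its internal
 * mutexes cannot recurse into malloc itself.  bootstrap_calloc() and
 * malloc_mtx are hypothetical names.
 *
 *	static void *bootstrap_calloc(size_t number, size_t size);
 *	static pthread_mutex_t malloc_mtx;
 *
 *	_pthread_mutex_init_calloc_cb(&malloc_mtx, bootstrap_calloc);
 */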

/*
 * Fix mutex ownership for the child process after fork().
 *
 * Process-private mutex ownership is transferred from the forking
 * thread to the child process.
 *
 * Process-shared mutexes should not be inherited because their owner
 * is the forking thread, which lives in the parent process; they are
 * removed from the owned-mutex list.
 */
static void
queue_fork(struct pthread *curthread, struct mutex_queue *q,
    struct mutex_queue *qp, uint bit)
{
        struct pthread_mutex *m;

        TAILQ_INIT(q);
        TAILQ_FOREACH(m, qp, m_pqe) {
                TAILQ_INSERT_TAIL(q, m, m_qe);
                m->m_lock.m_owner = TID(curthread) | bit;
                m->m_owner = TID(curthread);
        }
}

void
_mutex_fork(struct pthread *curthread)
{

        queue_fork(curthread, &curthread->mq[TMQ_NORM],
            &curthread->mq[TMQ_NORM_PRIV], 0);
        queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
            &curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
}
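
/*
 * Illustrative sketch, not part of libthr: why the fixup above matters.
 * If the forking thread holds a process-private mutex, the child's only
 * thread must be recorded as the owner so the child can unlock it.
 *
 *	pthread_mutex_lock(&mtx);
 *	if (fork() == 0) {
 *		pthread_mutex_unlock(&mtx);	(works: the child owns mtx)
 *		_exit(0);
 *	}
 *	pthread_mutex_unlock(&mtx);
 */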

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
        pthread_mutex_t m, m1;
        int ret;

        m = *mutex;
        if (m < THR_MUTEX_DESTROYED) {
                ret = 0;
        } else if (m == THR_MUTEX_DESTROYED) {
                ret = EINVAL;
        } else {
                if (m == THR_PSHARED_PTR) {
                        m1 = __thr_pshared_offpage(mutex, 0);
                        if (m1 != NULL) {
                                mutex_assert_not_owned(m1);
                                __thr_pshared_destroy(mutex);
                        }
                        *mutex = THR_MUTEX_DESTROYED;
                        return (0);
                }
                if (m->m_owner != 0) {
                        ret = EBUSY;
                } else {
                        *mutex = THR_MUTEX_DESTROYED;
                        mutex_assert_not_owned(m);
                        free(m);
                        ret = 0;
                }
        }

        return (ret);
}
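
/*
 * Illustrative sketch, not part of libthr: destroying a locked mutex
 * fails with EBUSY per the m_owner check above; destroy it only after
 * the final unlock.
 *
 *	pthread_mutex_lock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == EBUSY);
 *	pthread_mutex_unlock(&mtx);
 *	assert(pthread_mutex_destroy(&mtx) == 0);
 */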

static int
mutex_qidx(struct pthread_mutex *m)
{

        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
                return (TMQ_NORM);
        return (TMQ_NORM_PP);
}

static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
        int qidx;

        m->m_owner = TID(curthread);
        /* Add to the list of owned mutexes: */
        mutex_assert_not_owned(m);
        qidx = mutex_qidx(m);
        TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
        if (!is_pshared_mutex(m))
                TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
}

static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
        int qidx;

        m->m_owner = 0;
        mutex_assert_is_owned(m);
        qidx = mutex_qidx(m);
        TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
        if (!is_pshared_mutex(m))
                TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
                set_inherited_priority(curthread, m);
        mutex_init_link(m);
}

static int
check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
{
        int ret;

        *m = *mutex;
        ret = 0;
        if (*m == THR_PSHARED_PTR) {
                *m = __thr_pshared_offpage(mutex, 0);
                if (*m == NULL)
                        ret = EINVAL;
                else
                        shared_mutex_init(*m, NULL);
        } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
                if (*m == THR_MUTEX_DESTROYED) {
                        ret = EINVAL;
                } else {
                        ret = init_static(_get_curthread(), mutex);
                        if (ret == 0)
                                *m = *mutex;
                }
        }
        return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        struct pthread *curthread;
        struct pthread_mutex *m;
        uint32_t id;
        int ret;

        ret = check_and_init_mutex(mutex, &m);
        if (ret != 0)
                return (ret);
        curthread = _get_curthread();
        id = TID(curthread);
        if (m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_ENTER(curthread);
        ret = _thr_umutex_trylock(&m->m_lock, id);
        if (__predict_true(ret == 0)) {
                enqueue_mutex(curthread, m);
        } else if (m->m_owner == id) {
                ret = mutex_self_trylock(m);
        } /* else {} */
        if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
                THR_CRITICAL_LEAVE(curthread);
        return (ret);
}

static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
        uint32_t id, owner;
        int count;
        int ret;

        id = TID(curthread);
        if (m->m_owner == id)
                return (mutex_self_lock(m, abstime));

        /*
         * For adaptive mutexes, spin for a bit in the expectation
         * that if the application requests this mutex type then
         * the lock is likely to be released quickly and it is
         * faster than entering the kernel.
         */
        if (__predict_false(
            (m->m_lock.m_flags &
            (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
                goto sleep_in_kernel;

        if (!_thr_is_smp)
                goto yield_loop;

        count = m->m_spinloops;
        while (count--) {
                owner = m->m_lock.m_owner;
                if ((owner & ~UMUTEX_CONTESTED) == 0) {
                        if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
                            owner, id | owner)) {
                                ret = 0;
                                goto done;
                        }
                }
                CPU_SPINWAIT;
        }

yield_loop:
        count = m->m_yieldloops;
        while (count--) {
                _sched_yield();
                owner = m->m_lock.m_owner;
                if ((owner & ~UMUTEX_CONTESTED) == 0) {
                        if (atomic_cmpset_acq_32(&m->m_lock.m_owner,
                            owner, id | owner)) {
                                ret = 0;
                                goto done;
                        }
                }
        }

sleep_in_kernel:
        if (abstime == NULL) {
                ret = __thr_umutex_lock(&m->m_lock, id);
        } else if (__predict_false(
            abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000)) {
                ret = EINVAL;
        } else {
                ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
        }
done:
        if (ret == 0)
                enqueue_mutex(curthread, m);

        return (ret);
}

static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime, int cvattach)
{
        struct pthread *curthread = _get_curthread();
        int ret;

        if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_ENTER(curthread);
        if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
                enqueue_mutex(curthread, m);
                ret = 0;
        } else {
                ret = mutex_lock_sleep(curthread, m, abstime);
        }
        if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
                THR_CRITICAL_LEAVE(curthread);
        return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
        struct pthread_mutex *m;
        int ret;

        _thr_check_init();
        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                ret = mutex_lock_common(m, NULL, 0);
        return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex,
    const struct timespec *abstime)
{
        struct pthread_mutex *m;
        int ret;

        _thr_check_init();
        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                ret = mutex_lock_common(m, abstime, 0);
        return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
        struct pthread_mutex *mp;

        if (*mutex == THR_PSHARED_PTR) {
                mp = __thr_pshared_offpage(mutex, 0);
                if (mp == NULL)
                        return (EINVAL);
                shared_mutex_init(mp, NULL);
        } else {
                mp = *mutex;
        }
        return (mutex_unlock_common(mp, 0, NULL));
}

int
_mutex_cv_lock(struct pthread_mutex *m, int count)
{
        int error;

        error = mutex_lock_common(m, NULL, 1);
        if (error == 0)
                m->m_count = count;
        return (error);
}

int
_mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
{

        /*
         * Clear the count in case this is a recursive mutex.
         */
        *count = m->m_count;
        m->m_count = 0;
        (void)mutex_unlock_common(m, 1, defer);
        return (0);
}

int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
        struct pthread *curthread = _get_curthread();

        enqueue_mutex(curthread, m);
        m->m_count = count;
        return (0);
}

int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
        struct pthread *curthread = _get_curthread();
        int defered;
        int error;

        if ((error = _mutex_owned(curthread, mp)) != 0)
                return (error);

        /*
         * Clear the count in case this is a recursive mutex.
         */
        *recurse = mp->m_count;
        mp->m_count = 0;
        dequeue_mutex(curthread, mp);

        /* Can this happen in practice? */
        if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
                defered = 1;
                mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
        } else
                defered = 0;

        if (defered) {
                _thr_wake_all(curthread->defer_waiters,
                    curthread->nwaiter_defer);
                curthread->nwaiter_defer = 0;
        }
        return (0);
}

static int
mutex_self_trylock(struct pthread_mutex *m)
{
        int ret;

        switch (PMUTEX_TYPE(m->m_flags)) {
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
                        m->m_count++;
                        ret = 0;
                } else
                        ret = EAGAIN;
                break;

        default:
                /* Trap invalid mutex types. */
                ret = EINVAL;
        }

        return (ret);
}

static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
        struct timespec ts1, ts2;
        int ret;

        switch (PMUTEX_TYPE(m->m_flags)) {
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                if (abstime) {
                        if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                            abstime->tv_nsec >= 1000000000) {
                                ret = EINVAL;
                        } else {
                                clock_gettime(CLOCK_REALTIME, &ts1);
                                TIMESPEC_SUB(&ts2, abstime, &ts1);
                                __sys_nanosleep(&ts2, NULL);
                                ret = ETIMEDOUT;
                        }
                } else {
                        /*
                         * POSIX specifies that mutexes should return
                         * EDEADLK if a recursive lock is detected.
                         */
                        ret = EDEADLK;
                }
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SS2 defines as a 'normal' mutex.  Intentionally
                 * deadlock on attempts to get a lock you already own.
                 */
                ret = 0;
                if (abstime) {
                        if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                            abstime->tv_nsec >= 1000000000) {
                                ret = EINVAL;
                        } else {
                                clock_gettime(CLOCK_REALTIME, &ts1);
                                TIMESPEC_SUB(&ts2, abstime, &ts1);
                                __sys_nanosleep(&ts2, NULL);
                                ret = ETIMEDOUT;
                        }
                } else {
                        ts1.tv_sec = 30;
                        ts1.tv_nsec = 0;
                        for (;;)
                                __sys_nanosleep(&ts1, NULL);
                }
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                if (m->m_count + 1 > 0) {
                        m->m_count++;
                        ret = 0;
                } else
                        ret = EAGAIN;
                break;

        default:
                /* Trap invalid mutex types. */
                ret = EINVAL;
        }

        return (ret);
}
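
/*
 * Illustrative sketch, not part of libthr: the relock behavior
 * implemented above, seen from the application.  After a successful
 * pthread_mutex_lock(&mtx), a second lock of the same mutex by the
 * same thread gives:
 *
 *	ERRORCHECK / ADAPTIVE_NP:  returns EDEADLK
 *	NORMAL:                    blocks forever (intentional deadlock)
 *	RECURSIVE:                 returns 0, recursion count becomes 2
 */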

static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
{
        struct pthread *curthread = _get_curthread();
        uint32_t id;
        int defered, error;

        if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
                if (m == THR_MUTEX_DESTROYED)
                        return (EINVAL);
                return (EPERM);
        }

        id = TID(curthread);

        /*
         * Check if the running thread is not the owner of the mutex.
         */
        if (__predict_false(m->m_owner != id))
                return (EPERM);

        error = 0;
        if (__predict_false(
            PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
            m->m_count > 0)) {
                m->m_count--;
        } else {
                if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
                        defered = 1;
                        m->m_flags &= ~PMUTEX_FLAG_DEFERED;
                } else
                        defered = 0;

                dequeue_mutex(curthread, m);
                error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);

                if (mtx_defer == NULL && defered) {
                        _thr_wake_all(curthread->defer_waiters,
                            curthread->nwaiter_defer);
                        curthread->nwaiter_defer = 0;
                }
        }
        if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_LEAVE(curthread);
        return (error);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
        struct pthread_mutex *m;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                if (m == NULL)
                        return (EINVAL);
                shared_mutex_init(m, NULL);
        } else {
                m = *mutex;
                if (m <= THR_MUTEX_DESTROYED)
                        return (EINVAL);
        }
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
                return (EINVAL);
        *prioceiling = m->m_lock.m_ceilings[0];
        return (0);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
        struct pthread *curthread;
        struct pthread_mutex *m, *m1, *m2;
        struct mutex_queue *q, *qp;
        int ret;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                if (m == NULL)
                        return (EINVAL);
                shared_mutex_init(m, NULL);
        } else {
                m = *mutex;
                if (m <= THR_MUTEX_DESTROYED)
                        return (EINVAL);
        }
        if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
                return (EINVAL);

        ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
        if (ret != 0)
                return (ret);

        curthread = _get_curthread();
        if (m->m_owner == TID(curthread)) {
                mutex_assert_is_owned(m);
                m1 = TAILQ_PREV(m, mutex_queue, m_qe);
                m2 = TAILQ_NEXT(m, m_qe);
                if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
                    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
                        q = &curthread->mq[TMQ_NORM_PP];
                        qp = &curthread->mq[TMQ_NORM_PP_PRIV];
                        TAILQ_REMOVE(q, m, m_qe);
                        if (!is_pshared_mutex(m))
                                TAILQ_REMOVE(qp, m, m_pqe);
                        TAILQ_FOREACH(m2, q, m_qe) {
                                if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
                                        TAILQ_INSERT_BEFORE(m2, m, m_qe);
                                        if (!is_pshared_mutex(m)) {
                                                while (m2 != NULL &&
                                                    is_pshared_mutex(m2)) {
                                                        m2 = TAILQ_PREV(m2,
                                                            mutex_queue, m_qe);
                                                }
                                                if (m2 == NULL) {
                                                        TAILQ_INSERT_HEAD(qp,
                                                            m, m_pqe);
                                                } else {
                                                        TAILQ_INSERT_BEFORE(m2,
                                                            m, m_pqe);
                                                }
                                        }
                                        return (0);
                                }
                        }
                        TAILQ_INSERT_TAIL(q, m, m_qe);
                        if (!is_pshared_mutex(m))
                                TAILQ_INSERT_TAIL(qp, m, m_pqe);
                }
        }
        return (0);
}
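
/*
 * Illustrative sketch, not part of libthr: raising the ceiling of a
 * held priority-protected mutex; the queue surgery above then keeps the
 * owner's TMQ_NORM_PP list sorted by ceiling.  The value 25 is an
 * arbitrary example.
 *
 *	int old;
 *
 *	pthread_mutex_lock(&pp_mtx);
 *	pthread_mutex_setprioceiling(&pp_mtx, 25, &old);
 *	pthread_mutex_unlock(&pp_mtx);
 */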

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
        struct pthread_mutex *m;
        int ret;

        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                *count = m->m_spinloops;
        return (ret);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
        struct pthread_mutex *m;
        int ret;

        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                m->m_spinloops = count;
        return (ret);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
        struct pthread_mutex *m;
        int ret;

        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                *count = m->m_yieldloops;
        return (ret);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
        struct pthread_mutex *m;
        int ret;

        ret = check_and_init_mutex(mutex, &m);
        if (ret == 0)
                m->m_yieldloops = count;
        return (ret);
}
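
/*
 * Illustrative sketch, not part of libthr: tuning the adaptive spin and
 * yield counts through the non-portable knobs above (declared in
 * <pthread_np.h>).  The count 4000 is an arbitrary example.
 *
 *	int spins;
 *
 *	pthread_mutex_setspinloops_np(&mtx, 4000);
 *	pthread_mutex_getspinloops_np(&mtx, &spins);
 */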

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
        struct pthread_mutex *m;

        if (*mutex == THR_PSHARED_PTR) {
                m = __thr_pshared_offpage(mutex, 0);
                if (m == NULL)
                        return (0);
                shared_mutex_init(m, NULL);
        } else {
                m = *mutex;
                if (m <= THR_MUTEX_DESTROYED)
                        return (0);
        }
        return (m->m_owner == TID(_get_curthread()));
}
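
/*
 * Illustrative sketch, not part of libthr: pthread_mutex_isowned_np()
 * is useful for lock-held assertions in debug builds; state_mtx is a
 * hypothetical name.
 *
 *	assert(pthread_mutex_isowned_np(&state_mtx));
 */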

int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
        if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
                if (mp == THR_MUTEX_DESTROYED)
                        return (EINVAL);
                return (EPERM);
        }
        if (mp->m_owner != TID(curthread))
                return (EPERM);
        return (0);
}