/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)              do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)        do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)       do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)    do {            \
        THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
            "thread in syncq when it shouldn't be.");   \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)      (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do {           \
        _lock_destroy(&(m)->m_lock);    \
        free(m);                        \
} while (0)


/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
                            struct pthread_mutex *);
static inline int       mutex_self_trylock(pthread_mutex_t);
static inline int       mutex_self_lock(struct pthread *, pthread_mutex_t);
static int              mutex_unlock_common(pthread_mutex_t *, int);
static void             mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void             mutex_rescan_owned(struct pthread *, struct pthread *,
                            struct pthread_mutex *);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
static void             mutex_lock_backout(void *arg);

int     __pthread_mutex_init(pthread_mutex_t *mutex,
            const pthread_mutexattr_t *mutex_attr);
int     __pthread_mutex_trylock(pthread_mutex_t *mutex);
int     __pthread_mutex_lock(pthread_mutex_t *m);
int     __pthread_mutex_timedlock(pthread_mutex_t *m,
            const struct timespec *abs_timeout);
int     _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
            void *(calloc_cb)(size_t, size_t));


static struct pthread_mutex_attr        static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t              static_mattr = &static_mutex_attr;

LT10_COMPAT_PRIVATE(__pthread_mutex_init);
LT10_COMPAT_PRIVATE(_pthread_mutex_init);
LT10_COMPAT_DEFAULT(pthread_mutex_init);
LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
LT10_COMPAT_DEFAULT(pthread_mutex_lock);
LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
LT10_COMPAT_DEFAULT(pthread_mutex_unlock);

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

static int
thr_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
{
        struct pthread_mutex *pmutex;
        enum pthread_mutextype type;
        int             protocol;
        int             ceiling;
        int             flags;
        int             ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /* Check for default mutex attributes: */
        else if (mutex_attr == NULL || *mutex_attr == NULL) {
                /* Default to an error-checking POSIX mutex: */
                type = PTHREAD_MUTEX_ERRORCHECK;
                protocol = PTHREAD_PRIO_NONE;
                ceiling = THR_MAX_PRIORITY;
                flags = 0;
        }

        /* Check mutex type: */
        else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
            ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
                /* Return an invalid argument error: */
                ret = EINVAL;

        /* Check mutex protocol: */
        else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
            ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
                /* Return an invalid argument error: */
                ret = EINVAL;

        else {
                /* Use the requested mutex type and protocol: */
                type = (*mutex_attr)->m_type;
                protocol = (*mutex_attr)->m_protocol;
                ceiling = (*mutex_attr)->m_ceiling;
                flags = (*mutex_attr)->m_flags;
        }

        /* Check that no errors have occurred so far: */
        if (ret == 0) {
                if ((pmutex = (pthread_mutex_t)
                    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
                        ret = ENOMEM;
                else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
                    _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
                        free(pmutex);
                        *mutex = NULL;
                        ret = ENOMEM;
                } else {
                        /* Set the mutex flags: */
                        pmutex->m_flags = flags;

                        /* Process according to mutex type: */
                        switch (type) {
                        /* case PTHREAD_MUTEX_DEFAULT: */
                        case PTHREAD_MUTEX_ERRORCHECK:
                        case PTHREAD_MUTEX_NORMAL:
                        case PTHREAD_MUTEX_ADAPTIVE_NP:
                                /* Nothing to do here. */
                                break;

                        /* Single UNIX Spec 2 recursive mutex: */
                        case PTHREAD_MUTEX_RECURSIVE:
                                /* Reset the mutex count: */
                                pmutex->m_count = 0;
                                break;

                        /* Trap invalid mutex types: */
                        default:
                                /* Return an invalid argument error: */
                                ret = EINVAL;
                                break;
                        }
                        if (ret == 0) {
                                /* Initialise the rest of the mutex: */
                                TAILQ_INIT(&pmutex->m_queue);
                                pmutex->m_flags |= MUTEX_FLAGS_INITED;
                                pmutex->m_owner = NULL;
                                pmutex->m_type = type;
                                pmutex->m_protocol = protocol;
                                pmutex->m_refcount = 0;
                                if (protocol == PTHREAD_PRIO_PROTECT)
                                        pmutex->m_prio = ceiling;
                                else
                                        pmutex->m_prio = -1;
                                pmutex->m_saved_prio = 0;
                                MUTEX_INIT_LINK(pmutex);
                                *mutex = pmutex;
                        } else {
                                /* Free the mutex lock structure: */
                                MUTEX_DESTROY(pmutex);
                                *mutex = NULL;
                        }
                }
        }
        /* Return the completion status: */
        return (ret);
}
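
/*
 * Example (illustrative only): applications normally reach
 * thr_mutex_init() through pthread_mutex_init(3).  Creating a
 * recursive mutex might look like:
 *
 *      pthread_mutex_t m;
 *      pthread_mutexattr_t attr;
 *      int error;
 *
 *      pthread_mutexattr_init(&attr);
 *      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *      error = pthread_mutex_init(&m, &attr);
 *      if (error != 0)
 *              errc(1, error, "pthread_mutex_init");
 *      pthread_mutexattr_destroy(&attr);
 */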

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{

        return (thr_mutex_init(mutex, mutex_attr, calloc));
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
        struct pthread_mutex_attr mattr, *mattrp;

        if ((mutex_attr == NULL) || (*mutex_attr == NULL))
                return (__pthread_mutex_init(mutex, &static_mattr));
        else {
                mattr = **mutex_attr;
                mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
                mattrp = &mattr;
                return (__pthread_mutex_init(mutex, &mattrp));
        }
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
        static const struct pthread_mutex_attr attr = {
                .m_type = PTHREAD_MUTEX_NORMAL,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_ceiling = 0,
                .m_flags = 0
        };
        static const struct pthread_mutex_attr *pattr = &attr;

        return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
            calloc_cb));
}
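
/*
 * Example (illustrative only): malloc(3) cannot use the regular
 * calloc() while bootstrapping the locks it needs, so it passes a
 * private allocator in here.  A hypothetical caller (the names below
 * are made up) might look like:
 *
 *      static void *bootstrap_calloc(size_t number, size_t size);
 *
 *      _pthread_mutex_init_calloc_cb(&malloc_mutex, bootstrap_calloc);
 *
 * The callback must behave like calloc(3): return zeroed storage for
 * number * size bytes, or NULL on failure.
 */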

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
        _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
            _thr_lock_wait, _thr_lock_wakeup);
        TAILQ_INIT(&(*mutex)->m_queue);
        (*mutex)->m_owner = NULL;
        (*mutex)->m_count = 0;
        (*mutex)->m_refcount = 0;
        (*mutex)->m_prio = 0;
        (*mutex)->m_saved_prio = 0;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        pthread_mutex_t m;
        int ret = 0;

        if (mutex == NULL || *mutex == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

                /*
                 * Check to see if this mutex is in use:
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
                    ((*mutex)->m_refcount != 0)) {
                        ret = EBUSY;

                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
                } else {
                        /*
                         * Save a pointer to the mutex so it can be freed
                         * and set the caller's pointer to NULL:
                         */
                        m = *mutex;
                        *mutex = NULL;

                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &m->m_lock);

                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        MUTEX_ASSERT_NOT_OWNED(m);
                        MUTEX_DESTROY(m);
                }
        }

        /* Return the completion status: */
        return (ret);
}

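/*
 * First-use initialization for statically allocated mutexes
 * (PTHREAD_MUTEX_INITIALIZER).  The global static lock serializes
 * concurrent callers so the mutex is initialized exactly once.  The
 * _private variant additionally marks the mutex private (delete safe)
 * for libc-internal use.
 */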
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = _pthread_mutex_init(mutex, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = _pthread_mutex_init(mutex, &static_mattr);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
        int private;
        int ret = 0;

        THR_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in mutex_trylock_common");

        /* Lock the mutex structure: */
        THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
        private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                TAILQ_INIT(&(*mutex)->m_queue);
                MUTEX_INIT_LINK(*mutex);
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
        }

        /* Process according to mutex protocol: */
        switch ((*mutex)->m_protocol) {
        /* Default POSIX mutex: */
        case PTHREAD_PRIO_NONE:
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority inheritance mutex: */
        case PTHREAD_PRIO_INHERIT:
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        THR_SCHED_LOCK(curthread, curthread);
                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The mutex takes on the attributes of the
                         * running thread when there are no waiters.
                         */
                        (*mutex)->m_prio = curthread->active_priority;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;
                        curthread->inherited_priority = (*mutex)->m_prio;
                        THR_SCHED_UNLOCK(curthread, curthread);

                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority protection mutex: */
        case PTHREAD_PRIO_PROTECT:
                /* Check for a priority ceiling violation: */
                if (curthread->active_priority > (*mutex)->m_prio)
                        ret = EINVAL;

                /* Check if this mutex is not locked: */
                else if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        THR_SCHED_LOCK(curthread, curthread);
                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The running thread inherits the ceiling
                         * priority of the mutex and executes at that
                         * priority.
                         */
                        curthread->active_priority = (*mutex)->m_prio;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;
                        curthread->inherited_priority =
                            (*mutex)->m_prio;
                        THR_SCHED_UNLOCK(curthread, curthread);
                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* Trap invalid mutex types: */
        default:
                /* Return an invalid argument error: */
                ret = EINVAL;
                break;
        }

        if (ret == 0 && private)
                THR_CRITICAL_ENTER(curthread);

        /* Unlock the mutex structure: */
        THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

        /* Return the completion status: */
        return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        struct pthread *curthread = _get_curthread();
        int ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*mutex != NULL) ||
            ((ret = init_static(curthread, mutex)) == 0))
                ret = mutex_trylock_common(curthread, mutex);

        return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking the mutex private (delete safe):
         */
        else if ((*mutex != NULL) ||
            ((ret = init_static_private(curthread, mutex)) == 0))
                ret = mutex_trylock_common(curthread, mutex);

        return (ret);
}

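/*
 * Example (illustrative only): pthread_mutex_trylock(3) never blocks,
 * so callers are expected to handle EBUSY:
 *
 *      error = pthread_mutex_trylock(&m);
 *      if (error == 0) {
 *              ... critical section ...
 *              pthread_mutex_unlock(&m);
 *      } else if (error == EBUSY) {
 *              ... lock is held elsewhere; do other work and retry ...
 *      }
 */
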
static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
        const struct timespec * abstime)
{
        int     private;
        int     ret = 0;

        THR_ASSERT((m != NULL) && (*m != NULL),
            "Uninitialized mutex in mutex_lock_common");

        if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000))
                return (EINVAL);

        /* Reset the interrupted flag: */
        curthread->interrupted = 0;
        curthread->timeout = 0;
        curthread->wakeup_time.tv_sec = -1;

        private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;

        /*
         * Enter a loop waiting to become the mutex owner.  We need a
         * loop in case the waiting thread is interrupted by a signal
         * to execute a signal handler.  It is not (currently) possible
         * to remain in the waiting queue while running a handler.
         * Instead, the thread is interrupted and backed out of the
         * waiting queue prior to executing the signal handler.
         */
        do {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*m)->m_queue);
                        (*m)->m_flags |= MUTEX_FLAGS_INITED;
                        MUTEX_INIT_LINK(*m);
                }

                /* Process according to mutex protocol: */
                switch ((*m)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        if ((*m)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*m)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;
                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }

                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*m)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*m)->m_owner = curthread;

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on attributes of the
                                 * running thread when there are no waiters.
                                 * Make sure the thread's scheduling lock is
                                 * held while priorities are adjusted.
                                 */
                                (*m)->m_prio = curthread->active_priority;
                                (*m)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority = (*m)->m_prio;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;

                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */
                                if (curthread->active_priority > (*m)->m_prio)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(curthread, *m);

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }
                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;
                        }
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*m)->m_prio) {
                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                ret = EINVAL;
                        }
                        /* Check if this mutex is not locked: */
                        else if ((*m)->m_owner == NULL) {
                                /*
                                 * Lock the mutex for the running
                                 * thread:
                                 */
                                (*m)->m_owner = curthread;

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority.  Make sure the thread's
                                 * scheduling lock is held while priorities
                                 * are adjusted.
                                 */
                                curthread->active_priority = (*m)->m_prio;
                                (*m)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority = (*m)->m_prio;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;

                                /* Clear any previous error: */
                                curthread->error = 0;

                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }
                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;

                                /*
                                 * The thread's priority may have changed while
                                 * waiting for the mutex, causing a ceiling
                                 * violation.
                                 */
                                ret = curthread->error;
                                curthread->error = 0;
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

        } while (((*m)->m_owner != curthread) && (ret == 0) &&
            (curthread->interrupted == 0) && (curthread->timeout == 0));

        if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
                ret = ETIMEDOUT;

        /*
         * Check to see if this thread was interrupted and
         * is still in the mutex queue of waiting threads:
         */
        if (curthread->interrupted != 0) {
                /* Remove this thread from the mutex queue. */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                if (THR_IN_SYNCQ(curthread))
                        mutex_queue_remove(*m, curthread);
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                /* Check for asynchronous cancellation. */
                if (curthread->continuation != NULL)
                        curthread->continuation((void *) curthread);
        }

        /* Return the completion status: */
        return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);

        curthread = _get_curthread();
        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, NULL);

        return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);
        curthread = _get_curthread();

        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        else if ((*m != NULL) ||
            ((ret = init_static_private(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, NULL);

        return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
        const struct timespec *abs_timeout)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);

        curthread = _get_curthread();
        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, abs_timeout);

        return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
        const struct timespec *abs_timeout)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);
        curthread = _get_curthread();

        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        else if ((*m != NULL) ||
            ((ret = init_static_private(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, abs_timeout);

        return (ret);
}

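/*
 * Example (illustrative only): the timeout given to
 * pthread_mutex_timedlock(3) is an absolute CLOCK_REALTIME time, so a
 * caller derives it from the current time.  To wait at most two
 * seconds:
 *
 *      struct timespec abstime;
 *
 *      clock_gettime(CLOCK_REALTIME, &abstime);
 *      abstime.tv_sec += 2;
 *      if (pthread_mutex_timedlock(&m, &abstime) == ETIMEDOUT)
 *              ... the mutex could not be acquired in time ...
 */
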
int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
        return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

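/*
 * Condition variable helpers.  _mutex_cv_unlock() releases the mutex
 * on behalf of the condition variable code while incrementing
 * m_refcount, which keeps pthread_mutex_destroy() returning EBUSY for
 * as long as threads are blocked on the condition variable;
 * _mutex_cv_lock() reacquires the mutex and drops that reference.
 */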
int
_mutex_cv_unlock(pthread_mutex_t *m)
{
        return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
        struct  pthread *curthread;
        int     ret;

        curthread = _get_curthread();
        if ((ret = _pthread_mutex_lock(m)) == 0) {
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                (*m)->m_refcount--;
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
        }
        return (ret);
}

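/*
 * Handle a trylock on a mutex the calling thread already owns: fail
 * with EBUSY for the non-recursive types, otherwise bump the
 * recursion count.
 */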
static inline int
mutex_self_trylock(pthread_mutex_t m)
{
        int     ret = 0;

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                m->m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return (ret);
}

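/*
 * Handle a lock request for a mutex the calling thread already owns.
 * Depending on the mutex type this returns EDEADLK, deadlocks the
 * thread intentionally, or bumps the recursion count.
 */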
static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
        int ret = 0;

        /*
         * Don't allow evil recursive mutexes for private use
         * in libc and libpthread.
         */
        if (m->m_flags & MUTEX_FLAGS_PRIVATE)
                PANIC("Recurse on a private mutex.");

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.
                 */
                ret = EDEADLK;
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SUSv2 defines as a 'normal' mutex: intentionally
                 * deadlock on attempts to get a lock you already own.
                 */

                THR_SCHED_LOCK(curthread, curthread);
                THR_SET_STATE(curthread, PS_DEADLOCK);
                THR_SCHED_UNLOCK(curthread, curthread);

                /* Unlock the mutex structure: */
                THR_LOCK_RELEASE(curthread, &m->m_lock);

                /* Schedule the next thread: */
                _thr_sched_switch(curthread);
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                m->m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return (ret);
}

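/*
 * Common unlock path for all three protocols.  When add_reference is
 * set (the condition variable case), m_refcount is incremented before
 * the mutex is released.  For priority mutexes the owner's inherited
 * and active priorities are restored, and ownership is handed directly
 * to the first waiting thread by mutex_handoff(); any kse_mailbox it
 * returns is woken once the mutex structure lock has been dropped.
 */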
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
        struct pthread *curthread = _get_curthread();
        struct kse_mailbox *kmbx = NULL;
        int ret = 0;

        if (m == NULL || *m == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

                /* Process according to mutex protocol: */
                switch ((*m)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                curthread->inherited_priority =
                                        (*m)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* POSIX priority ceiling mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                curthread->inherited_priority =
                                        (*m)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                if ((ret == 0) && (add_reference != 0))
                        /* Increment the reference count: */
                        (*m)->m_refcount++;

                /* Leave the critical region if this is a private mutex. */
                if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
                        THR_CRITICAL_LEAVE(curthread);

                /* Unlock the mutex structure: */
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                if (kmbx != NULL)
                        kse_wakeup(kmbx);
        }

        /* Return the completion status: */
        return (ret);
}
1258
1259
1260 /*
1261  * This function is called when a change in base priority occurs for
1262  * a thread that is holding or waiting for a priority protection or
1263  * inheritence mutex.  A change in a threads base priority can effect
1264  * changes to active priorities of other threads and to the ordering
1265  * of mutex locking by waiting threads.
1266  *
1267  * This must be called without the target thread's scheduling lock held.
1268  */
1269 void
1270 _mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
1271     int propagate_prio)
1272 {
1273         struct pthread_mutex *m;
1274
1275         /* Adjust the priorites of any owned priority mutexes: */
1276         if (pthread->priority_mutex_count > 0) {
1277                 /*
1278                  * Rescan the mutexes owned by this thread and correct
1279                  * their priorities to account for this threads change
1280                  * in priority.  This has the side effect of changing
1281                  * the threads active priority.
1282                  *
1283                  * Be sure to lock the first mutex in the list of owned
1284                  * mutexes.  This acts as a barrier against another
1285                  * simultaneous call to change the threads priority
1286                  * and from the owning thread releasing the mutex.
1287                  */
1288                 m = TAILQ_FIRST(&pthread->mutexq);
1289                 if (m != NULL) {
1290                         THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1291                         /*
1292                          * Make sure the thread still owns the lock.
1293                          */
1294                         if (m == TAILQ_FIRST(&pthread->mutexq))
1295                                 mutex_rescan_owned(curthread, pthread,
1296                                     /* rescan all owned */ NULL);
1297                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1298                 }
1299         }
1300
1301         /*
1302          * If this thread is waiting on a priority inheritance mutex,
1303          * check for priority adjustments.  A change in priority can
1304          * also cause a ceiling violation(*) for a thread waiting on
1305          * a priority protection mutex; we don't perform the check here
1306          * as it is done in pthread_mutex_unlock.
1307          *
1308          * (*) It should be noted that a priority change to a thread
1309          *     _after_ taking and owning a priority ceiling mutex
1310          *     does not affect ownership of that mutex; the ceiling
1311          *     priority is only checked before mutex ownership occurs.
1312          */
1313         if (propagate_prio != 0) {
1314                 /*
1315                  * Lock the thread's scheduling queue.  This is a bit
1316                  * convoluted; the "in synchronization queue flag" can
1317                  * only be cleared with both the thread's scheduling and
1318                  * mutex locks held.  The thread's pointer to the wanted
1319                  * mutex is guaranteed to be valid during this time.
1320                  */
1321                 THR_SCHED_LOCK(curthread, pthread);
1322
1323                 if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
1324                     ((m = pthread->data.mutex) == NULL))
1325                         THR_SCHED_UNLOCK(curthread, pthread);
1326                 else {
1327                         /*
1328                          * This thread is currently waiting on a mutex; unlock
1329                          * the scheduling queue lock and lock the mutex.  We
1330                          * can't hold both at the same time because the locking
1331                          * order could cause a deadlock.
1332                          */
1333                         THR_SCHED_UNLOCK(curthread, pthread);
1334                         THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1335
1336                         /*
1337                          * Check to make sure this thread is still in the
1338                          * same state (the lock above can yield the CPU to
1339                          * another thread or the thread may be running on
1340                          * another CPU).
1341                          */
1342                         if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1343                             (pthread->data.mutex == m)) {
1344                                 /*
1345                                  * Remove and reinsert this thread into
1346                                  * the list of waiting threads to preserve
1347                                  * decreasing priority order.
1348                                  */
1349                                 mutex_queue_remove(m, pthread);
1350                                 mutex_queue_enq(m, pthread);
1351
1352                                 if (m->m_protocol == PTHREAD_PRIO_INHERIT)
1353                                         /* Adjust priorities: */
1354                                         mutex_priority_adjust(curthread, m);
1355                         }
1356
1357                         /* Unlock the mutex structure: */
1358                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1359                 }
1360         }
1361 }
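
/*
 * [Editorial example, not in the original source.]  A minimal
 * user-level sketch of the situation this function services: raising
 * the scheduling priority of a thread that owns a PTHREAD_PRIO_INHERIT
 * mutex.  The thread handle, policy, and priority value below are
 * hypothetical.
 */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t pi_mutex;

static void
pi_mutex_setup(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        /* Request the priority inheritance protocol for this mutex. */
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&pi_mutex, &attr);
        pthread_mutexattr_destroy(&attr);
}

static void
boost_holder(pthread_t holder)
{
        struct sched_param param;

        /*
         * Raising the base priority of a thread that owns pi_mutex
         * lands in _mutex_notify_priochange(), which rescans the
         * thread's owned mutexes and, if the thread is itself blocked
         * on another PI mutex, propagates the new priority.
         */
        param.sched_priority = 20;      /* hypothetical value */
        pthread_setschedparam(holder, SCHED_RR, &param);
}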
1362
1363 /*
1364  * Called when a new thread is added to the mutex waiting queue or
1365  * when the priority of a thread already in the mutex waiting
1366  * queue changes.
1367  *
1368  * This must be called with the mutex locked by the current thread.
1369  */
1370 static void
1371 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1372 {
1373         pthread_mutex_t m = mutex;
1374         struct pthread  *pthread_next, *pthread = mutex->m_owner;
1375         int             done, temp_prio;
1376
1377         /*
1378          * Calculate the mutex priority as the maximum of the highest
1379          * active priority of any waiting threads and the owning thread's
1380          * active priority(*).
1381          *
1382          * (*) Because the owning thread's current active priority may
1383          *     reflect priority inherited from this mutex (and the mutex
1384          *     priority may have changed) we must recalculate the active
1385          *     priority based on the thread's saved inherited priority
1386          *     and its base priority.
1387          */
1388         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1389         temp_prio = MAX(pthread_next->active_priority,
1390             MAX(m->m_saved_prio, pthread->base_priority));
1391
1392         /* See if this mutex really needs adjusting: */
1393         if (temp_prio == m->m_prio)
1394                 /* No need to propagate the priority: */
1395                 return;
1396
1397         /* Set new priority of the mutex: */
1398         m->m_prio = temp_prio;
1399
1400         /*
1401          * Don't unlock the mutex passed in as an argument.  It is
1402          * expected to be locked and unlocked by the caller.
1403          */
1404         done = 1;
1405         do {
1406                 /*
1407                  * Save the thread's priority before rescanning the
1408                  * owned mutexes:
1409                  */
1410                 temp_prio = pthread->active_priority;
1411
1412                 /*
1413                  * Fix the priorities for all mutexes held by the owning
1414                  * thread since taking this mutex.  This also has a
1415                  * potential side-effect of changing the thread's priority.
1416                  *
1417                  * At this point the mutex is locked by the current thread.
1418                  * The owning thread can't release the mutex until it is
1419                  * unlocked, so we should be able to safely walk its list
1420                  * of owned mutexes.
1421                  */
1422                 mutex_rescan_owned(curthread, pthread, m);
1423
1424                 /*
1425                  * If this isn't the first time through the loop,
1426                  * the current mutex needs to be unlocked.
1427                  */
1428                 if (done == 0)
1429                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1430
1431                 /* Assume we're done unless told otherwise: */
1432                 done = 1;
1433
1434                 /*
1435                  * If the thread is currently waiting on a mutex, check
1436                  * to see if the thread's new priority has affected the
1437                  * priority of the mutex.
1438                  */
1439                 if ((temp_prio != pthread->active_priority) &&
1440                     ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1441                     ((m = pthread->data.mutex) != NULL) &&
1442                     (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1443                         /* Lock the mutex structure: */
1444                         THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1445
1446                         /*
1447                          * Make sure the thread is still waiting on the
1448                          * mutex:
1449                          */
1450                         if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1451                             (m == pthread->data.mutex)) {
1452                                 /*
1453                                  * The priority for this thread has changed.
1454                                  * Remove and reinsert this thread into the
1455                                  * list of waiting threads to preserve
1456                                  * decreasing priority order.
1457                                  */
1458                                 mutex_queue_remove(m, pthread);
1459                                 mutex_queue_enq(m, pthread);
1460
1461                                 /*
1462                                  * Grab the waiting thread with highest
1463                                  * priority:
1464                                  */
1465                                 pthread_next = TAILQ_FIRST(&m->m_queue);
1466
1467                                 /*
1468                                  * Calculate the mutex priority as the maximum
1469                                  * of the highest active priority of any
1470                          * waiting threads and the owning thread's
1471                                  * active priority.
1472                                  */
1473                                 temp_prio = MAX(pthread_next->active_priority,
1474                                     MAX(m->m_saved_prio,
1475                                     m->m_owner->base_priority));
1476
1477                                 if (temp_prio != m->m_prio) {
1478                                         /*
1479                                          * The priority needs to be propagated
1480                                          * to the mutex this thread is waiting
1481                                          * on and up to the owner of that mutex.
1482                                          */
1483                                         m->m_prio = temp_prio;
1484                                         pthread = m->m_owner;
1485
1486                                         /* We're not done yet: */
1487                                         done = 0;
1488                                 }
1489                         }
1490                         /* Only release the mutex if we're done: */
1491                         if (done != 0)
1492                                 THR_LOCK_RELEASE(curthread, &m->m_lock);
1493                 }
1494         } while (done == 0);
1495 }
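
/*
 * [Editorial sketch, not in the original source.]  The propagation
 * loop above, restated over a simplified model so that only the
 * priority arithmetic remains.  The toy_* types are hypothetical
 * stand-ins for struct pthread_mutex/struct pthread; locking, queue
 * re-sorting, and the full rescan of other owned mutexes are omitted.
 * MAX() is the usual macro from <sys/param.h>, as used above.
 */
#include <sys/param.h>
#include <stddef.h>

struct toy_mutex;

struct toy_thread {
        int               base_prio;
        int               active_prio;
        struct toy_mutex *waiting_on;   /* PI mutex blocked on, or NULL */
};

struct toy_mutex {
        struct toy_thread *owner;
        struct toy_thread *top_waiter;  /* highest-priority waiter */
        int                prio;        /* current mutex priority */
        int                saved_prio;  /* owner's prio saved at lock time */
};

static void
toy_priority_adjust(struct toy_mutex *m)
{
        struct toy_thread *owner;
        int prio;

        for (;;) {
                owner = m->owner;
                prio = MAX(m->top_waiter->active_prio,
                    MAX(m->saved_prio, owner->base_prio));
                if (prio == m->prio)
                        return;         /* nothing left to propagate */
                m->prio = prio;
                /* The owner inherits the (new) mutex priority. */
                owner->active_prio = MAX(owner->base_prio, prio);
                /* Follow the chain while the owner is itself blocked. */
                if (owner->waiting_on == NULL)
                        return;
                m = owner->waiting_on;
        }
}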
1496
1497 static void
1498 mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
1499     struct pthread_mutex *mutex)
1500 {
1501         struct pthread_mutex    *m;
1502         struct pthread          *pthread_next;
1503         int                     active_prio, inherited_prio;
1504
1505         /*
1506          * Start walking the mutexes the thread has taken since
1507          * taking this mutex.
1508          */
1509         if (mutex == NULL) {
1510                 /*
1511                  * A null mutex means start at the beginning of the owned
1512                  * mutex list.
1513                  */
1514                 m = TAILQ_FIRST(&pthread->mutexq);
1515
1516                 /* There is no inherited priority yet. */
1517                 inherited_prio = 0;
1518         } else {
1519                 /*
1520                  * The caller wants to start after a specific mutex.  It
1521                  * is assumed that this mutex is a priority inheritance
1522                  * mutex and that its priority has been correctly
1523                  * calculated.
1524                  */
1525                 m = TAILQ_NEXT(mutex, m_qe);
1526
1527                 /* Start inheriting priority from the specified mutex. */
1528                 inherited_prio = mutex->m_prio;
1529         }
1530         active_prio = MAX(inherited_prio, pthread->base_priority);
1531
1532         for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
1533                 /*
1534                  * We only want to deal with priority inheritance
1535                  * mutexes.  This might be optimized by only placing
1536                  * priority inheritance mutexes into the owned mutex
1537                  * list, but it may prove to be useful having all
1538                  * owned mutexes in this list.  Consider a thread
1539                  * exiting while holding mutexes...
1540                  */
1541                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1542                         /*
1543                          * Fix the owner's saved (inherited) priority to
1544                          * reflect the priority of the previous mutex.
1545                          */
1546                         m->m_saved_prio = inherited_prio;
1547
1548                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1549                                 /* Recalculate the priority of the mutex: */
1550                                 m->m_prio = MAX(active_prio,
1551                                      pthread_next->active_priority);
1552                         else
1553                                 m->m_prio = active_prio;
1554
1555                         /* Recalculate new inherited and active priorities: */
1556                         inherited_prio = m->m_prio;
1557                         active_prio = MAX(m->m_prio, pthread->base_priority);
1558                 }
1559         }
1560
1561         /*
1562          * Fix the thread's inherited priority and recalculate its
1563          * active priority.
1564          */
1565         pthread->inherited_priority = inherited_prio;
1566         active_prio = MAX(inherited_prio, pthread->base_priority);
1567
1568         if (active_prio != pthread->active_priority) {
1569                 /* Lock the thread's scheduling queue: */
1570                 THR_SCHED_LOCK(curthread, pthread);
1571
1572                 if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
1573                         /*
1574                          * This thread is not in a run queue.  Just set
1575                          * its active priority.
1576                          */
1577                         pthread->active_priority = active_prio;
1578                 }
1579                 else {
1580                         /*
1581                          * This thread is in a run queue.  Remove it from
1582                          * the queue before changing its priority:
1583                          */
1584                         THR_RUNQ_REMOVE(pthread);
1585
1586                         /*
1587                          * POSIX states that if the priority is being
1588                          * lowered, the thread must be inserted at the
1589                          * head of the queue for its priority if it owns
1590                          * any priority protection or inheritance mutexes.
1591                          */
1592                         if ((active_prio < pthread->active_priority) &&
1593                             (pthread->priority_mutex_count > 0)) {
1594                                 /* Set the new active priority. */
1595                                 pthread->active_priority = active_prio;
1596
1597                                 THR_RUNQ_INSERT_HEAD(pthread);
1598                         } else {
1599                                 /* Set the new active priority. */
1600                                 pthread->active_priority = active_prio;
1601
1602                                 THR_RUNQ_INSERT_TAIL(pthread);
1603                         }
1604                 }
1605                 THR_SCHED_UNLOCK(curthread, pthread);
1606         }
1607 }
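
/*
 * [Editorial sketch, not in the original source.]  The core arithmetic
 * of the rescan above, with the thread's owned PI mutexes reduced to
 * an array holding the active priority of each mutex's highest waiter
 * (0 standing in for an uncontended mutex, assuming non-negative
 * priorities).  Names are hypothetical; queue handling and locking are
 * left out.
 */
#include <sys/param.h>

static int
toy_rescan(const int *waiter_prio, int nmutexes, int base_prio,
    int start_inherited)
{
        int active, inherited, i;

        inherited = start_inherited;
        active = MAX(inherited, base_prio);
        for (i = 0; i < nmutexes; i++) {
                /* Each mutex takes the max of active and its waiters. */
                inherited = MAX(active, waiter_prio[i]);
                active = MAX(inherited, base_prio);
        }
        /* The thread's new active priority: */
        return (MAX(inherited, base_prio));
}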
1608
1609 void
1610 _mutex_unlock_private(pthread_t pthread)
1611 {
1612         struct pthread_mutex    *m, *m_next;
1613
1614         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1615                 m_next = TAILQ_NEXT(m, m_qe);
1616                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1617                         _pthread_mutex_unlock(&m);
1618         }
1619 }
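
/*
 * [Editorial note, not in the original source.]  The explicit caching
 * of m_next above is the standard "remove while iterating" idiom;
 * assuming a <sys/queue.h> that provides TAILQ_FOREACH_SAFE, the loop
 * could equivalently be written as:
 *
 *      TAILQ_FOREACH_SAFE(m, &pthread->mutexq, m_qe, m_next) {
 *              if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
 *                      _pthread_mutex_unlock(&m);
 *      }
 *
 * The next pointer must be saved before the unlock because unlocking
 * removes the mutex from the thread's mutexq.
 */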
1620
1621 /*
1622  * This is called by the current thread when it wants to back out of a
1623  * mutex_lock in order to run a signal handler.
1624  */
1625 static void
1626 mutex_lock_backout(void *arg)
1627 {
1628         struct pthread *curthread = (struct pthread *)arg;
1629         struct pthread_mutex *m;
1630
1631         if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1632                 /*
1633                  * Any other thread may clear the "in sync queue flag",
1634                  * but only the current thread can clear the pointer
1635                  * to the mutex.  So if the flag is set, we can
1636                  * guarantee that the pointer to the mutex is valid.
1637                  * The only problem may be if the mutex is destroyed
1638                  * out from under us, but that should be considered
1639                  * an application bug.
1640                  */
1641                 m = curthread->data.mutex;
1642
1643                 /* Lock the mutex structure: */
1644                 THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1645
1646
1647                 /*
1648                  * Check to make sure this thread doesn't already own
1649                  * the mutex.  Since mutexes are unlocked with direct
1650                  * handoffs, it is possible the previous owner gave it
1651                  * to us after we checked the sync queue flag and before
1652                  * we locked the mutex structure.
1653                  */
1654                 if (m->m_owner == curthread) {
1655                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1656                         mutex_unlock_common(&m, /* add_reference */ 0);
1657                 } else {
1658                         /*
1659                          * Remove ourselves from the mutex queue and
1660                          * clear the pointer to the mutex.  We may no
1661                          * longer be in the mutex queue, but the removal
1662                          * function will DTRT.
1663                          */
1664                         mutex_queue_remove(m, curthread);
1665                         curthread->data.mutex = NULL;
1666                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1667                 }
1668         }
1669         /* No need to call this again. */
1670         curthread->sigbackout = NULL;
1671 }
1672
1673 /*
1674  * Hand a mutex off to the first eligible thread on its queue of
1675  * waiters, which is kept in descending priority order.
1676  *
1677  * In order to properly dequeue a thread from the mutex queue and
1678  * make it runnable without the possibility of errant wakeups, it
1679  * is necessary to lock the thread's scheduling queue while also
1680  * holding the mutex lock.
1681  */
1682 static struct kse_mailbox *
1683 mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
1684 {
1685         struct kse_mailbox *kmbx = NULL;
1686         struct pthread *pthread;
1687
1688         /* Keep dequeueing until we find a valid thread: */
1689         mutex->m_owner = NULL;
1690         pthread = TAILQ_FIRST(&mutex->m_queue);
1691         while (pthread != NULL) {
1692                 /* Take the thread's scheduling lock: */
1693                 THR_SCHED_LOCK(curthread, pthread);
1694
1695                 /* Remove the thread from the mutex queue: */
1696                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1697                 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1698
1699                 /*
1700                  * The loop is exited below only once a dequeued
1701                  * thread successfully becomes the new owner.
1702                  */
1703                 switch (mutex->m_protocol) {
1704                 case PTHREAD_PRIO_NONE:
1705                         /*
1706                          * Assign the new owner and add the mutex to the
1707                          * thread's list of owned mutexes.
1708                          */
1709                         mutex->m_owner = pthread;
1710                         TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1711                         break;
1712
1713                 case PTHREAD_PRIO_INHERIT:
1714                         /*
1715                          * Assign the new owner and add the mutex to the
1716                          * thread's list of owned mutexes.
1717                          */
1718                         mutex->m_owner = pthread;
1719                         TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
1720
1721                         /* Track number of priority mutexes owned: */
1722                         pthread->priority_mutex_count++;
1723
1724                         /*
1725                          * Set the priority of the mutex.  Since our waiting
1726                          * threads are in descending priority order, the
1727                          * priority of the mutex becomes the active priority
1728                          * of the thread we just dequeued.
1729                          */
1730                         mutex->m_prio = pthread->active_priority;
1731
1732                         /* Save the owning thread's inherited priority: */
1733                         mutex->m_saved_prio = pthread->inherited_priority;
1734
1735                         /*
1736                          * The owning thread's inherited priority now becomes
1737                          * its active priority (the priority of the mutex).
1738                          */
1739                         pthread->inherited_priority = mutex->m_prio;
1740                         break;
1741
1742                 case PTHREAD_PRIO_PROTECT:
1743                         if (pthread->active_priority > mutex->m_prio) {
1744                                 /*
1745                                  * Either the mutex ceiling priority has
1746                                  * been lowered and/or this thread's priority
1747                                  * has been raised subsequent to the thread
1748                                  * being queued on the waiting list.
1749                                  */
1750                                 pthread->error = EINVAL;
1751                         }
1752                         else {
1753                                 /*
1754                                  * Assign the new owner and add the mutex
1755                                  * to the thread's list of owned mutexes.
1756                                  */
1757                                 mutex->m_owner = pthread;
1758                                 TAILQ_INSERT_TAIL(&pthread->mutexq,
1759                                     mutex, m_qe);
1760
1761                                 /* Track number of priority mutexes owned: */
1762                                 pthread->priority_mutex_count++;
1763
1764                                 /*
1765                                  * Save the owning thread's inherited
1766                                  * priority:
1767                                  */
1768                                 mutex->m_saved_prio =
1769                                     pthread->inherited_priority;
1770
1771                                 /*
1772                                  * The owning thread inherits the ceiling
1773                                  * priority of the mutex and executes at
1774                                  * that priority:
1775                                  */
1776                                 pthread->inherited_priority = mutex->m_prio;
1777                                 pthread->active_priority = mutex->m_prio;
1778
1779                         }
1780                         break;
1781                 }
1782
1783                 /* Make the thread runnable and unlock the scheduling queue: */
1784                 kmbx = _thr_setrunnable_unlocked(pthread);
1785
1786                 /* Add a preemption point. */
1787                 if ((curthread->kseg == pthread->kseg) &&
1788                     (pthread->active_priority > curthread->active_priority))
1789                         curthread->critical_yield = 1;
1790
1791                 if (mutex->m_owner == pthread) {
1792                         /* We're done; a valid owner was found. */
1793                         if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
1794                                 THR_CRITICAL_ENTER(pthread);
1795                         THR_SCHED_UNLOCK(curthread, pthread);
1796                         break;
1797                 }
1798                 THR_SCHED_UNLOCK(curthread, pthread);
1799                 /* Get the next thread from the waiting queue: */
1800                 pthread = TAILQ_NEXT(pthread, sqe);
1801         }
1802
1803         if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
1804                 /* This mutex has no priority: */
1805                 mutex->m_prio = 0;
1806         return (kmbx);
1807 }
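
/*
 * [Editorial example, not in the original source.]  An
 * application-level view of the PTHREAD_PRIO_PROTECT case handled
 * above: a waiter whose active priority has been raised past the
 * ceiling after it queued is refused ownership with EINVAL.  The
 * ceiling value below is hypothetical.
 */
#include <pthread.h>

static pthread_mutex_t pp_mutex;

static int
pp_mutex_setup(void)
{
        pthread_mutexattr_t attr;
        int ret;

        pthread_mutexattr_init(&attr);
        ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
        if (ret == 0)
                /* Owners of pp_mutex run at ceiling priority 10. */
                ret = pthread_mutexattr_setprioceiling(&attr, 10);
        if (ret == 0)
                ret = pthread_mutex_init(&pp_mutex, &attr);
        pthread_mutexattr_destroy(&attr);
        return (ret);
}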
1808
1809 /*
1810  * Dequeue a waiting thread from the head of a mutex queue in descending
1811  * priority order.
1812  */
1813 static inline pthread_t
1814 mutex_queue_deq(struct pthread_mutex *mutex)
1815 {
1816         pthread_t pthread;
1817
1818         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1819                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1820                 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1821
1822                 /*
1823                  * Only exit the loop if the thread hasn't been
1824                  * cancelled.
1825                  */
1826                 if (pthread->interrupted == 0)
1827                         break;
1828         }
1829
1830         return (pthread);
1831 }
1832
1833 /*
1834  * Remove a waiting thread from a mutex queue in descending priority order.
1835  */
1836 static inline void
1837 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1838 {
1839         if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1840                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1841                 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1842         }
1843 }
1844
1845 /*
1846  * Enqueue a waiting thread to a queue in descending priority order.
1847  */
1848 static inline void
1849 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1850 {
1851         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1852
1853         THR_ASSERT_NOT_IN_SYNCQ(pthread);
1854         /*
1855          * For the common case of all threads having equal priority,
1856          * we perform a quick check against the priority of the thread
1857          * at the tail of the queue.
1858          */
1859         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1860                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1861         else {
1862                 tid = TAILQ_FIRST(&mutex->m_queue);
1863                 while (pthread->active_priority <= tid->active_priority)
1864                         tid = TAILQ_NEXT(tid, sqe);
1865                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1866         }
1867         pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1868 }
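
/*
 * [Editorial sketch, not in the original source.]  The insertion
 * policy above on a free-standing TAILQ: check the tail first (the
 * common equal-priority case inserts in O(1)), otherwise scan from
 * the head for the first strictly lower priority.  The scan cannot
 * run off the end because the tail check has already established
 * that a lower-priority element exists.  Types and names here are
 * hypothetical.
 */
#include <sys/queue.h>
#include <stddef.h>

struct toy_waiter {
        int     prio;
        TAILQ_ENTRY(toy_waiter) link;
};
TAILQ_HEAD(toy_waitq, toy_waiter);

static void
toy_enq(struct toy_waitq *q, struct toy_waiter *w)
{
        struct toy_waiter *t = TAILQ_LAST(q, toy_waitq);

        if ((t == NULL) || (w->prio <= t->prio))
                TAILQ_INSERT_TAIL(q, w, link);
        else {
                t = TAILQ_FIRST(q);
                while (w->prio <= t->prio)
                        t = TAILQ_NEXT(t, link);
                TAILQ_INSERT_BEFORE(t, w, link);
        }
}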