lib/libc_r/uthread/uthread_mutex.c
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #include <pthread.h>
40 #include "pthread_private.h"
41
42 #if defined(_PTHREADS_INVARIANTS)
43 #define _MUTEX_INIT_LINK(m)             do {            \
44         (m)->m_qe.tqe_prev = NULL;                      \
45         (m)->m_qe.tqe_next = NULL;                      \
46 } while (0)
47 #define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
48         if ((m)->m_qe.tqe_prev == NULL)                 \
49                 PANIC("mutex is not on list");          \
50 } while (0)
51 #define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
52         if (((m)->m_qe.tqe_prev != NULL) ||             \
53             ((m)->m_qe.tqe_next != NULL))               \
54                 PANIC("mutex is on list");              \
55 } while (0)
56 #else
57 #define _MUTEX_INIT_LINK(m)
58 #define _MUTEX_ASSERT_IS_OWNED(m)
59 #define _MUTEX_ASSERT_NOT_OWNED(m)
60 #endif
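
/*
 * Illustrative sketch: when libc_r is built with _PTHREADS_INVARIANTS
 * defined, the link macros above become cheap consistency checks on the
 * per-thread list of owned mutexes.  The pattern used throughout this
 * file is roughly:
 *
 *        _MUTEX_ASSERT_NOT_OWNED(m);     -- must not be linked anywhere yet
 *        TAILQ_INSERT_TAIL(&curthread->mutexq, m, m_qe);
 *        ...
 *        _MUTEX_ASSERT_IS_OWNED(m);      -- must still be on the owner's list
 *        TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
 *        _MUTEX_INIT_LINK(m);            -- mark the link fields unused again
 *
 * Without _PTHREADS_INVARIANTS the macros compile away to nothing.
 */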
61
62 /*
63  * Prototypes
64  */
65 static inline int       mutex_self_trylock(pthread_mutex_t);
66 static inline int       mutex_self_lock(pthread_mutex_t);
67 static inline int       mutex_unlock_common(pthread_mutex_t *, int);
68 static void             mutex_priority_adjust(pthread_mutex_t);
69 static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
70 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
71 static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
72 static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
73
74
75 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
76
77 static struct pthread_mutex_attr        static_mutex_attr =
78     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
79 static pthread_mutexattr_t              static_mattr = &static_mutex_attr;
80
81 /* Single underscore versions provided for libc internal usage: */
82 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
83 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
84
85 /* No difference between libc and application usage of these: */
86 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
87 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
88 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
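
/*
 * A minimal sketch of how these names resolve: an application call such as
 *
 *        pthread_mutex_lock(&m);
 *
 * binds through the weak reference to __pthread_mutex_lock(), which
 * initializes statically-allocated mutexes with the default attributes,
 * while libc-internal code calls _pthread_mutex_lock() directly so that
 * lazily-initialized mutexes are marked private (delete safe).
 */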
89
90
91 /*
92  * Reinitialize a private mutex; this is only used for internal mutexes.
93  */
94 int
95 _mutex_reinit(pthread_mutex_t * mutex)
96 {
97         int     ret = 0;
98
99         if (mutex == NULL)
100                 ret = EINVAL;
101         else if (*mutex == NULL)
102                 ret = _pthread_mutex_init(mutex, NULL);
103         else {
104                 /*
105                  * Initialize the mutex structure:
106                  */
107                 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
108                 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
109                 TAILQ_INIT(&(*mutex)->m_queue);
110                 (*mutex)->m_owner = NULL;
111                 (*mutex)->m_data.m_count = 0;
112                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
113                 (*mutex)->m_refcount = 0;
114                 (*mutex)->m_prio = 0;
115                 (*mutex)->m_saved_prio = 0;
116                 _MUTEX_INIT_LINK(*mutex);
117                 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
118         }
119         return (ret);
120 }
121
122 int
123 _pthread_mutex_init(pthread_mutex_t * mutex,
124                    const pthread_mutexattr_t * mutex_attr)
125 {
126         enum pthread_mutextype  type;
127         int             protocol;
128         int             ceiling;
129         int             flags;
130         pthread_mutex_t pmutex;
131         int             ret = 0;
132
133         if (mutex == NULL)
134                 ret = EINVAL;
135
136         /* Check if default mutex attributes: */
137         if (mutex_attr == NULL || *mutex_attr == NULL) {
138                 /* Default to an (error checking) POSIX mutex: */
139                 type = PTHREAD_MUTEX_ERRORCHECK;
140                 protocol = PTHREAD_PRIO_NONE;
141                 ceiling = PTHREAD_MAX_PRIORITY;
142                 flags = 0;
143         }
144
145         /* Check mutex type: */
146         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
147             ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
148                 /* Return an invalid argument error: */
149                 ret = EINVAL;
150
151         /* Check mutex protocol: */
152         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
153             ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
154                 /* Return an invalid argument error: */
155                 ret = EINVAL;
156
157         else {
158                 /* Use the requested mutex type and protocol: */
159                 type = (*mutex_attr)->m_type;
160                 protocol = (*mutex_attr)->m_protocol;
161                 ceiling = (*mutex_attr)->m_ceiling;
162                 flags = (*mutex_attr)->m_flags;
163         }
164
165         /* Check no errors so far: */
166         if (ret == 0) {
167                 if ((pmutex = (pthread_mutex_t)
168                     malloc(sizeof(struct pthread_mutex))) == NULL)
169                         ret = ENOMEM;
170                 else {
171                         /* Set the mutex flags: */
172                         pmutex->m_flags = flags;
173
174                         /* Process according to mutex type: */
175                         switch (type) {
176                         /* case PTHREAD_MUTEX_DEFAULT: */
177                         case PTHREAD_MUTEX_ERRORCHECK:
178                         case PTHREAD_MUTEX_NORMAL:
179                                 /* Nothing to do here. */
180                                 break;
181
182                         /* Single UNIX Spec 2 recursive mutex: */
183                         case PTHREAD_MUTEX_RECURSIVE:
184                                 /* Reset the mutex count: */
185                                 pmutex->m_data.m_count = 0;
186                                 break;
187
188                         /* Trap invalid mutex types: */
189                         default:
190                                 /* Return an invalid argument error: */
191                                 ret = EINVAL;
192                                 break;
193                         }
194                         if (ret == 0) {
195                                 /* Initialise the rest of the mutex: */
196                                 TAILQ_INIT(&pmutex->m_queue);
197                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
198                                 pmutex->m_owner = NULL;
199                                 pmutex->m_type = type;
200                                 pmutex->m_protocol = protocol;
201                                 pmutex->m_refcount = 0;
202                                 if (protocol == PTHREAD_PRIO_PROTECT)
203                                         pmutex->m_prio = ceiling;
204                                 else
205                                         pmutex->m_prio = 0;
206                                 pmutex->m_saved_prio = 0;
207                                 _MUTEX_INIT_LINK(pmutex);
208                                 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
209                                 *mutex = pmutex;
210                         } else {
211                                 free(pmutex);
212                                 *mutex = NULL;
213                         }
214                 }
215         }
216         /* Return the completion status: */
217         return (ret);
218 }
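
/*
 * Illustrative usage sketch (assuming the standard pthread_mutexattr_init()
 * and pthread_mutexattr_settype() interfaces): creating a recursive mutex
 * instead of the default error-checking type.
 *
 *        #include <pthread.h>
 *
 *        pthread_mutex_t m;
 *        pthread_mutexattr_t attr;
 *
 *        pthread_mutexattr_init(&attr);
 *        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *        pthread_mutex_init(&m, &attr);
 *        pthread_mutexattr_destroy(&attr);
 *
 *        pthread_mutex_lock(&m);
 *        pthread_mutex_lock(&m);         -- m_count is bumped, no deadlock
 *        pthread_mutex_unlock(&m);
 *        pthread_mutex_unlock(&m);       -- second unlock releases the mutex
 */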
219
220 int
221 _pthread_mutex_destroy(pthread_mutex_t * mutex)
222 {
223         int     ret = 0;
224
225         if (mutex == NULL || *mutex == NULL)
226                 ret = EINVAL;
227         else {
228                 /* Lock the mutex structure: */
229                 _SPINLOCK(&(*mutex)->lock);
230
231                 /*
232                  * Check to see if this mutex is in use:
233                  */
234                 if (((*mutex)->m_owner != NULL) ||
235                     (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
236                     ((*mutex)->m_refcount != 0)) {
237                         ret = EBUSY;
238
239                         /* Unlock the mutex structure: */
240                         _SPINUNLOCK(&(*mutex)->lock);
241                 }
242                 else {
243                         /*
244                          * Free the memory allocated for the mutex
245                          * structure:
246                          */
247                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
248                         free(*mutex);
249
250                         /*
251                          * Leave the caller's pointer NULL now that
252                          * the mutex has been destroyed:
253                          */
254                         *mutex = NULL;
255                 }
256         }
257
258         /* Return the completion status: */
259         return (ret);
260 }
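
/*
 * A minimal sketch of the destroy semantics above: a mutex that is locked,
 * waited on, or still referenced by a condition variable wait cannot be
 * destroyed.
 *
 *        pthread_mutex_t m;
 *
 *        pthread_mutex_init(&m, NULL);
 *        pthread_mutex_lock(&m);
 *        pthread_mutex_destroy(&m);      -- returns EBUSY, m is unchanged
 *        pthread_mutex_unlock(&m);
 *        pthread_mutex_destroy(&m);      -- returns 0 and sets m to NULL
 */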
261
262 static int
263 init_static(pthread_mutex_t *mutex)
264 {
265         int     ret;
266
267         _SPINLOCK(&static_init_lock);
268
269         if (*mutex == NULL)
270                 ret = _pthread_mutex_init(mutex, NULL);
271         else
272                 ret = 0;
273
274         _SPINUNLOCK(&static_init_lock);
275
276         return (ret);
277 }
278
279 static int
280 init_static_private(pthread_mutex_t *mutex)
281 {
282         int     ret;
283
284         _SPINLOCK(&static_init_lock);
285
286         if (*mutex == NULL)
287                 ret = _pthread_mutex_init(mutex, &static_mattr);
288         else
289                 ret = 0;
290
291         _SPINUNLOCK(&static_init_lock);
292
293         return (ret);
294 }
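
/*
 * These helpers back the lazy initialization of statically-initialized
 * mutexes: in this implementation a pthread_mutex_t is a pointer and a
 * static initializer leaves it NULL, so the first lock or trylock call
 * allocates and initializes the real structure under static_init_lock.
 * A rough sketch:
 *
 *        static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *
 *        pthread_mutex_lock(&m);         -- *mutex == NULL, so init_static()
 *                                           runs _pthread_mutex_init() first
 *        pthread_mutex_unlock(&m);
 */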
295
296 static int
297 mutex_trylock_common(pthread_mutex_t *mutex)
298 {
299         struct pthread  *curthread = _get_curthread();
300         int     ret = 0;
301
302         PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
303             "Uninitialized mutex in pthread_mutex_trylock_basic");
304
305         /*
306          * Defer signals to protect the scheduling queues from
307          * access by the signal handler:
308          */
309         _thread_kern_sig_defer();
310
311         /* Lock the mutex structure: */
312         _SPINLOCK(&(*mutex)->lock);
313
314         /*
315          * If the mutex was statically allocated, properly
316          * initialize the tail queue.
317          */
318         if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
319                 TAILQ_INIT(&(*mutex)->m_queue);
320                 _MUTEX_INIT_LINK(*mutex);
321                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
322         }
323
324         /* Process according to mutex type: */
325         switch ((*mutex)->m_protocol) {
326         /* Default POSIX mutex: */
327         case PTHREAD_PRIO_NONE: 
328                 /* Check if this mutex is not locked: */
329                 if ((*mutex)->m_owner == NULL) {
330                         /* Lock the mutex for the running thread: */
331                         (*mutex)->m_owner = curthread;
332
333                         /* Add to the list of owned mutexes: */
334                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
335                         TAILQ_INSERT_TAIL(&curthread->mutexq,
336                             (*mutex), m_qe);
337                 } else if ((*mutex)->m_owner == curthread)
338                         ret = mutex_self_trylock(*mutex);
339                 else
340                         /* Return a busy error: */
341                         ret = EBUSY;
342                 break;
343
344         /* POSIX priority inheritance mutex: */
345         case PTHREAD_PRIO_INHERIT:
346                 /* Check if this mutex is not locked: */
347                 if ((*mutex)->m_owner == NULL) {
348                         /* Lock the mutex for the running thread: */
349                         (*mutex)->m_owner = curthread;
350
351                         /* Track number of priority mutexes owned: */
352                         curthread->priority_mutex_count++;
353
354                         /*
355                          * The mutex takes on the attributes of the
356                          * running thread when there are no waiters.
357                          */
358                         (*mutex)->m_prio = curthread->active_priority;
359                         (*mutex)->m_saved_prio =
360                             curthread->inherited_priority;
361
362                         /* Add to the list of owned mutexes: */
363                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
364                         TAILQ_INSERT_TAIL(&curthread->mutexq,
365                             (*mutex), m_qe);
366                 } else if ((*mutex)->m_owner == curthread)
367                         ret = mutex_self_trylock(*mutex);
368                 else
369                         /* Return a busy error: */
370                         ret = EBUSY;
371                 break;
372
373         /* POSIX priority protection mutex: */
374         case PTHREAD_PRIO_PROTECT:
375                 /* Check for a priority ceiling violation: */
376                 if (curthread->active_priority > (*mutex)->m_prio)
377                         ret = EINVAL;
378
379                 /* Check if this mutex is not locked: */
380                 else if ((*mutex)->m_owner == NULL) {
381                         /* Lock the mutex for the running thread: */
382                         (*mutex)->m_owner = curthread;
383
384                         /* Track number of priority mutexes owned: */
385                         curthread->priority_mutex_count++;
386
387                         /*
388                          * The running thread inherits the ceiling
389                          * priority of the mutex and executes at that
390                          * priority.
391                          */
392                         curthread->active_priority = (*mutex)->m_prio;
393                         (*mutex)->m_saved_prio =
394                             curthread->inherited_priority;
395                         curthread->inherited_priority =
396                             (*mutex)->m_prio;
397
398                         /* Add to the list of owned mutexes: */
399                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
400                         TAILQ_INSERT_TAIL(&curthread->mutexq,
401                             (*mutex), m_qe);
402                 } else if ((*mutex)->m_owner == curthread)
403                         ret = mutex_self_trylock(*mutex);
404                 else
405                         /* Return a busy error: */
406                         ret = EBUSY;
407                 break;
408
409         /* Trap invalid mutex types: */
410         default:
411                 /* Return an invalid argument error: */
412                 ret = EINVAL;
413                 break;
414         }
415
416         /* Unlock the mutex structure: */
417         _SPINUNLOCK(&(*mutex)->lock);
418
419         /*
420          * Undefer and handle pending signals, yielding if
421          * necessary:
422          */
423         _thread_kern_sig_undefer();
424
425         /* Return the completion status: */
426         return (ret);
427 }
428
429 int
430 __pthread_mutex_trylock(pthread_mutex_t *mutex)
431 {
432         int     ret = 0;
433
434         if (mutex == NULL)
435                 ret = EINVAL;
436
437         /*
438          * If the mutex is statically initialized, perform the dynamic
439          * initialization:
440          */
441         else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
442                 ret = mutex_trylock_common(mutex);
443
444         return (ret);
445 }
446
447 int
448 _pthread_mutex_trylock(pthread_mutex_t *mutex)
449 {
450         int     ret = 0;
451
452         if (mutex == NULL)
453                 ret = EINVAL;
454
455         /*
456          * If the mutex is statically initialized, perform the dynamic
457          * initialization marking the mutex private (delete safe):
458          */
459         else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
460                 ret = mutex_trylock_common(mutex);
461
462         return (ret);
463 }
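
/*
 * Illustrative trylock usage: unlike pthread_mutex_lock(), trylock never
 * queues the caller; it fails immediately with EBUSY when the mutex is
 * held (including, for the non-recursive types, when the caller itself
 * is the owner).
 *
 *        #include <errno.h>
 *        #include <pthread.h>
 *
 *        extern pthread_mutex_t m;
 *        int error;
 *
 *        error = pthread_mutex_trylock(&m);
 *        if (error == 0) {
 *                ... do the work, then pthread_mutex_unlock(&m) ...
 *        } else if (error == EBUSY) {
 *                ... someone else holds it; do something else for now ...
 *        }
 */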
464
465 static int
466 mutex_lock_common(pthread_mutex_t * mutex)
467 {
468         struct pthread  *curthread = _get_curthread();
469         int     ret = 0;
470
471         PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
472             "Uninitialized mutex in pthread_mutex_lock_basic");
473
474         /* Reset the interrupted flag: */
475         curthread->interrupted = 0;
476
477         /*
478          * Enter a loop waiting to become the mutex owner.  We need a
479          * loop in case the waiting thread is interrupted by a signal
480          * to execute a signal handler.  It is not (currently) possible
481          * to remain in the waiting queue while running a handler.
482          * Instead, the thread is interrupted and backed out of the
483          * waiting queue prior to executing the signal handler.
484          */
485         do {
486                 /*
487                  * Defer signals to protect the scheduling queues from
488                  * access by the signal handler:
489                  */
490                 _thread_kern_sig_defer();
491
492                 /* Lock the mutex structure: */
493                 _SPINLOCK(&(*mutex)->lock);
494
495                 /*
496                  * If the mutex was statically allocated, properly
497                  * initialize the tail queue.
498                  */
499                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
500                         TAILQ_INIT(&(*mutex)->m_queue);
501                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
502                         _MUTEX_INIT_LINK(*mutex);
503                 }
504
505                 /* Process according to mutex type: */
506                 switch ((*mutex)->m_protocol) {
507                 /* Default POSIX mutex: */
508                 case PTHREAD_PRIO_NONE:
509                         if ((*mutex)->m_owner == NULL) {
510                                 /* Lock the mutex for this thread: */
511                                 (*mutex)->m_owner = curthread;
512
513                                 /* Add to the list of owned mutexes: */
514                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
515                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
516                                     (*mutex), m_qe);
517
518                         } else if ((*mutex)->m_owner == curthread)
519                                 ret = mutex_self_lock(*mutex);
520                         else {
521                                 /*
522                                  * Join the queue of threads waiting to lock
523                                  * the mutex: 
524                                  */
525                                 mutex_queue_enq(*mutex, curthread);
526
527                                 /*
528                                  * Keep a pointer to the mutex this thread
529                                  * is waiting on:
530                                  */
531                                 curthread->data.mutex = *mutex;
532
533                                 /*
534                                  * Unlock the mutex structure and schedule the
535                                  * next thread:
536                                  */
537                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
538                                     &(*mutex)->lock, __FILE__, __LINE__);
539
540                                 /* Lock the mutex structure again: */
541                                 _SPINLOCK(&(*mutex)->lock);
542                         }
543                         break;
544
545                 /* POSIX priority inheritance mutex: */
546                 case PTHREAD_PRIO_INHERIT:
547                         /* Check if this mutex is not locked: */
548                         if ((*mutex)->m_owner == NULL) {
549                                 /* Lock the mutex for this thread: */
550                                 (*mutex)->m_owner = curthread;
551
552                                 /* Track number of priority mutexes owned: */
553                                 curthread->priority_mutex_count++;
554
555                                 /*
556                                  * The mutex takes on attributes of the
557                                  * running thread when there are no waiters.
558                                  */
559                                 (*mutex)->m_prio = curthread->active_priority;
560                                 (*mutex)->m_saved_prio =
561                                     curthread->inherited_priority;
562                                 curthread->inherited_priority =
563                                     (*mutex)->m_prio;
564
565                                 /* Add to the list of owned mutexes: */
566                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
567                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
568                                     (*mutex), m_qe);
569
570                         } else if ((*mutex)->m_owner == curthread)
571                                 ret = mutex_self_lock(*mutex);
572                         else {
573                                 /*
574                                  * Join the queue of threads waiting to lock
575                                  * the mutex: 
576                                  */
577                                 mutex_queue_enq(*mutex, curthread);
578
579                                 /*
580                                  * Keep a pointer to the mutex this thread
581                                  * is waiting on:
582                                  */
583                                 curthread->data.mutex = *mutex;
584
585                                 if (curthread->active_priority >
586                                     (*mutex)->m_prio)
587                                         /* Adjust priorities: */
588                                         mutex_priority_adjust(*mutex);
589
590                                 /*
591                                  * Unlock the mutex structure and schedule the
592                                  * next thread:
593                                  */
594                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
595                                     &(*mutex)->lock, __FILE__, __LINE__);
596
597                                 /* Lock the mutex structure again: */
598                                 _SPINLOCK(&(*mutex)->lock);
599                         }
600                         break;
601
602                 /* POSIX priority protection mutex: */
603                 case PTHREAD_PRIO_PROTECT:
604                         /* Check for a priority ceiling violation: */
605                         if (curthread->active_priority > (*mutex)->m_prio)
606                                 ret = EINVAL;
607
608                         /* Check if this mutex is not locked: */
609                         else if ((*mutex)->m_owner == NULL) {
610                                 /*
611                                  * Lock the mutex for the running
612                                  * thread:
613                                  */
614                                 (*mutex)->m_owner = curthread;
615
616                                 /* Track number of priority mutexes owned: */
617                                 curthread->priority_mutex_count++;
618
619                                 /*
620                                  * The running thread inherits the ceiling
621                                  * priority of the mutex and executes at that
622                                  * priority:
623                                  */
624                                 curthread->active_priority = (*mutex)->m_prio;
625                                 (*mutex)->m_saved_prio =
626                                     curthread->inherited_priority;
627                                 curthread->inherited_priority =
628                                     (*mutex)->m_prio;
629
630                                 /* Add to the list of owned mutexes: */
631                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
632                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
633                                     (*mutex), m_qe);
634                         } else if ((*mutex)->m_owner == curthread)
635                                 ret = mutex_self_lock(*mutex);
636                         else {
637                                 /*
638                                  * Join the queue of threads waiting to lock
639                                  * the mutex: 
640                                  */
641                                 mutex_queue_enq(*mutex, curthread);
642
643                                 /*
644                                  * Keep a pointer to the mutex this thread
645                                  * is waiting on:
646                                  */
647                                 curthread->data.mutex = *mutex;
648
649                                 /* Clear any previous error: */
650                                 curthread->error = 0;
651
652                                 /*
653                                  * Unlock the mutex structure and schedule the
654                                  * next thread:
655                                  */
656                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
657                                     &(*mutex)->lock, __FILE__, __LINE__);
658
659                                 /* Lock the mutex structure again: */
660                                 _SPINLOCK(&(*mutex)->lock);
661
662                                 /*
663                                  * The thread's priority may have changed while
664                                  * waiting for the mutex, causing a ceiling
665                                  * violation.
666                                  */
667                                 ret = curthread->error;
668                                 curthread->error = 0;
669                         }
670                         break;
671
672                 /* Trap invalid mutex types: */
673                 default:
674                         /* Return an invalid argument error: */
675                         ret = EINVAL;
676                         break;
677                 }
678
679                 /*
680                  * Check to see if this thread was interrupted and
681                  * is still in the mutex queue of waiting threads:
682                  */
683                 if (curthread->interrupted != 0)
684                         mutex_queue_remove(*mutex, curthread);
685
686                 /* Unlock the mutex structure: */
687                 _SPINUNLOCK(&(*mutex)->lock);
688
689                 /*
690                  * Undefer and handle pending signals, yielding if
691                  * necessary:
692                  */
693                 _thread_kern_sig_undefer();
694         } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
695             (curthread->interrupted == 0));
696
697         if (curthread->interrupted != 0 &&
698             curthread->continuation != NULL)
699                 curthread->continuation((void *) curthread);
700
701         /* Return the completion status: */
702         return (ret);
703 }
704
705 int
706 __pthread_mutex_lock(pthread_mutex_t *mutex)
707 {
708         int     ret = 0;
709
710         if (_thread_initial == NULL)
711                 _thread_init();
712
713         if (mutex == NULL)
714                 ret = EINVAL;
715
716         /*
717          * If the mutex is statically initialized, perform the dynamic
718          * initialization:
719          */
720         else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
721                 ret = mutex_lock_common(mutex);
722
723         return (ret);
724 }
725
726 int
727 _pthread_mutex_lock(pthread_mutex_t *mutex)
728 {
729         int     ret = 0;
730
731         if (_thread_initial == NULL)
732                 _thread_init();
733
734         if (mutex == NULL)
735                 ret = EINVAL;
736
737         /*
738          * If the mutex is statically initialized, perform the dynamic
739          * initialization marking it private (delete safe):
740          */
741         else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
742                 ret = mutex_lock_common(mutex);
743
744         return (ret);
745 }
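
/*
 * A hedged sketch of the PTHREAD_PRIO_PROTECT behaviour implemented in
 * mutex_lock_common() (assuming the standard pthread_mutexattr_setprotocol()
 * and pthread_mutexattr_setprioceiling() interfaces are available): a thread
 * whose active priority is above the ceiling gets EINVAL instead of blocking.
 *
 *        pthread_mutexattr_t attr;
 *        pthread_mutex_t m;
 *
 *        pthread_mutexattr_init(&attr);
 *        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *        pthread_mutexattr_setprioceiling(&attr, 10);
 *        pthread_mutex_init(&m, &attr);
 *
 * A thread running at or below priority 10 acquires the mutex and runs at
 * priority 10 while holding it; a thread running above priority 10 gets
 * EINVAL back from pthread_mutex_lock().
 */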
746
747 int
748 _pthread_mutex_unlock(pthread_mutex_t * mutex)
749 {
750         return (mutex_unlock_common(mutex, /* add reference */ 0));
751 }
752
753 int
754 _mutex_cv_unlock(pthread_mutex_t * mutex)
755 {
756         return (mutex_unlock_common(mutex, /* add reference */ 1));
757 }
758
759 int
760 _mutex_cv_lock(pthread_mutex_t * mutex)
761 {
762         int     ret;
763         if ((ret = _pthread_mutex_lock(mutex)) == 0)
764                 (*mutex)->m_refcount--;
765         return (ret);
766 }
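
/*
 * The _mutex_cv_*() entry points exist for the condition variable code.
 * A rough sketch of how a pthread_cond_wait() style implementation is
 * expected to use them: the reference taken by _mutex_cv_unlock() keeps
 * the mutex from being destroyed while a waiter still has to reacquire
 * it, and _mutex_cv_lock() drops that reference once the lock is held
 * again.
 *
 *        _mutex_cv_unlock(mutex);        -- release the lock, m_refcount++
 *        ... block on the condition variable ...
 *        _mutex_cv_lock(mutex);          -- reacquire the lock, m_refcount--
 */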
767
768 static inline int
769 mutex_self_trylock(pthread_mutex_t mutex)
770 {
771         int     ret = 0;
772
773         switch (mutex->m_type) {
774
775         /* case PTHREAD_MUTEX_DEFAULT: */
776         case PTHREAD_MUTEX_ERRORCHECK:
777         case PTHREAD_MUTEX_NORMAL:
778                 /*
779                  * For pthread_mutex_trylock() POSIX requires EBUSY, not
780                  * EDEADLK, when the caller already owns the mutex.
781                  */
782                 ret = EBUSY;
783                 break;
784
785         case PTHREAD_MUTEX_RECURSIVE:
786                 /* Increment the lock count: */
787                 mutex->m_data.m_count++;
788                 break;
789
790         default:
791                 /* Trap invalid mutex types; */
792                 ret = EINVAL;
793         }
794
795         return (ret);
796 }
797
798 static inline int
799 mutex_self_lock(pthread_mutex_t mutex)
800 {
801         int ret = 0;
802
803         switch (mutex->m_type) {
804         /* case PTHREAD_MUTEX_DEFAULT: */
805         case PTHREAD_MUTEX_ERRORCHECK:
806                 /*
807                  * POSIX specifies that mutexes should return EDEADLK if a
808                  * recursive lock is detected.
809                  */
810                 ret = EDEADLK; 
811                 break;
812
813         case PTHREAD_MUTEX_NORMAL:
814                 /*
815                  * What SS2 defines as a 'normal' mutex.  Intentionally
816                  * deadlocks on attempts to lock a mutex the caller already owns.
817                  */
818                 _thread_kern_sched_state_unlock(PS_DEADLOCK,
819                     &mutex->lock, __FILE__, __LINE__);
820                 break;
821
822         case PTHREAD_MUTEX_RECURSIVE:
823                 /* Increment the lock count: */
824                 mutex->m_data.m_count++;
825                 break;
826
827         default:
828                 /* Trap invalid mutex types; */
829                 ret = EINVAL;
830         }
831
832         return (ret);
833 }
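
/*
 * Summary of the self-lock behaviour implemented by the two functions
 * above, for a thread that already owns the mutex:
 *
 *        type                            lock()          trylock()
 *        PTHREAD_MUTEX_ERRORCHECK        EDEADLK         EBUSY
 *        PTHREAD_MUTEX_NORMAL            deadlocks       EBUSY
 *        PTHREAD_MUTEX_RECURSIVE         count++, 0      count++, 0
 *
 * For example, with the default error-checking type:
 *
 *        pthread_mutex_lock(&m);
 *        error = pthread_mutex_lock(&m);         -- error == EDEADLK
 */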
834
835 static inline int
836 mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
837 {
838         struct pthread  *curthread = _get_curthread();
839         int     ret = 0;
840
841         if (mutex == NULL || *mutex == NULL) {
842                 ret = EINVAL;
843         } else {
844                 /*
845                  * Defer signals to protect the scheduling queues from
846                  * access by the signal handler:
847                  */
848                 _thread_kern_sig_defer();
849
850                 /* Lock the mutex structure: */
851                 _SPINLOCK(&(*mutex)->lock);
852
853                 /* Process according to mutex type: */
854                 switch ((*mutex)->m_protocol) {
855                 /* Default POSIX mutex: */
856                 case PTHREAD_PRIO_NONE:
857                         /*
858                          * Check if the running thread is not the owner of the
859                          * mutex:
860                          */
861                         if ((*mutex)->m_owner != curthread) {
862                                 /*
863                                  * Return an invalid argument error for no
864                                  * owner and a permission error otherwise:
865                                  */
866                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
867                         }
868                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
869                             ((*mutex)->m_data.m_count > 0)) {
870                                 /* Decrement the count: */
871                                 (*mutex)->m_data.m_count--;
872                         } else {
873                                 /*
874                                  * Clear the count in case this is recursive
875                                  * mutex.
876                                  */
877                                 (*mutex)->m_data.m_count = 0;
878
879                                 /* Remove the mutex from the threads queue. */
880                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
881                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
882                                     (*mutex), m_qe);
883                                 _MUTEX_INIT_LINK(*mutex);
884
885                                 /*
886                                  * Get the next thread from the queue of
887                                  * threads waiting on the mutex: 
888                                  */
889                                 if (((*mutex)->m_owner =
890                                     mutex_queue_deq(*mutex)) != NULL) {
891                                         /* Make the new owner runnable: */
892                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
893                                             PS_RUNNING);
894
895                                         /*
896                                          * Add the mutex to the threads list of
897                                          * owned mutexes:
898                                          */
899                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
900                                             (*mutex), m_qe);
901
902                                         /*
903                                          * The owner is no longer waiting for
904                                          * this mutex:
905                                          */
906                                         (*mutex)->m_owner->data.mutex = NULL;
907                                 }
908                         }
909                         break;
910
911                 /* POSIX priority inheritance mutex: */
912                 case PTHREAD_PRIO_INHERIT:
913                         /*
914                          * Check if the running thread is not the owner of the
915                          * mutex:
916                          */
917                         if ((*mutex)->m_owner != curthread) {
918                                 /*
919                                  * Return an invalid argument error for no
920                                  * owner and a permission error otherwise:
921                                  */
922                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
923                         }
924                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
925                             ((*mutex)->m_data.m_count > 0)) {
926                                 /* Decrement the count: */
927                                 (*mutex)->m_data.m_count--;
928                         } else {
929                                 /*
930                                  * Clear the count in case this is recursive
931                                  * mutex.
932                                  */
933                                 (*mutex)->m_data.m_count = 0;
934
935                                 /*
936                                  * Restore the threads inherited priority and
937                                  * recompute the active priority (being careful
938                                  * not to override changes in the threads base
939                                  * priority subsequent to locking the mutex).
940                                  */
941                                 curthread->inherited_priority =
942                                         (*mutex)->m_saved_prio;
943                                 curthread->active_priority =
944                                     MAX(curthread->inherited_priority,
945                                     curthread->base_priority);
946
947                                 /*
948                                  * This thread now owns one less priority mutex.
949                                  */
950                                 curthread->priority_mutex_count--;
951
952                                 /* Remove the mutex from the threads queue. */
953                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
954                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
955                                     (*mutex), m_qe);
956                                 _MUTEX_INIT_LINK(*mutex);
957
958                                 /*
959                                  * Get the next thread from the queue of threads
960                                  * waiting on the mutex: 
961                                  */
962                                 if (((*mutex)->m_owner = 
963                                     mutex_queue_deq(*mutex)) == NULL)
964                                         /* This mutex has no priority. */
965                                         (*mutex)->m_prio = 0;
966                                 else {
967                                         /*
968                                          * Track number of priority mutexes owned:
969                                          */
970                                         (*mutex)->m_owner->priority_mutex_count++;
971
972                                         /*
973                                          * Add the mutex to the threads list
974                                          * of owned mutexes:
975                                          */
976                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
977                                             (*mutex), m_qe);
978
979                                         /*
980                                          * The owner is no longer waiting for
981                                          * this mutex:
982                                          */
983                                         (*mutex)->m_owner->data.mutex = NULL;
984
985                                         /*
986                                          * Set the priority of the mutex.  Since
987                                          * our waiting threads are in descending
988                                          * priority order, the priority of the
989                                          * mutex becomes the active priority of
990                                          * the thread we just dequeued.
991                                          */
992                                         (*mutex)->m_prio =
993                                             (*mutex)->m_owner->active_priority;
994
995                                         /*
996                                          * Save the owning threads inherited
997                                          * priority:
998                                          */
999                                         (*mutex)->m_saved_prio =
1000                                                 (*mutex)->m_owner->inherited_priority;
1001
1002                                         /*
1003                                          * The owning thread's inherited priority
1004                                          * now becomes its active priority (the
1005                                          * priority of the mutex).
1006                                          */
1007                                         (*mutex)->m_owner->inherited_priority =
1008                                                 (*mutex)->m_prio;
1009
1010                                         /*
1011                                          * Make the new owner runnable:
1012                                          */
1013                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
1014                                             PS_RUNNING);
1015                                 }
1016                         }
1017                         break;
1018
1019                 /* POSIX priority ceiling mutex: */
1020                 case PTHREAD_PRIO_PROTECT:
1021                         /*
1022                          * Check if the running thread is not the owner of the
1023                          * mutex:
1024                          */
1025                         if ((*mutex)->m_owner != curthread) {
1026                                 /*
1027                                  * Return an invalid argument error for no
1028                                  * owner and a permission error otherwise:
1029                                  */
1030                                 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
1031                         }
1032                         else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
1033                             ((*mutex)->m_data.m_count > 0)) {
1034                                 /* Decrement the count: */
1035                                 (*mutex)->m_data.m_count--;
1036                         } else {
1037                                 /*
1038                                  * Clear the count in case this is recursive
1039                                  * mutex.
1040                                  */
1041                                 (*mutex)->m_data.m_count = 0;
1042
1043                                 /*
1044                                  * Restore the threads inherited priority and
1045                                  * recompute the active priority (being careful
1046                                  * not to override changes in the threads base
1047                                  * priority subsequent to locking the mutex).
1048                                  */
1049                                 curthread->inherited_priority =
1050                                         (*mutex)->m_saved_prio;
1051                                 curthread->active_priority =
1052                                     MAX(curthread->inherited_priority,
1053                                     curthread->base_priority);
1054
1055                                 /*
1056                                  * This thread now owns one less priority mutex.
1057                                  */
1058                                 curthread->priority_mutex_count--;
1059
1060                                 /* Remove the mutex from the threads queue. */
1061                                 _MUTEX_ASSERT_IS_OWNED(*mutex);
1062                                 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
1063                                     (*mutex), m_qe);
1064                                 _MUTEX_INIT_LINK(*mutex);
1065
1066                                 /*
1067                                  * Enter a loop to find a waiting thread whose
1068                                  * active priority will not cause a ceiling
1069                                  * violation:
1070                                  */
1071                                 while ((((*mutex)->m_owner =
1072                                     mutex_queue_deq(*mutex)) != NULL) &&
1073                                     ((*mutex)->m_owner->active_priority >
1074                                      (*mutex)->m_prio)) {
1075                                         /*
1076                                          * Either the mutex ceiling priority has
1077                                          * been lowered and/or this thread's
1078                                          * priority has been raised subsequent
1079                                          * to this thread being queued on the
1080                                          * waiting list.
1081                                          */
1082                                         (*mutex)->m_owner->error = EINVAL;
1083                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
1084                                             PS_RUNNING);
1085                                         /*
1086                                          * The thread is no longer waiting for
1087                                          * this mutex:
1088                                          */
1089                                         (*mutex)->m_owner->data.mutex = NULL;
1090                                 }
1091
1092                                 /* Check for a new owner: */
1093                                 if ((*mutex)->m_owner != NULL) {
1094                                         /*
1095                                          * Track number of priority mutexes owned:
1096                                          */
1097                                         (*mutex)->m_owner->priority_mutex_count++;
1098
1099                                         /*
1100                                          * Add the mutex to the threads list
1101                                          * of owned mutexes:
1102                                          */
1103                                         TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
1104                                             (*mutex), m_qe);
1105
1106                                         /*
1107                                          * The owner is no longer waiting for
1108                                          * this mutex:
1109                                          */
1110                                         (*mutex)->m_owner->data.mutex = NULL;
1111
1112                                         /*
1113                                          * Save the owning threads inherited
1114                                          * priority:
1115                                          */
1116                                         (*mutex)->m_saved_prio =
1117                                                 (*mutex)->m_owner->inherited_priority;
1118
1119                                         /*
1120                                          * The owning thread inherits the
1121                                          * ceiling priority of the mutex and
1122                                          * executes at that priority:
1123                                          */
1124                                         (*mutex)->m_owner->inherited_priority =
1125                                             (*mutex)->m_prio;
1126                                         (*mutex)->m_owner->active_priority =
1127                                             (*mutex)->m_prio;
1128
1129                                         /*
1130                                          * Make the new owner runnable:
1131                                          */
1132                                         PTHREAD_NEW_STATE((*mutex)->m_owner,
1133                                             PS_RUNNING);
1134                                 }
1135                         }
1136                         break;
1137
1138                 /* Trap invalid mutex types: */
1139                 default:
1140                         /* Return an invalid argument error: */
1141                         ret = EINVAL;
1142                         break;
1143                 }
1144
1145                 if ((ret == 0) && (add_reference != 0)) {
1146                         /* Increment the reference count: */
1147                         (*mutex)->m_refcount++;
1148                 }
1149
1150                 /* Unlock the mutex structure: */
1151                 _SPINUNLOCK(&(*mutex)->lock);
1152
1153                 /*
1154                  * Undefer and handle pending signals, yielding if
1155                  * necessary:
1156                  */
1157                 _thread_kern_sig_undefer();
1158         }
1159
1160         /* Return the completion status: */
1161         return (ret);
1162 }
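
/*
 * Unlock error semantics, as implemented for every protocol above:
 *
 *        pthread_mutex_unlock(&m);       -- caller is not the owner: EPERM
 *        pthread_mutex_unlock(&m);       -- mutex is not locked at all: EINVAL
 *
 * A recursive mutex is only handed to the next waiter once its lock count
 * has dropped back to zero.
 */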
1163
1164
1165 /*
1166  * This function is called when a change in base priority occurs for
1167  * a thread that is holding or waiting for a priority protection or
1168  * inheritance mutex.  A change in a thread's base priority can effect
1169  * changes to active priorities of other threads and to the ordering
1170  * of mutex locking by waiting threads.
1171  *
1172  * This must be called while thread scheduling is deferred.
1173  */
1174 void
1175 _mutex_notify_priochange(pthread_t pthread)
1176 {
1177         /* Adjust the priorities of any owned priority mutexes: */
1178         if (pthread->priority_mutex_count > 0) {
1179                 /*
1180                  * Rescan the mutexes owned by this thread and correct
1181                  * their priorities to account for this threads change
1182                  * in priority.  This has the side effect of changing
1183                  * the threads active priority.
1184                  */
1185                 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1186         }
1187
1188         /*
1189          * If this thread is waiting on a priority inheritance mutex,
1190          * check for priority adjustments.  A change in priority can
1191          * also effect a ceiling violation(*) for a thread waiting on
1192          * a priority protection mutex; we don't perform the check here
1193          * as it is done in pthread_mutex_unlock.
1194          *
1195          * (*) It should be noted that a priority change to a thread
1196          *     _after_ taking and owning a priority ceiling mutex
1197          *     does not affect ownership of that mutex; the ceiling
1198          *     priority is only checked before mutex ownership occurs.
1199          */
1200         if (pthread->state == PS_MUTEX_WAIT) {
1201                 /* Lock the mutex structure: */
1202                 _SPINLOCK(&pthread->data.mutex->lock);
1203
1204                 /*
1205                  * Check to make sure this thread is still in the same state
1206                  * (the spinlock above can yield the CPU to another thread):
1207                  */
1208                 if (pthread->state == PS_MUTEX_WAIT) {
1209                         /*
1210                          * Remove and reinsert this thread into the list of
1211                          * waiting threads to preserve decreasing priority
1212                          * order.
1213                          */
1214                         mutex_queue_remove(pthread->data.mutex, pthread);
1215                         mutex_queue_enq(pthread->data.mutex, pthread);
1216
1217                         if (pthread->data.mutex->m_protocol ==
1218                              PTHREAD_PRIO_INHERIT) {
1219                                 /* Adjust priorities: */
1220                                 mutex_priority_adjust(pthread->data.mutex);
1221                         }
1222                 }
1223
1224                 /* Unlock the mutex structure: */
1225                 _SPINUNLOCK(&pthread->data.mutex->lock);
1226         }
1227 }
1228
1229 /*
1230  * Called when a new thread is added to the mutex waiting queue, or
1231  * when the priority of a thread already in the mutex waiting queue
1232  * changes.
1233  */
1234 static void
1235 mutex_priority_adjust(pthread_mutex_t mutex)
1236 {
1237         pthread_t       pthread_next, pthread = mutex->m_owner;
1238         int             temp_prio;
1239         pthread_mutex_t m = mutex;
1240
1241         /*
1242          * Calculate the mutex priority as the maximum of the highest
1243          * active priority of any waiting threads and the owning threads
1244          * active priority(*).
1245          *
1246          * (*) Because the owning threads current active priority may
1247          *     reflect priority inherited from this mutex (and the mutex
1248          *     priority may have changed) we must recalculate the active
1249          *     priority based on the threads saved inherited priority
1250          *     and its base priority.
1251          */
1252         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1253         temp_prio = MAX(pthread_next->active_priority,
1254             MAX(m->m_saved_prio, pthread->base_priority));
1255
1256         /* See if this mutex really needs adjusting: */
1257         if (temp_prio == m->m_prio)
1258                 /* No need to propagate the priority: */
1259                 return;
1260
1261         /* Set new priority of the mutex: */
1262         m->m_prio = temp_prio;
1263
1264         while (m != NULL) {
1265                 /*
1266                  * Save the threads priority before rescanning the
1267                  * owned mutexes:
1268                  */
1269                 temp_prio = pthread->active_priority;
1270
1271                 /*
1272                  * Fix the priorities for all the mutexes this thread has
1273                  * locked since taking this mutex.  This also has a
1274                  * potential side-effect of changing the threads priority.
1275                  */
1276                 mutex_rescan_owned(pthread, m);
1277
1278                 /*
1279                  * If the thread is currently waiting on a mutex, check
1280                  * to see if the thread's new priority has affected the
1281                  * priority of the mutex.
1282                  */
1283                 if ((temp_prio != pthread->active_priority) &&
1284                     (pthread->state == PS_MUTEX_WAIT) &&
1285                     (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1286                         /* Grab the mutex this thread is waiting on: */
1287                         m = pthread->data.mutex;
1288
1289                         /*
1290                          * The priority for this thread has changed.  Remove
1291                          * and reinsert this thread into the list of waiting
1292                          * threads to preserve decreasing priority order.
1293                          */
1294                         mutex_queue_remove(m, pthread);
1295                         mutex_queue_enq(m, pthread);
1296
1297                         /* Grab the waiting thread with highest priority: */
1298                         pthread_next = TAILQ_FIRST(&m->m_queue);
1299
1300                         /*
1301                          * Calculate the mutex priority as the maximum of the
1302                          * highest active priority of any waiting thread and
1303                          * the owning thread's active priority.
1304                          */
1305                         temp_prio = MAX(pthread_next->active_priority,
1306                             MAX(m->m_saved_prio, m->m_owner->base_priority));
1307
1308                         if (temp_prio != m->m_prio) {
1309                                 /*
1310                                  * The priority needs to be propagated to the
1311                                  * mutex this thread is waiting on and up to
1312                                  * the owner of that mutex.
1313                                  */
1314                                 m->m_prio = temp_prio;
1315                                 pthread = m->m_owner;
1316                         }
1317                         else
1318                                 /* We're done: */
1319                                 m = NULL;
1320
1321                 }
1322                 else
1323                         /* We're done: */
1324                         m = NULL;
1325         }
1326 }
1327
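/*
 * Walk the list of mutexes owned by the given thread, starting after
 * 'mutex' (or from the head of the list if 'mutex' is NULL), and
 * recalculate the priority of each priority inheritance mutex.  Then
 * recompute the thread's inherited and active priorities, requeueing
 * the thread in the run queue if its active priority changed.
 */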
1328 static void
1329 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1330 {
1331         int             active_prio, inherited_prio;
1332         pthread_mutex_t m;
1333         pthread_t       pthread_next;
1334
1335         /*
1336          * Start walking the mutexes the thread has taken since
1337          * taking this mutex.
1338          */
1339         if (mutex == NULL) {
1340                 /*
1341                  * A null mutex means start at the beginning of the owned
1342                  * mutex list.
1343                  */
1344                 m = TAILQ_FIRST(&pthread->mutexq);
1345
1346                 /* There is no inherited priority yet. */
1347                 inherited_prio = 0;
1348         }
1349         else {
1350                 /*
1351                  * The caller wants to start after a specific mutex.  It
1352                  * is assumed that this mutex is a priority inheritance
1353                  * mutex and that its priority has been correctly
1354                  * calculated.
1355                  */
1356                 m = TAILQ_NEXT(mutex, m_qe);
1357
1358                 /* Start inheriting priority from the specified mutex. */
1359                 inherited_prio = mutex->m_prio;
1360         }
1361         active_prio = MAX(inherited_prio, pthread->base_priority);
1362
1363         while (m != NULL) {
1364                 /*
1365                  * We only want to deal with priority inheritance
1366                  * mutexes.  This might be optimized by only placing
1367                  * priority inheritance mutexes into the owned mutex
1368                  * list, but it may prove useful to have all
1369                  * owned mutexes in this list.  Consider a thread
1370                  * exiting while holding mutexes...
1371                  */
1372                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1373                         /*
1374                          * Fix the owner's saved (inherited) priority to
1375                          * reflect the priority of the previous mutex.
1376                          */
1377                         m->m_saved_prio = inherited_prio;
1378
1379                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1380                                 /* Recalculate the priority of the mutex: */
1381                                 m->m_prio = MAX(active_prio,
1382                                      pthread_next->active_priority);
1383                         else
1384                                 m->m_prio = active_prio;
1385
1386                         /* Recalculate new inherited and active priorities: */
1387                         inherited_prio = m->m_prio;
1388                         active_prio = MAX(m->m_prio, pthread->base_priority);
1389                 }
1390
1391                 /* Advance to the next mutex owned by this thread: */
1392                 m = TAILQ_NEXT(m, m_qe);
1393         }
1394
1395         /*
1396          * Fix the thread's inherited priority and recalculate its
1397          * active priority.
1398          */
1399         pthread->inherited_priority = inherited_prio;
1400         active_prio = MAX(inherited_prio, pthread->base_priority);
1401
1402         if (active_prio != pthread->active_priority) {
1403                 /*
1404                  * If this thread is in the priority queue, it must be
1405                  * removed and reinserted for its new priority.
1406                  */
1407                 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1408                         /*
1409                          * Remove the thread from the priority queue
1410                          * before changing its priority:
1411                          */
1412                         PTHREAD_PRIOQ_REMOVE(pthread);
1413
1414                         /*
1415                          * POSIX states that if the priority is being
1416                          * lowered, the thread must be inserted at the
1417                          * head of the queue for its priority if it owns
1418                          * any priority protection or inheritance mutexes.
1419                          */
1420                         if ((active_prio < pthread->active_priority) &&
1421                             (pthread->priority_mutex_count > 0)) {
1422                                 /* Set the new active priority. */
1423                                 pthread->active_priority = active_prio;
1424
1425                                 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1426                         }
1427                         else {
1428                                 /* Set the new active priority. */
1429                                 pthread->active_priority = active_prio;
1430
1431                                 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1432                         }
1433                 }
1434                 else {
1435                         /* Set the new active priority. */
1436                         pthread->active_priority = active_prio;
1437                 }
1438         }
1439 }
1440
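/*
 * Unlock every mutex owned by the given thread that is marked
 * MUTEX_FLAGS_PRIVATE.  The next pointer is saved before each unlock
 * because unlocking removes the mutex from the owned-mutex queue.
 */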
1441 void
1442 _mutex_unlock_private(pthread_t pthread)
1443 {
1444         struct pthread_mutex    *m, *m_next;
1445
1446         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1447                 m_next = TAILQ_NEXT(m, m_qe);
1448                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1449                         _pthread_mutex_unlock(&m);
1450         }
1451 }
1452
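/*
 * Back a thread out of a pending mutex lock: if the thread is still
 * on a mutex wait queue (for example, because its lock attempt was
 * interrupted), remove it from that queue and clear its reference to
 * the mutex.
 */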
1453 void
1454 _mutex_lock_backout(pthread_t pthread)
1455 {
1456         struct pthread_mutex    *mutex;
1457
1458         /*
1459          * Defer signals to protect the scheduling queues from
1460          * access by the signal handler:
1461          */
1462         _thread_kern_sig_defer();
1463         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1464                 mutex = pthread->data.mutex;
1465
1466                 /* Lock the mutex structure: */
1467                 _SPINLOCK(&mutex->lock);
1468
1469                 mutex_queue_remove(mutex, pthread);
1470
1471                 /* This thread is no longer waiting for the mutex: */
1472                 pthread->data.mutex = NULL;
1473
1474                 /* Unlock the mutex structure: */
1475                 _SPINUNLOCK(&mutex->lock);
1476
1477         }
1478         /*
1479          * Undefer and handle pending signals, yielding if
1480          * necessary:
1481          */
1482         _thread_kern_sig_undefer();
1483 }
1484
1485 /*
1486  * Dequeue a waiting thread from the head of a mutex queue in descending
1487  * priority order.
1488  */
1489 static inline pthread_t
1490 mutex_queue_deq(pthread_mutex_t mutex)
1491 {
1492         pthread_t pthread;
1493
1494         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1495                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1496                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1497
1498                 /*
1499                  * Only exit the loop if the thread hasn't been
1500                  * cancelled.
1501                  */
1502                 if (pthread->interrupted == 0)
1503                         break;
1504         }
1505
1506         return (pthread);
1507 }
1508
1509 /*
1510  * Remove a waiting thread from a mutex queue in descending priority order.
1511  */
1512 static inline void
1513 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1514 {
1515         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1516                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1517                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1518         }
1519 }
1520
1521 /*
1522  * Enqueue a waiting thread on a mutex queue in descending priority order.
1523  */
1524 static inline void
1525 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1526 {
1527         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1528
1529         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1530         /*
1531          * For the common case of all threads having equal priority,
1532          * we perform a quick check against the priority of the thread
1533          * at the tail of the queue.
1534          */
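        /*
         * If the new thread has higher priority than the tail, the
         * queue is walked from the head instead; since the comparison
         * is <=, the thread is placed after any threads of equal
         * priority.  For example, inserting a thread of priority 15
         * into a queue holding [20, 15, 10] yields [20, 15, 15, 10],
         * keeping the order decreasing and FIFO among equals.
         */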
1535         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1536                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1537         else {
1538                 tid = TAILQ_FIRST(&mutex->m_queue);
1539                 while (pthread->active_priority <= tid->active_priority)
1540                         tid = TAILQ_NEXT(tid, sqe);
1541                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1542         }
1543         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1544 }
1545
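#if 0
/*
 * Illustrative sketch only (compiled out, not part of the library):
 * how an application would request the priority inheritance protocol
 * whose bookkeeping is implemented by the routines above.  The
 * example_* names are hypothetical and error handling is omitted.
 */
static pthread_mutex_t example_mutex;

static void
example_init(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        /* Ask for PTHREAD_PRIO_INHERIT so waiters boost the owner. */
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
        pthread_mutex_init(&example_mutex, &attr);
        pthread_mutexattr_destroy(&attr);
}

static void
example_use(void)
{
        pthread_mutex_lock(&example_mutex);
        /* ... critical section ... */
        pthread_mutex_unlock(&example_mutex);
}
#endif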