/*
 * lib/libc_r/uthread/uthread_mutex.c -- FreeBSD libc_r (user-space
 * threads) mutex implementation.
 * NOTE(review): the lines previously here were gitweb page residue
 * ("Track libc's three-tier symbol naming..." commit-list fragment),
 * converted into this comment.
 */
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #ifdef _THREAD_SAFE
40 #include <pthread.h>
41 #include "pthread_private.h"
42
43 #if defined(_PTHREADS_INVARIANTS)
44 #define _MUTEX_INIT_LINK(m)             do {            \
45         (m)->m_qe.tqe_prev = NULL;                      \
46         (m)->m_qe.tqe_next = NULL;                      \
47 } while (0)
48 #define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
49         if ((m)->m_qe.tqe_prev == NULL)                 \
50                 PANIC("mutex is not on list");          \
51 } while (0)
52 #define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
53         if (((m)->m_qe.tqe_prev != NULL) ||             \
54             ((m)->m_qe.tqe_next != NULL))               \
55                 PANIC("mutex is on list");              \
56 } while (0)
57 #else
58 #define _MUTEX_INIT_LINK(m)
59 #define _MUTEX_ASSERT_IS_OWNED(m)
60 #define _MUTEX_ASSERT_NOT_OWNED(m)
61 #endif
62
63 /*
64  * Prototypes
65  */
66 static inline int       mutex_self_trylock(pthread_mutex_t);
67 static inline int       mutex_self_lock(pthread_mutex_t);
68 static inline int       mutex_unlock_common(pthread_mutex_t *, int);
69 static void             mutex_priority_adjust(pthread_mutex_t);
70 static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
71 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
72 static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
73 static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
74
75
76 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77
78 /* Reinitialize a mutex to defaults. */
79 int
80 _mutex_reinit(pthread_mutex_t * mutex)
81 {
82         int ret = 0;
83
84         if (mutex == NULL)
85                 ret = EINVAL;
86         else if (*mutex == NULL)
87                 ret = pthread_mutex_init(mutex, NULL);
88         else {
89                 /*
90                  * Initialize the mutex structure:
91                  */
92                 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93                 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94                 TAILQ_INIT(&(*mutex)->m_queue);
95                 (*mutex)->m_owner = NULL;
96                 (*mutex)->m_data.m_count = 0;
97                 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99                 (*mutex)->m_refcount = 0;
100                 (*mutex)->m_prio = 0;
101                 (*mutex)->m_saved_prio = 0;
102                 _MUTEX_INIT_LINK(*mutex);
103                 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
104         }
105         return (ret);
106 }
107
108 int
109 pthread_mutex_init(pthread_mutex_t * mutex,
110                    const pthread_mutexattr_t * mutex_attr)
111 {
112         enum pthread_mutextype  type;
113         int             protocol;
114         int             ceiling;
115         pthread_mutex_t pmutex;
116         int             ret = 0;
117
118         if (mutex == NULL)
119                 ret = EINVAL;
120
121         /* Check if default mutex attributes: */
122         else if (mutex_attr == NULL || *mutex_attr == NULL) {
123                 /* Default to a (error checking) POSIX mutex: */
124                 type = PTHREAD_MUTEX_ERRORCHECK;
125                 protocol = PTHREAD_PRIO_NONE;
126                 ceiling = PTHREAD_MAX_PRIORITY;
127         }
128
129         /* Check mutex type: */
130         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131             ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132                 /* Return an invalid argument error: */
133                 ret = EINVAL;
134
135         /* Check mutex protocol: */
136         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137             ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138                 /* Return an invalid argument error: */
139                 ret = EINVAL;
140
141         else {
142                 /* Use the requested mutex type and protocol: */
143                 type = (*mutex_attr)->m_type;
144                 protocol = (*mutex_attr)->m_protocol;
145                 ceiling = (*mutex_attr)->m_ceiling;
146         }
147
148         /* Check no errors so far: */
149         if (ret == 0) {
150                 if ((pmutex = (pthread_mutex_t)
151                     malloc(sizeof(struct pthread_mutex))) == NULL)
152                         ret = ENOMEM;
153                 else {
154                         /* Reset the mutex flags: */
155                         pmutex->m_flags = 0;
156
157                         /* Process according to mutex type: */
158                         switch (type) {
159                         /* case PTHREAD_MUTEX_DEFAULT: */
160                         case PTHREAD_MUTEX_ERRORCHECK:
161                         case PTHREAD_MUTEX_NORMAL:
162                                 /* Nothing to do here. */
163                                 break;
164
165                         /* Single UNIX Spec 2 recursive mutex: */
166                         case PTHREAD_MUTEX_RECURSIVE:
167                                 /* Reset the mutex count: */
168                                 pmutex->m_data.m_count = 0;
169                                 break;
170
171                         /* Trap invalid mutex types: */
172                         default:
173                                 /* Return an invalid argument error: */
174                                 ret = EINVAL;
175                                 break;
176                         }
177                         if (ret == 0) {
178                                 /* Initialise the rest of the mutex: */
179                                 TAILQ_INIT(&pmutex->m_queue);
180                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
181                                 pmutex->m_owner = NULL;
182                                 pmutex->m_type = type;
183                                 pmutex->m_protocol = protocol;
184                                 pmutex->m_refcount = 0;
185                                 if (protocol == PTHREAD_PRIO_PROTECT)
186                                         pmutex->m_prio = ceiling;
187                                 else
188                                         pmutex->m_prio = 0;
189                                 pmutex->m_saved_prio = 0;
190                                 _MUTEX_INIT_LINK(pmutex);
191                                 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
192                                 *mutex = pmutex;
193                         } else {
194                                 free(pmutex);
195                                 *mutex = NULL;
196                         }
197                 }
198         }
199         /* Return the completion status: */
200         return(ret);
201 }
202
203 int
204 pthread_mutex_destroy(pthread_mutex_t * mutex)
205 {
206         int ret = 0;
207
208         if (mutex == NULL || *mutex == NULL)
209                 ret = EINVAL;
210         else {
211                 /* Lock the mutex structure: */
212                 _SPINLOCK(&(*mutex)->lock);
213
214                 /*
215                  * Check to see if this mutex is in use:
216                  */
217                 if (((*mutex)->m_owner != NULL) ||
218                     (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
219                     ((*mutex)->m_refcount != 0)) {
220                         ret = EBUSY;
221
222                         /* Unlock the mutex structure: */
223                         _SPINUNLOCK(&(*mutex)->lock);
224                 }
225                 else {
226                         /*
227                          * Free the memory allocated for the mutex
228                          * structure:
229                          */
230                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
231                         free(*mutex);
232
233                         /*
234                          * Leave the caller's pointer NULL now that
235                          * the mutex has been destroyed:
236                          */
237                         *mutex = NULL;
238                 }
239         }
240
241         /* Return the completion status: */
242         return (ret);
243 }
244
245 static int
246 init_static(pthread_mutex_t *mutex)
247 {
248         int ret;
249
250         _SPINLOCK(&static_init_lock);
251
252         if (*mutex == NULL)
253                 ret = pthread_mutex_init(mutex, NULL);
254         else
255                 ret = 0;
256
257         _SPINUNLOCK(&static_init_lock);
258
259         return(ret);
260 }
261
262 int
263 pthread_mutex_trylock(pthread_mutex_t * mutex)
264 {
265         int             ret = 0;
266
267         if (mutex == NULL)
268                 ret = EINVAL;
269
270         /*
271          * If the mutex is statically initialized, perform the dynamic
272          * initialization:
273          */
274         else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
275                 /*
276                  * Defer signals to protect the scheduling queues from
277                  * access by the signal handler:
278                  */
279                 _thread_kern_sig_defer();
280
281                 /* Lock the mutex structure: */
282                 _SPINLOCK(&(*mutex)->lock);
283
284                 /*
285                  * If the mutex was statically allocated, properly
286                  * initialize the tail queue.
287                  */
288                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
289                         TAILQ_INIT(&(*mutex)->m_queue);
290                         _MUTEX_INIT_LINK(*mutex);
291                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
292                 }
293
294                 /* Process according to mutex type: */
295                 switch ((*mutex)->m_protocol) {
296                 /* Default POSIX mutex: */
297                 case PTHREAD_PRIO_NONE: 
298                         /* Check if this mutex is not locked: */
299                         if ((*mutex)->m_owner == NULL) {
300                                 /* Lock the mutex for the running thread: */
301                                 (*mutex)->m_owner = _thread_run;
302
303                                 /* Add to the list of owned mutexes: */
304                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
305                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
306                                     (*mutex), m_qe);
307                         } else if ((*mutex)->m_owner == _thread_run)
308                                 ret = mutex_self_trylock(*mutex);
309                         else
310                                 /* Return a busy error: */
311                                 ret = EBUSY;
312                         break;
313
314                 /* POSIX priority inheritence mutex: */
315                 case PTHREAD_PRIO_INHERIT:
316                         /* Check if this mutex is not locked: */
317                         if ((*mutex)->m_owner == NULL) {
318                                 /* Lock the mutex for the running thread: */
319                                 (*mutex)->m_owner = _thread_run;
320
321                                 /* Track number of priority mutexes owned: */
322                                 _thread_run->priority_mutex_count++;
323
324                                 /*
325                                  * The mutex takes on the attributes of the
326                                  * running thread when there are no waiters.
327                                  */
328                                 (*mutex)->m_prio = _thread_run->active_priority;
329                                 (*mutex)->m_saved_prio =
330                                     _thread_run->inherited_priority;
331
332                                 /* Add to the list of owned mutexes: */
333                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
334                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
335                                     (*mutex), m_qe);
336                         } else if ((*mutex)->m_owner == _thread_run)
337                                 ret = mutex_self_trylock(*mutex);
338                         else
339                                 /* Return a busy error: */
340                                 ret = EBUSY;
341                         break;
342
343                 /* POSIX priority protection mutex: */
344                 case PTHREAD_PRIO_PROTECT:
345                         /* Check for a priority ceiling violation: */
346                         if (_thread_run->active_priority > (*mutex)->m_prio)
347                                 ret = EINVAL;
348
349                         /* Check if this mutex is not locked: */
350                         else if ((*mutex)->m_owner == NULL) {
351                                 /* Lock the mutex for the running thread: */
352                                 (*mutex)->m_owner = _thread_run;
353
354                                 /* Track number of priority mutexes owned: */
355                                 _thread_run->priority_mutex_count++;
356
357                                 /*
358                                  * The running thread inherits the ceiling
359                                  * priority of the mutex and executes at that
360                                  * priority.
361                                  */
362                                 _thread_run->active_priority = (*mutex)->m_prio;
363                                 (*mutex)->m_saved_prio =
364                                     _thread_run->inherited_priority;
365                                 _thread_run->inherited_priority =
366                                     (*mutex)->m_prio;
367
368                                 /* Add to the list of owned mutexes: */
369                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
370                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
371                                     (*mutex), m_qe);
372                         } else if ((*mutex)->m_owner == _thread_run)
373                                 ret = mutex_self_trylock(*mutex);
374                         else
375                                 /* Return a busy error: */
376                                 ret = EBUSY;
377                         break;
378
379                 /* Trap invalid mutex types: */
380                 default:
381                         /* Return an invalid argument error: */
382                         ret = EINVAL;
383                         break;
384                 }
385
386                 /* Unlock the mutex structure: */
387                 _SPINUNLOCK(&(*mutex)->lock);
388
389                 /*
390                  * Undefer and handle pending signals, yielding if
391                  * necessary:
392                  */
393                 _thread_kern_sig_undefer();
394         }
395
396         /* Return the completion status: */
397         return (ret);
398 }
399
400 int
401 pthread_mutex_lock(pthread_mutex_t * mutex)
402 {
403         int             ret = 0;
404
405         if (mutex == NULL)
406                 ret = EINVAL;
407
408         /*
409          * If the mutex is statically initialized, perform the dynamic
410          * initialization:
411          */
412         else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
413                 /*
414                  * Defer signals to protect the scheduling queues from
415                  * access by the signal handler:
416                  */
417                 _thread_kern_sig_defer();
418
419                 /* Lock the mutex structure: */
420                 _SPINLOCK(&(*mutex)->lock);
421
422                 /*
423                  * If the mutex was statically allocated, properly
424                  * initialize the tail queue.
425                  */
426                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
427                         TAILQ_INIT(&(*mutex)->m_queue);
428                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
429                         _MUTEX_INIT_LINK(*mutex);
430                 }
431
432                 /* Reset the interrupted flag: */
433                 _thread_run->interrupted = 0;
434
435                 /* Process according to mutex type: */
436                 switch ((*mutex)->m_protocol) {
437                 /* Default POSIX mutex: */
438                 case PTHREAD_PRIO_NONE:
439                         if ((*mutex)->m_owner == NULL) {
440                                 /* Lock the mutex for this thread: */
441                                 (*mutex)->m_owner = _thread_run;
442
443                                 /* Add to the list of owned mutexes: */
444                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
445                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
446                                     (*mutex), m_qe);
447
448                         } else if ((*mutex)->m_owner == _thread_run)
449                                 ret = mutex_self_lock(*mutex);
450                         else {
451                                 /*
452                                  * Join the queue of threads waiting to lock
453                                  * the mutex: 
454                                  */
455                                 mutex_queue_enq(*mutex, _thread_run);
456
457                                 /*
458                                  * Keep a pointer to the mutex this thread
459                                  * is waiting on:
460                                  */
461                                 _thread_run->data.mutex = *mutex;
462
463                                 /*
464                                  * Unlock the mutex structure and schedule the
465                                  * next thread:
466                                  */
467                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
468                                     &(*mutex)->lock, __FILE__, __LINE__);
469
470                                 /* Lock the mutex structure again: */
471                                 _SPINLOCK(&(*mutex)->lock);
472                         }
473                         break;
474
475                 /* POSIX priority inheritence mutex: */
476                 case PTHREAD_PRIO_INHERIT:
477                         /* Check if this mutex is not locked: */
478                         if ((*mutex)->m_owner == NULL) {
479                                 /* Lock the mutex for this thread: */
480                                 (*mutex)->m_owner = _thread_run;
481
482                                 /* Track number of priority mutexes owned: */
483                                 _thread_run->priority_mutex_count++;
484
485                                 /*
486                                  * The mutex takes on attributes of the
487                                  * running thread when there are no waiters.
488                                  */
489                                 (*mutex)->m_prio = _thread_run->active_priority;
490                                 (*mutex)->m_saved_prio =
491                                     _thread_run->inherited_priority;
492                                 _thread_run->inherited_priority =
493                                     (*mutex)->m_prio;
494
495                                 /* Add to the list of owned mutexes: */
496                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
497                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
498                                     (*mutex), m_qe);
499
500                         } else if ((*mutex)->m_owner == _thread_run)
501                                 ret = mutex_self_lock(*mutex);
502                         else {
503                                 /*
504                                  * Join the queue of threads waiting to lock
505                                  * the mutex: 
506                                  */
507                                 mutex_queue_enq(*mutex, _thread_run);
508
509                                 /*
510                                  * Keep a pointer to the mutex this thread
511                                  * is waiting on:
512                                  */
513                                 _thread_run->data.mutex = *mutex;
514
515                                 if (_thread_run->active_priority >
516                                     (*mutex)->m_prio)
517                                         /* Adjust priorities: */
518                                         mutex_priority_adjust(*mutex);
519
520                                 /*
521                                  * Unlock the mutex structure and schedule the
522                                  * next thread:
523                                  */
524                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
525                                     &(*mutex)->lock, __FILE__, __LINE__);
526
527                                 /* Lock the mutex structure again: */
528                                 _SPINLOCK(&(*mutex)->lock);
529                         }
530                         break;
531
532                 /* POSIX priority protection mutex: */
533                 case PTHREAD_PRIO_PROTECT:
534                         /* Check for a priority ceiling violation: */
535                         if (_thread_run->active_priority > (*mutex)->m_prio)
536                                 ret = EINVAL;
537
538                         /* Check if this mutex is not locked: */
539                         else if ((*mutex)->m_owner == NULL) {
540                                 /*
541                                  * Lock the mutex for the running
542                                  * thread:
543                                  */
544                                 (*mutex)->m_owner = _thread_run;
545
546                                 /* Track number of priority mutexes owned: */
547                                 _thread_run->priority_mutex_count++;
548
549                                 /*
550                                  * The running thread inherits the ceiling
551                                  * priority of the mutex and executes at that
552                                  * priority:
553                                  */
554                                 _thread_run->active_priority = (*mutex)->m_prio;
555                                 (*mutex)->m_saved_prio =
556                                     _thread_run->inherited_priority;
557                                 _thread_run->inherited_priority =
558                                     (*mutex)->m_prio;
559
560                                 /* Add to the list of owned mutexes: */
561                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
562                                 TAILQ_INSERT_TAIL(&_thread_run->mutexq,
563                                     (*mutex), m_qe);
564                         } else if ((*mutex)->m_owner == _thread_run)
565                                 ret = mutex_self_lock(*mutex);
566                         else {
567                                 /*
568                                  * Join the queue of threads waiting to lock
569                                  * the mutex: 
570                                  */
571                                 mutex_queue_enq(*mutex, _thread_run);
572
573                                 /*
574                                  * Keep a pointer to the mutex this thread
575                                  * is waiting on:
576                                  */
577                                 _thread_run->data.mutex = *mutex;
578
579                                 /* Clear any previous error: */
580                                 _thread_run->error = 0;
581
582                                 /*
583                                  * Unlock the mutex structure and schedule the
584                                  * next thread:
585                                  */
586                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
587                                     &(*mutex)->lock, __FILE__, __LINE__);
588
589                                 /* Lock the mutex structure again: */
590                                 _SPINLOCK(&(*mutex)->lock);
591
592                                 /*
593                                  * The threads priority may have changed while
594                                  * waiting for the mutex causing a ceiling
595                                  * violation.
596                                  */
597                                 ret = _thread_run->error;
598                                 _thread_run->error = 0;
599                         }
600                         break;
601
602                 /* Trap invalid mutex types: */
603                 default:
604                         /* Return an invalid argument error: */
605                         ret = EINVAL;
606                         break;
607                 }
608
609                 /*
610                  * Check to see if this thread was interrupted and
611                  * is still in the mutex queue of waiting threads:
612                  */
613                 if (_thread_run->interrupted != 0)
614                         mutex_queue_remove(*mutex, _thread_run);
615
616                 /* Unlock the mutex structure: */
617                 _SPINUNLOCK(&(*mutex)->lock);
618
619                 /*
620                  * Undefer and handle pending signals, yielding if
621                  * necessary:
622                  */
623                 _thread_kern_sig_undefer();
624
625                 if ((_thread_run->cancelflags & PTHREAD_CANCEL_NEEDED) != 0) {
626                         _thread_run->cancelflags &= ~PTHREAD_CANCEL_NEEDED;
627                         _thread_exit_cleanup();
628                         pthread_exit(PTHREAD_CANCELED);
629                 }
630         }
631
632         /* Return the completion status: */
633         return (ret);
634 }
635
636 int
637 pthread_mutex_unlock(pthread_mutex_t * mutex)
638 {
639         return (mutex_unlock_common(mutex, /* add reference */ 0));
640 }
641
642 int
643 _mutex_cv_unlock(pthread_mutex_t * mutex)
644 {
645         return (mutex_unlock_common(mutex, /* add reference */ 1));
646 }
647
648 int
649 _mutex_cv_lock(pthread_mutex_t * mutex)
650 {
651         int ret;
652         if ((ret = pthread_mutex_lock(mutex)) == 0)
653                 (*mutex)->m_refcount--;
654         return (ret);
655 }
656
657 static inline int
658 mutex_self_trylock(pthread_mutex_t mutex)
659 {
660         int ret = 0;
661
662         switch (mutex->m_type) {
663
664         /* case PTHREAD_MUTEX_DEFAULT: */
665         case PTHREAD_MUTEX_ERRORCHECK:
666         case PTHREAD_MUTEX_NORMAL:
667                 /*
668                  * POSIX specifies that mutexes should return EDEADLK if a
669                  * recursive lock is detected.
670                  */
671                 ret = EBUSY; 
672                 break;
673
674         case PTHREAD_MUTEX_RECURSIVE:
675                 /* Increment the lock count: */
676                 mutex->m_data.m_count++;
677                 break;
678
679         default:
680                 /* Trap invalid mutex types; */
681                 ret = EINVAL;
682         }
683
684         return(ret);
685 }
686
687 static inline int
688 mutex_self_lock(pthread_mutex_t mutex)
689 {
690         int ret = 0;
691
692         switch (mutex->m_type) {
693         /* case PTHREAD_MUTEX_DEFAULT: */
694         case PTHREAD_MUTEX_ERRORCHECK:
695                 /*
696                  * POSIX specifies that mutexes should return EDEADLK if a
697                  * recursive lock is detected.
698                  */
699                 ret = EDEADLK; 
700                 break;
701
702         case PTHREAD_MUTEX_NORMAL:
703                 /*
704                  * What SS2 define as a 'normal' mutex.  Intentionally
705                  * deadlock on attempts to get a lock you already own.
706                  */
707                 _thread_kern_sched_state_unlock(PS_DEADLOCK,
708                     &mutex->lock, __FILE__, __LINE__);
709                 break;
710
711         case PTHREAD_MUTEX_RECURSIVE:
712                 /* Increment the lock count: */
713                 mutex->m_data.m_count++;
714                 break;
715
716         default:
717                 /* Trap invalid mutex types; */
718                 ret = EINVAL;
719         }
720
721         return(ret);
722 }
723
/*
 * Common unlock path shared by pthread_mutex_unlock() and the internal
 * lock-handoff code.
 *
 * Verifies that the calling thread owns *mutex, releases one level of
 * the lock (decrementing the count for a recursive mutex held more than
 * once), and otherwise hands ownership to the highest-priority waiter,
 * fixing up inherited/ceiling priorities for the PRIO_INHERIT and
 * PRIO_PROTECT protocols.  When add_reference is non-zero and the
 * unlock succeeded, m_refcount is incremented before returning
 * (used by callers that must keep the mutex structure alive, e.g.
 * condition-variable waits -- TODO confirm against callers outside
 * this view).
 *
 * Returns 0 on success; EINVAL for a NULL/uninitialized mutex, an
 * unowned mutex, or an unknown protocol; EPERM when the mutex is owned
 * by another thread.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 1)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/*
					 * Allow the new owner of the mutex to
					 * run:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 1)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Allow the new owner of the mutex to
					 * run:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 1)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.  Wake the thread with
					 * an error instead of granting it the
					 * mutex:
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Allow the new owner of the mutex to
					 * run:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
1056
1057
1058 /*
1059  * This function is called when a change in base priority occurs for
1060  * a thread that is holding or waiting for a priority protection or
1061  * inheritence mutex.  A change in a threads base priority can effect
1062  * changes to active priorities of other threads and to the ordering
1063  * of mutex locking by waiting threads.
1064  *
1065  * This must be called while thread scheduling is deferred.
1066  */
1067 void
1068 _mutex_notify_priochange(pthread_t pthread)
1069 {
1070         /* Adjust the priorites of any owned priority mutexes: */
1071         if (pthread->priority_mutex_count > 0) {
1072                 /*
1073                  * Rescan the mutexes owned by this thread and correct
1074                  * their priorities to account for this threads change
1075                  * in priority.  This has the side effect of changing
1076                  * the threads active priority.
1077                  */
1078                 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1079         }
1080
1081         /*
1082          * If this thread is waiting on a priority inheritence mutex,
1083          * check for priority adjustments.  A change in priority can
1084          * also effect a ceiling violation(*) for a thread waiting on
1085          * a priority protection mutex; we don't perform the check here
1086          * as it is done in pthread_mutex_unlock.
1087          *
1088          * (*) It should be noted that a priority change to a thread
1089          *     _after_ taking and owning a priority ceiling mutex
1090          *     does not affect ownership of that mutex; the ceiling
1091          *     priority is only checked before mutex ownership occurs.
1092          */
1093         if (pthread->state == PS_MUTEX_WAIT) {
1094                 /* Lock the mutex structure: */
1095                 _SPINLOCK(&pthread->data.mutex->lock);
1096
1097                 /*
1098                  * Check to make sure this thread is still in the same state
1099                  * (the spinlock above can yield the CPU to another thread):
1100                  */
1101                 if (pthread->state == PS_MUTEX_WAIT) {
1102                         /*
1103                          * Remove and reinsert this thread into the list of
1104                          * waiting threads to preserve decreasing priority
1105                          * order.
1106                          */
1107                         mutex_queue_remove(pthread->data.mutex, pthread);
1108                         mutex_queue_enq(pthread->data.mutex, pthread);
1109
1110                         if (pthread->data.mutex->m_protocol ==
1111                              PTHREAD_PRIO_INHERIT) {
1112                                 /* Adjust priorities: */
1113                                 mutex_priority_adjust(pthread->data.mutex);
1114                         }
1115                 }
1116
1117                 /* Unlock the mutex structure: */
1118                 _SPINUNLOCK(&pthread->data.mutex->lock);
1119         }
1120 }
1121
/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a threads priority changes that is already in the mutex
 * waiting queue.
 *
 * Recomputes the priority of 'mutex' from its highest-priority waiter
 * and its owners priority, then walks up the chain of priority
 * inheritence mutexes (owner waits on another PI mutex, which has its
 * own owner, ...), re-queueing waiters and propagating raised
 * priorities until a mutex whose priority is unchanged is reached.
 *
 * NOTE(review): callers in this file invoke this with the mutex
 * spinlock held and scheduling deferred -- confirm for any new caller.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t	m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	while (m != NULL) {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning threads active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1220
/*
 * Recalculate the priorities of the priority inheritence mutexes owned
 * by 'pthread' and then fix the threads own inherited and active
 * priorities to match.
 *
 * When 'mutex' is NULL the scan starts at the head of the threads
 * owned-mutex list with no inherited priority; otherwise it starts at
 * the mutex following 'mutex' (which is assumed to be a priority
 * inheritence mutex with an already-correct priority), inheriting that
 * mutexes priority.  If the threads active priority changes as a
 * result and the thread is in the priority run queue, it is removed
 * and reinserted at its new priority.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t	m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
	}
}
1333
1334 void
1335 _mutex_unlock_private(pthread_t pthread)
1336 {
1337         struct pthread_mutex    *m, *m_next;
1338
1339         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1340                 m_next = TAILQ_NEXT(m, m_qe);
1341                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1342                         pthread_mutex_unlock(&m);
1343         }
1344 }
1345
1346 /*
1347  * Dequeue a waiting thread from the head of a mutex queue in descending
1348  * priority order.
1349  */
1350 static inline pthread_t
1351 mutex_queue_deq(pthread_mutex_t mutex)
1352 {
1353         pthread_t pthread;
1354
1355         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1356                 TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1357                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1358
1359                 /*
1360                  * Only exit the loop if the thread hasn't been
1361                  * cancelled.
1362                  */
1363                 if (pthread->interrupted == 0)
1364                         break;
1365         }
1366
1367         return(pthread);
1368 }
1369
1370 /*
1371  * Remove a waiting thread from a mutex queue in descending priority order.
1372  */
1373 static inline void
1374 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1375 {
1376         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1377                 TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
1378                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1379         }
1380 }
1381
1382 /*
1383  * Enqueue a waiting thread to a queue in descending priority order.
1384  */
1385 static inline void
1386 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1387 {
1388         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1389
1390         /*
1391          * For the common case of all threads having equal priority,
1392          * we perform a quick check against the priority of the thread
1393          * at the tail of the queue.
1394          */
1395         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1396                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe);
1397         else {
1398                 tid = TAILQ_FIRST(&mutex->m_queue);
1399                 while (pthread->active_priority <= tid->active_priority)
1400                         tid = TAILQ_NEXT(tid, qe);
1401                 TAILQ_INSERT_BEFORE(tid, pthread, qe);
1402         }
1403         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1404 }
1405
1406 #endif