/*
 * FreeBSD: lib/libpthread/thread/thr_mutex.c
 */
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 #include <stdlib.h>
35 #include <errno.h>
36 #include <string.h>
37 #include <sys/param.h>
38 #include <sys/queue.h>
39 #ifdef _THREAD_SAFE
40 #include <pthread.h>
41 #include "pthread_private.h"
42
#if defined(_PTHREADS_INVARIANTS)
/*
 * With invariant checking enabled, a mutex's queue-entry linkage is
 * explicitly nulled whenever the mutex is off every thread's
 * owned-mutex list, so the assertions below can catch a mutex that
 * is linked (or unlinked) when it should not be.
 */
#define _MUTEX_INIT_LINK(m)             do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
/* Panic unless the mutex is currently linked on an owner's list. */
#define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
/* Panic if the mutex is still linked on an owner's list. */
#define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#else
/* Invariant checking disabled: the macros compile to nothing. */
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
62
/*
 * Prototypes
 */
static inline int       mutex_self_trylock(pthread_mutex_t);
static inline int       mutex_self_lock(pthread_mutex_t);
static inline int       mutex_unlock_common(pthread_mutex_t *, int);
static void             mutex_priority_adjust(pthread_mutex_t);
static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);


/*
 * Serializes the one-time dynamic initialization of mutexes created
 * with PTHREAD_MUTEX_INITIALIZER (see init_static()).
 */
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
77
78 /* Reinitialize a mutex to defaults. */
79 int
80 _mutex_reinit(pthread_mutex_t * mutex)
81 {
82         int     ret = 0;
83
84         if (mutex == NULL)
85                 ret = EINVAL;
86         else if (*mutex == NULL)
87                 ret = pthread_mutex_init(mutex, NULL);
88         else {
89                 /*
90                  * Initialize the mutex structure:
91                  */
92                 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
93                 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
94                 TAILQ_INIT(&(*mutex)->m_queue);
95                 (*mutex)->m_owner = NULL;
96                 (*mutex)->m_data.m_count = 0;
97                 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE;
98                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
99                 (*mutex)->m_refcount = 0;
100                 (*mutex)->m_prio = 0;
101                 (*mutex)->m_saved_prio = 0;
102                 _MUTEX_INIT_LINK(*mutex);
103                 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
104         }
105         return (ret);
106 }
107
108 int
109 pthread_mutex_init(pthread_mutex_t * mutex,
110                    const pthread_mutexattr_t * mutex_attr)
111 {
112         enum pthread_mutextype  type;
113         int             protocol;
114         int             ceiling;
115         pthread_mutex_t pmutex;
116         int             ret = 0;
117
118         if (mutex == NULL)
119                 ret = EINVAL;
120
121         /* Check if default mutex attributes: */
122         else if (mutex_attr == NULL || *mutex_attr == NULL) {
123                 /* Default to a (error checking) POSIX mutex: */
124                 type = PTHREAD_MUTEX_ERRORCHECK;
125                 protocol = PTHREAD_PRIO_NONE;
126                 ceiling = PTHREAD_MAX_PRIORITY;
127         }
128
129         /* Check mutex type: */
130         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
131             ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX))
132                 /* Return an invalid argument error: */
133                 ret = EINVAL;
134
135         /* Check mutex protocol: */
136         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
137             ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
138                 /* Return an invalid argument error: */
139                 ret = EINVAL;
140
141         else {
142                 /* Use the requested mutex type and protocol: */
143                 type = (*mutex_attr)->m_type;
144                 protocol = (*mutex_attr)->m_protocol;
145                 ceiling = (*mutex_attr)->m_ceiling;
146         }
147
148         /* Check no errors so far: */
149         if (ret == 0) {
150                 if ((pmutex = (pthread_mutex_t)
151                     malloc(sizeof(struct pthread_mutex))) == NULL)
152                         ret = ENOMEM;
153                 else {
154                         /* Reset the mutex flags: */
155                         pmutex->m_flags = 0;
156
157                         /* Process according to mutex type: */
158                         switch (type) {
159                         /* case PTHREAD_MUTEX_DEFAULT: */
160                         case PTHREAD_MUTEX_ERRORCHECK:
161                         case PTHREAD_MUTEX_NORMAL:
162                                 /* Nothing to do here. */
163                                 break;
164
165                         /* Single UNIX Spec 2 recursive mutex: */
166                         case PTHREAD_MUTEX_RECURSIVE:
167                                 /* Reset the mutex count: */
168                                 pmutex->m_data.m_count = 0;
169                                 break;
170
171                         /* Trap invalid mutex types: */
172                         default:
173                                 /* Return an invalid argument error: */
174                                 ret = EINVAL;
175                                 break;
176                         }
177                         if (ret == 0) {
178                                 /* Initialise the rest of the mutex: */
179                                 TAILQ_INIT(&pmutex->m_queue);
180                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
181                                 pmutex->m_owner = NULL;
182                                 pmutex->m_type = type;
183                                 pmutex->m_protocol = protocol;
184                                 pmutex->m_refcount = 0;
185                                 if (protocol == PTHREAD_PRIO_PROTECT)
186                                         pmutex->m_prio = ceiling;
187                                 else
188                                         pmutex->m_prio = 0;
189                                 pmutex->m_saved_prio = 0;
190                                 _MUTEX_INIT_LINK(pmutex);
191                                 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
192                                 *mutex = pmutex;
193                         } else {
194                                 free(pmutex);
195                                 *mutex = NULL;
196                         }
197                 }
198         }
199         /* Return the completion status: */
200         return(ret);
201 }
202
/*
 * Destroy a mutex and free its storage.  Fails with EINVAL for a NULL
 * or uninitialized handle and EBUSY if the mutex is locked, has
 * waiters, or is referenced by a condition-variable wait.
 */
int
pthread_mutex_destroy(pthread_mutex_t * mutex)
{
	int     ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			_SPINUNLOCK(&(*mutex)->lock);
		}
		else {
			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			_MUTEX_ASSERT_NOT_OWNED(*mutex);
			/*
			 * Note: the spinlock is not released on this path;
			 * it is embedded in the structure being freed, so
			 * there is nothing left to unlock after free().
			 */
			free(*mutex);

			/*
			 * Leave the caller's pointer NULL now that
			 * the mutex has been destroyed:
			 */
			*mutex = NULL;
		}
	}

	/* Return the completion status: */
	return (ret);
}
244
245 static int
246 init_static(pthread_mutex_t *mutex)
247 {
248         int     ret;
249
250         _SPINLOCK(&static_init_lock);
251
252         if (*mutex == NULL)
253                 ret = pthread_mutex_init(mutex, NULL);
254         else
255                 ret = 0;
256
257         _SPINUNLOCK(&static_init_lock);
258
259         return(ret);
260 }
261
/*
 * Attempt to lock a mutex without blocking.  A statically initialized
 * mutex is dynamically initialized on first use.  Returns 0 when the
 * lock is acquired, EBUSY when another thread holds it, EINVAL for a
 * bad handle, protocol, or priority-ceiling violation; a self-lock is
 * resolved by mutex_self_trylock() according to the mutex type.
 */
int
pthread_mutex_trylock(pthread_mutex_t * mutex)
{
	int     ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			_MUTEX_INIT_LINK(*mutex);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
		}

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE: 
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				/* Self-lock; outcome depends on mutex type: */
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on the attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				/*
				 * NOTE(review): unlike pthread_mutex_lock(),
				 * this path does not update
				 * _thread_run->inherited_priority — confirm
				 * this asymmetry is intentional.
				 */

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for the running thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_trylock(*mutex);
			else
				/* Return a busy error: */
				ret = EBUSY;
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
399
/*
 * Lock a mutex, blocking until it is available.  The thread library
 * is bootstrapped on first use, and statically initialized mutexes
 * are dynamically initialized here.  Returns 0 on success, EINVAL
 * for a bad handle/protocol or priority-ceiling violation, and the
 * per-type result of mutex_self_lock() for recursive self-locks.
 */
int
pthread_mutex_lock(pthread_mutex_t * mutex)
{
	int     ret = 0;

	/*
	 * This may be the first library call; make sure the thread
	 * library is initialized before touching scheduler state:
	 */
	if (_thread_initial == NULL)
		_thread_init();

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if (*mutex != NULL || (ret = init_static(mutex)) == 0) {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*mutex)->m_queue);
			(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
			_MUTEX_INIT_LINK(*mutex);
		}

		/* Reset the interrupted flag: */
		_thread_run->interrupted = 0;

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/*
				 * Lock the mutex structure again.  There is
				 * no retry loop here: presumably the
				 * unlocking thread hands ownership directly
				 * to the woken waiter (see
				 * mutex_unlock_common) — confirm.
				 */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*mutex)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 */
				(*mutex)->m_prio = _thread_run->active_priority;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);

			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/*
				 * Propagate this thread's priority to the
				 * owner if we would otherwise wait behind a
				 * lower-priority thread:
				 */
				if (_thread_run->active_priority >
				    (*mutex)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(*mutex);

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (_thread_run->active_priority > (*mutex)->m_prio)
				ret = EINVAL;

			/* Check if this mutex is not locked: */
			else if ((*mutex)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*mutex)->m_owner = _thread_run;

				/* Track number of priority mutexes owned: */
				_thread_run->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority:
				 */
				_thread_run->active_priority = (*mutex)->m_prio;
				(*mutex)->m_saved_prio =
				    _thread_run->inherited_priority;
				_thread_run->inherited_priority =
				    (*mutex)->m_prio;

				/* Add to the list of owned mutexes: */
				_MUTEX_ASSERT_NOT_OWNED(*mutex);
				TAILQ_INSERT_TAIL(&_thread_run->mutexq,
				    (*mutex), m_qe);
			} else if ((*mutex)->m_owner == _thread_run)
				ret = mutex_self_lock(*mutex);
			else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex: 
				 */
				mutex_queue_enq(*mutex, _thread_run);

				/*
				 * Keep a pointer to the mutex this thread
				 * is waiting on:
				 */
				_thread_run->data.mutex = *mutex;

				/* Clear any previous error: */
				_thread_run->error = 0;

				/*
				 * Unlock the mutex structure and schedule the
				 * next thread:
				 */
				_thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
				    &(*mutex)->lock, __FILE__, __LINE__);

				/* Lock the mutex structure again: */
				_SPINLOCK(&(*mutex)->lock);

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = _thread_run->error;
				_thread_run->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		/*
		 * Check to see if this thread was interrupted and
		 * is still in the mutex queue of waiting threads:
		 */
		if (_thread_run->interrupted != 0)
			mutex_queue_remove(*mutex, _thread_run);

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();

		/* Run any interruption continuation (e.g. cancellation): */
		if (_thread_run->interrupted != 0 &&
		    _thread_run->continuation != NULL)
			_thread_run->continuation((void *) _thread_run);
	}

	/* Return the completion status: */
	return (ret);
}
636
637 int
638 pthread_mutex_unlock(pthread_mutex_t * mutex)
639 {
640         return (mutex_unlock_common(mutex, /* add reference */ 0));
641 }
642
643 int
644 _mutex_cv_unlock(pthread_mutex_t * mutex)
645 {
646         return (mutex_unlock_common(mutex, /* add reference */ 1));
647 }
648
649 int
650 _mutex_cv_lock(pthread_mutex_t * mutex)
651 {
652         int     ret;
653         if ((ret = pthread_mutex_lock(mutex)) == 0)
654                 (*mutex)->m_refcount--;
655         return (ret);
656 }
657
658 static inline int
659 mutex_self_trylock(pthread_mutex_t mutex)
660 {
661         int     ret = 0;
662
663         switch (mutex->m_type) {
664
665         /* case PTHREAD_MUTEX_DEFAULT: */
666         case PTHREAD_MUTEX_ERRORCHECK:
667         case PTHREAD_MUTEX_NORMAL:
668                 /*
669                  * POSIX specifies that mutexes should return EDEADLK if a
670                  * recursive lock is detected.
671                  */
672                 ret = EBUSY; 
673                 break;
674
675         case PTHREAD_MUTEX_RECURSIVE:
676                 /* Increment the lock count: */
677                 mutex->m_data.m_count++;
678                 break;
679
680         default:
681                 /* Trap invalid mutex types; */
682                 ret = EINVAL;
683         }
684
685         return(ret);
686 }
687
/*
 * Handle a blocking lock on a mutex the calling thread already owns.
 * The outcome is dictated by the mutex type: error-checking mutexes
 * report EDEADLK, "normal" mutexes deliberately deadlock, and
 * recursive mutexes increment their lock count.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK; 
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		/*
		 * Parks the thread in PS_DEADLOCK; presumably it never
		 * resumes, so the break below is unreachable in practice.
		 */
		_thread_kern_sched_state_unlock(PS_DEADLOCK,
		    &mutex->lock, __FILE__, __LINE__);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return(ret);
}
724
/*
 * Common unlock path shared by the public unlock entry points and the
 * condition variable code.  Releases *mutex on behalf of the running
 * thread (_thread_run) and, if any threads are queued on the mutex,
 * hands ownership directly to the thread dequeued from the wait queue.
 * If add_reference is non-zero, the mutex reference count is bumped
 * after a successful unlock (the caller wants the mutex kept alive,
 * e.g. across a condition wait).
 *
 * Returns 0 on success, EINVAL for a NULL/invalid mutex, no owner, or
 * an unknown protocol, and EPERM when the caller is not the owner.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex protocol: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}

					/*
					 * Add the mutex to the threads list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning threads inherited priority
					 * now becomes his active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != _thread_run) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				_thread_run->inherited_priority =
					(*mutex)->m_saved_prio;
				_thread_run->active_priority =
				    MAX(_thread_run->inherited_priority,
				    _thread_run->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				_thread_run->priority_mutex_count--;

				/* Remove the mutex from the threads queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority
					 * been lowered and/or this threads
					 * priority has been raised subsequent
					 * to this thread being queued on the
					 * waiting list.
					 *
					 * Wake the thread with EINVAL rather
					 * than granting it ownership.
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the threads list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning threads inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Unless the new owner of the mutex is
					 * currently suspended, allow the owner
					 * to run.  If the thread is suspended,
					 * make a note that the thread isn't in
					 * a wait queue any more.
					 */
					if (((*mutex)->m_owner->state !=
					    PS_SUSPENDED)) {
						PTHREAD_NEW_STATE((*mutex)->m_owner,
						    PS_RUNNING);
					} else {
						(*mutex)->m_owner->suspended =
						    SUSP_NOWAIT;
					}
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
1084
1085
1086 /*
1087  * This function is called when a change in base priority occurs for
1088  * a thread that is holding or waiting for a priority protection or
1089  * inheritence mutex.  A change in a threads base priority can effect
1090  * changes to active priorities of other threads and to the ordering
1091  * of mutex locking by waiting threads.
1092  *
1093  * This must be called while thread scheduling is deferred.
1094  */
1095 void
1096 _mutex_notify_priochange(pthread_t pthread)
1097 {
1098         /* Adjust the priorites of any owned priority mutexes: */
1099         if (pthread->priority_mutex_count > 0) {
1100                 /*
1101                  * Rescan the mutexes owned by this thread and correct
1102                  * their priorities to account for this threads change
1103                  * in priority.  This has the side effect of changing
1104                  * the threads active priority.
1105                  */
1106                 mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
1107         }
1108
1109         /*
1110          * If this thread is waiting on a priority inheritence mutex,
1111          * check for priority adjustments.  A change in priority can
1112          * also effect a ceiling violation(*) for a thread waiting on
1113          * a priority protection mutex; we don't perform the check here
1114          * as it is done in pthread_mutex_unlock.
1115          *
1116          * (*) It should be noted that a priority change to a thread
1117          *     _after_ taking and owning a priority ceiling mutex
1118          *     does not affect ownership of that mutex; the ceiling
1119          *     priority is only checked before mutex ownership occurs.
1120          */
1121         if (pthread->state == PS_MUTEX_WAIT) {
1122                 /* Lock the mutex structure: */
1123                 _SPINLOCK(&pthread->data.mutex->lock);
1124
1125                 /*
1126                  * Check to make sure this thread is still in the same state
1127                  * (the spinlock above can yield the CPU to another thread):
1128                  */
1129                 if (pthread->state == PS_MUTEX_WAIT) {
1130                         /*
1131                          * Remove and reinsert this thread into the list of
1132                          * waiting threads to preserve decreasing priority
1133                          * order.
1134                          */
1135                         mutex_queue_remove(pthread->data.mutex, pthread);
1136                         mutex_queue_enq(pthread->data.mutex, pthread);
1137
1138                         if (pthread->data.mutex->m_protocol ==
1139                              PTHREAD_PRIO_INHERIT) {
1140                                 /* Adjust priorities: */
1141                                 mutex_priority_adjust(pthread->data.mutex);
1142                         }
1143                 }
1144
1145                 /* Unlock the mutex structure: */
1146                 _SPINUNLOCK(&pthread->data.mutex->lock);
1147         }
1148 }
1149
1150 /*
1151  * Called when a new thread is added to the mutex waiting queue or
1152  * when a threads priority changes that is already in the mutex
1153  * waiting queue.
1154  */
1155 static void
1156 mutex_priority_adjust(pthread_mutex_t mutex)
1157 {
1158         pthread_t       pthread_next, pthread = mutex->m_owner;
1159         int             temp_prio;
1160         pthread_mutex_t m = mutex;
1161
1162         /*
1163          * Calculate the mutex priority as the maximum of the highest
1164          * active priority of any waiting threads and the owning threads
1165          * active priority(*).
1166          *
1167          * (*) Because the owning threads current active priority may
1168          *     reflect priority inherited from this mutex (and the mutex
1169          *     priority may have changed) we must recalculate the active
1170          *     priority based on the threads saved inherited priority
1171          *     and its base priority.
1172          */
1173         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1174         temp_prio = MAX(pthread_next->active_priority,
1175             MAX(m->m_saved_prio, pthread->base_priority));
1176
1177         /* See if this mutex really needs adjusting: */
1178         if (temp_prio == m->m_prio)
1179                 /* No need to propagate the priority: */
1180                 return;
1181
1182         /* Set new priority of the mutex: */
1183         m->m_prio = temp_prio;
1184
1185         while (m != NULL) {
1186                 /*
1187                  * Save the threads priority before rescanning the
1188                  * owned mutexes:
1189                  */
1190                 temp_prio = pthread->active_priority;
1191
1192                 /*
1193                  * Fix the priorities for all the mutexes this thread has
1194                  * locked since taking this mutex.  This also has a
1195                  * potential side-effect of changing the threads priority.
1196                  */
1197                 mutex_rescan_owned(pthread, m);
1198
1199                 /*
1200                  * If the thread is currently waiting on a mutex, check
1201                  * to see if the threads new priority has affected the
1202                  * priority of the mutex.
1203                  */
1204                 if ((temp_prio != pthread->active_priority) &&
1205                     (pthread->state == PS_MUTEX_WAIT) &&
1206                     (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
1207                         /* Grab the mutex this thread is waiting on: */
1208                         m = pthread->data.mutex;
1209
1210                         /*
1211                          * The priority for this thread has changed.  Remove
1212                          * and reinsert this thread into the list of waiting
1213                          * threads to preserve decreasing priority order.
1214                          */
1215                         mutex_queue_remove(m, pthread);
1216                         mutex_queue_enq(m, pthread);
1217
1218                         /* Grab the waiting thread with highest priority: */
1219                         pthread_next = TAILQ_FIRST(&m->m_queue);
1220
1221                         /*
1222                          * Calculate the mutex priority as the maximum of the
1223                          * highest active priority of any waiting threads and
1224                          * the owning threads active priority.
1225                          */
1226                         temp_prio = MAX(pthread_next->active_priority,
1227                             MAX(m->m_saved_prio, m->m_owner->base_priority));
1228
1229                         if (temp_prio != m->m_prio) {
1230                                 /*
1231                                  * The priority needs to be propagated to the
1232                                  * mutex this thread is waiting on and up to
1233                                  * the owner of that mutex.
1234                                  */
1235                                 m->m_prio = temp_prio;
1236                                 pthread = m->m_owner;
1237                         }
1238                         else
1239                                 /* We're done: */
1240                                 m = NULL;
1241
1242                 }
1243                 else
1244                         /* We're done: */
1245                         m = NULL;
1246         }
1247 }
1248
1249 static void
1250 mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
1251 {
1252         int             active_prio, inherited_prio;
1253         pthread_mutex_t m;
1254         pthread_t       pthread_next;
1255
1256         /*
1257          * Start walking the mutexes the thread has taken since
1258          * taking this mutex.
1259          */
1260         if (mutex == NULL) {
1261                 /*
1262                  * A null mutex means start at the beginning of the owned
1263                  * mutex list.
1264                  */
1265                 m = TAILQ_FIRST(&pthread->mutexq);
1266
1267                 /* There is no inherited priority yet. */
1268                 inherited_prio = 0;
1269         }
1270         else {
1271                 /*
1272                  * The caller wants to start after a specific mutex.  It
1273                  * is assumed that this mutex is a priority inheritence
1274                  * mutex and that its priority has been correctly
1275                  * calculated.
1276                  */
1277                 m = TAILQ_NEXT(mutex, m_qe);
1278
1279                 /* Start inheriting priority from the specified mutex. */
1280                 inherited_prio = mutex->m_prio;
1281         }
1282         active_prio = MAX(inherited_prio, pthread->base_priority);
1283
1284         while (m != NULL) {
1285                 /*
1286                  * We only want to deal with priority inheritence
1287                  * mutexes.  This might be optimized by only placing
1288                  * priority inheritence mutexes into the owned mutex
1289                  * list, but it may prove to be useful having all
1290                  * owned mutexes in this list.  Consider a thread
1291                  * exiting while holding mutexes...
1292                  */
1293                 if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
1294                         /*
1295                          * Fix the owners saved (inherited) priority to
1296                          * reflect the priority of the previous mutex.
1297                          */
1298                         m->m_saved_prio = inherited_prio;
1299
1300                         if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
1301                                 /* Recalculate the priority of the mutex: */
1302                                 m->m_prio = MAX(active_prio,
1303                                      pthread_next->active_priority);
1304                         else
1305                                 m->m_prio = active_prio;
1306
1307                         /* Recalculate new inherited and active priorities: */
1308                         inherited_prio = m->m_prio;
1309                         active_prio = MAX(m->m_prio, pthread->base_priority);
1310                 }
1311
1312                 /* Advance to the next mutex owned by this thread: */
1313                 m = TAILQ_NEXT(m, m_qe);
1314         }
1315
1316         /*
1317          * Fix the threads inherited priority and recalculate its
1318          * active priority.
1319          */
1320         pthread->inherited_priority = inherited_prio;
1321         active_prio = MAX(inherited_prio, pthread->base_priority);
1322
1323         if (active_prio != pthread->active_priority) {
1324                 /*
1325                  * If this thread is in the priority queue, it must be
1326                  * removed and reinserted for its new priority.
1327                  */
1328                 if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
1329                         /*
1330                          * Remove the thread from the priority queue
1331                          * before changing its priority:
1332                          */
1333                         PTHREAD_PRIOQ_REMOVE(pthread);
1334
1335                         /*
1336                          * POSIX states that if the priority is being
1337                          * lowered, the thread must be inserted at the
1338                          * head of the queue for its priority if it owns
1339                          * any priority protection or inheritence mutexes.
1340                          */
1341                         if ((active_prio < pthread->active_priority) &&
1342                             (pthread->priority_mutex_count > 0)) {
1343                                 /* Set the new active priority. */
1344                                 pthread->active_priority = active_prio;
1345
1346                                 PTHREAD_PRIOQ_INSERT_HEAD(pthread);
1347                         }
1348                         else {
1349                                 /* Set the new active priority. */
1350                                 pthread->active_priority = active_prio;
1351
1352                                 PTHREAD_PRIOQ_INSERT_TAIL(pthread);
1353                         }
1354                 }
1355                 else {
1356                         /* Set the new active priority. */
1357                         pthread->active_priority = active_prio;
1358                 }
1359         }
1360 }
1361
1362 void
1363 _mutex_unlock_private(pthread_t pthread)
1364 {
1365         struct pthread_mutex    *m, *m_next;
1366
1367         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1368                 m_next = TAILQ_NEXT(m, m_qe);
1369                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1370                         pthread_mutex_unlock(&m);
1371         }
1372 }
1373
1374 void
1375 _mutex_lock_backout(pthread_t pthread)
1376 {
1377         struct pthread_mutex    *mutex;
1378
1379         /*
1380          * Defer signals to protect the scheduling queues from
1381          * access by the signal handler:
1382          */
1383         _thread_kern_sig_defer();
1384         if (pthread->state == PS_MUTEX_WAIT) {
1385                 mutex = pthread->data.mutex;
1386
1387                 /* Lock the mutex structure: */
1388                 _SPINLOCK(&mutex->lock);
1389
1390                 mutex_queue_remove(mutex, pthread);
1391
1392                 /* This thread is no longer waiting for the mutex: */
1393                 mutex->m_owner->data.mutex = NULL;
1394
1395                 /* Unlock the mutex structure: */
1396                 _SPINUNLOCK(&mutex->lock);
1397
1398         }
1399         /*
1400          * Undefer and handle pending signals, yielding if
1401          * necessary:
1402          */
1403         _thread_kern_sig_undefer();
1404 }
1405
1406 /*
1407  * Dequeue a waiting thread from the head of a mutex queue in descending
1408  * priority order.
1409  */
1410 static inline pthread_t
1411 mutex_queue_deq(pthread_mutex_t mutex)
1412 {
1413         pthread_t pthread;
1414
1415         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1416                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1417                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1418
1419                 /*
1420                  * Only exit the loop if the thread hasn't been
1421                  * cancelled.
1422                  */
1423                 if (pthread->interrupted == 0)
1424                         break;
1425         }
1426
1427         return(pthread);
1428 }
1429
1430 /*
1431  * Remove a waiting thread from a mutex queue in descending priority order.
1432  */
1433 static inline void
1434 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1435 {
1436         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1437                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1438                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1439         }
1440 }
1441
1442 /*
1443  * Enqueue a waiting thread to a queue in descending priority order.
1444  */
1445 static inline void
1446 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1447 {
1448         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1449
1450         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1451         /*
1452          * For the common case of all threads having equal priority,
1453          * we perform a quick check against the priority of the thread
1454          * at the tail of the queue.
1455          */
1456         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1457                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1458         else {
1459                 tid = TAILQ_FIRST(&mutex->m_queue);
1460                 while (pthread->active_priority <= tid->active_priority)
1461                         tid = TAILQ_NEXT(tid, sqe);
1462                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1463         }
1464         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1465 }
1466
1467 #endif