/*
 * Source: FreeBSD releng/8.1 — lib/libc_r/uthread/uthread_mutex.c
 * (copied from stable/8 in preparation for 8.1-RC1).
 */
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #include <stdlib.h>
32 #include <errno.h>
33 #include <string.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <pthread.h>
37 #include "pthread_private.h"
38
/*
 * Invariant-checking helpers.  When the library is built with
 * _PTHREADS_INVARIANTS these maintain and verify the mutex's linkage
 * (m_qe) on the owning thread's mutex queue; otherwise they compile
 * away to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
/* Clear the queue-entry links: marks the mutex as "not on any list". */
#define _MUTEX_INIT_LINK(m)             do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
/* Panic unless the mutex is currently linked onto an owner's list. */
#define _MUTEX_ASSERT_IS_OWNED(m)       do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
/* Panic if the mutex is still linked onto an owner's list. */
#define _MUTEX_ASSERT_NOT_OWNED(m)      do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif
58
59 /*
60  * Prototypes
61  */
62 static inline int       mutex_self_trylock(pthread_mutex_t);
63 static inline int       mutex_self_lock(pthread_mutex_t);
64 static inline int       mutex_unlock_common(pthread_mutex_t *, int);
65 static void             mutex_priority_adjust(pthread_mutex_t);
66 static void             mutex_rescan_owned (pthread_t, pthread_mutex_t);
67 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
68 static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
69 static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
70
71
72 static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;
73
74 static struct pthread_mutex_attr        static_mutex_attr =
75     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
76 static pthread_mutexattr_t              static_mattr = &static_mutex_attr;
77
78 /* Single underscore versions provided for libc internal usage: */
79 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
80 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
81
82 /* No difference between libc and application usage of these: */
83 __weak_reference(_pthread_mutex_init, pthread_mutex_init);
84 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
85 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
86
87
88 /*
89  * Reinitialize a private mutex; this is only used for internal mutexes.
90  */
91 int
92 _mutex_reinit(pthread_mutex_t * mutex)
93 {
94         int     ret = 0;
95
96         if (mutex == NULL)
97                 ret = EINVAL;
98         else if (*mutex == NULL)
99                 ret = _pthread_mutex_init(mutex, NULL);
100         else {
101                 /*
102                  * Initialize the mutex structure:
103                  */
104                 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT;
105                 (*mutex)->m_protocol = PTHREAD_PRIO_NONE;
106                 TAILQ_INIT(&(*mutex)->m_queue);
107                 (*mutex)->m_owner = NULL;
108                 (*mutex)->m_data.m_count = 0;
109                 (*mutex)->m_flags |= MUTEX_FLAGS_INITED | MUTEX_FLAGS_PRIVATE;
110                 (*mutex)->m_refcount = 0;
111                 (*mutex)->m_prio = 0;
112                 (*mutex)->m_saved_prio = 0;
113                 _MUTEX_INIT_LINK(*mutex);
114                 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock));
115         }
116         return (ret);
117 }
118
119 int
120 _pthread_mutex_init(pthread_mutex_t * mutex,
121                    const pthread_mutexattr_t * mutex_attr)
122 {
123         enum pthread_mutextype  type;
124         int             protocol;
125         int             ceiling;
126         int             flags;
127         pthread_mutex_t pmutex;
128         int             ret = 0;
129
130         if (mutex == NULL)
131                 ret = EINVAL;
132
133         /* Check if default mutex attributes: */
134         if (mutex_attr == NULL || *mutex_attr == NULL) {
135                 /* Default to a (error checking) POSIX mutex: */
136                 type = PTHREAD_MUTEX_ERRORCHECK;
137                 protocol = PTHREAD_PRIO_NONE;
138                 ceiling = PTHREAD_MAX_PRIORITY;
139                 flags = 0;
140         }
141
142         /* Check mutex type: */
143         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
144             ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
145                 /* Return an invalid argument error: */
146                 ret = EINVAL;
147
148         /* Check mutex protocol: */
149         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
150             ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
151                 /* Return an invalid argument error: */
152                 ret = EINVAL;
153
154         else {
155                 /* Use the requested mutex type and protocol: */
156                 type = (*mutex_attr)->m_type;
157                 protocol = (*mutex_attr)->m_protocol;
158                 ceiling = (*mutex_attr)->m_ceiling;
159                 flags = (*mutex_attr)->m_flags;
160         }
161
162         /* Check no errors so far: */
163         if (ret == 0) {
164                 if ((pmutex = (pthread_mutex_t)
165                     malloc(sizeof(struct pthread_mutex))) == NULL)
166                         ret = ENOMEM;
167                 else {
168                         /* Set the mutex flags: */
169                         pmutex->m_flags = flags;
170
171                         /* Process according to mutex type: */
172                         switch (type) {
173                         /* case PTHREAD_MUTEX_DEFAULT: */
174                         case PTHREAD_MUTEX_ERRORCHECK:
175                         case PTHREAD_MUTEX_NORMAL:
176                                 /* Nothing to do here. */
177                                 break;
178
179                         /* Single UNIX Spec 2 recursive mutex: */
180                         case PTHREAD_MUTEX_RECURSIVE:
181                                 /* Reset the mutex count: */
182                                 pmutex->m_data.m_count = 0;
183                                 break;
184
185                         /* Trap invalid mutex types: */
186                         default:
187                                 /* Return an invalid argument error: */
188                                 ret = EINVAL;
189                                 break;
190                         }
191                         if (ret == 0) {
192                                 /* Initialise the rest of the mutex: */
193                                 TAILQ_INIT(&pmutex->m_queue);
194                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
195                                 pmutex->m_owner = NULL;
196                                 pmutex->m_type = type;
197                                 pmutex->m_protocol = protocol;
198                                 pmutex->m_refcount = 0;
199                                 if (protocol == PTHREAD_PRIO_PROTECT)
200                                         pmutex->m_prio = ceiling;
201                                 else
202                                         pmutex->m_prio = 0;
203                                 pmutex->m_saved_prio = 0;
204                                 _MUTEX_INIT_LINK(pmutex);
205                                 memset(&pmutex->lock, 0, sizeof(pmutex->lock));
206                                 *mutex = pmutex;
207                         } else {
208                                 free(pmutex);
209                                 *mutex = NULL;
210                         }
211                 }
212         }
213         /* Return the completion status: */
214         return (ret);
215 }
216
217 int
218 _pthread_mutex_destroy(pthread_mutex_t * mutex)
219 {
220         int     ret = 0;
221
222         if (mutex == NULL || *mutex == NULL)
223                 ret = EINVAL;
224         else {
225                 /* Lock the mutex structure: */
226                 _SPINLOCK(&(*mutex)->lock);
227
228                 /*
229                  * Check to see if this mutex is in use:
230                  */
231                 if (((*mutex)->m_owner != NULL) ||
232                     (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
233                     ((*mutex)->m_refcount != 0)) {
234                         ret = EBUSY;
235
236                         /* Unlock the mutex structure: */
237                         _SPINUNLOCK(&(*mutex)->lock);
238                 }
239                 else {
240                         /*
241                          * Free the memory allocated for the mutex
242                          * structure:
243                          */
244                         _MUTEX_ASSERT_NOT_OWNED(*mutex);
245                         free(*mutex);
246
247                         /*
248                          * Leave the caller's pointer NULL now that
249                          * the mutex has been destroyed:
250                          */
251                         *mutex = NULL;
252                 }
253         }
254
255         /* Return the completion status: */
256         return (ret);
257 }
258
259 static int
260 init_static(pthread_mutex_t *mutex)
261 {
262         int     ret;
263
264         _SPINLOCK(&static_init_lock);
265
266         if (*mutex == NULL)
267                 ret = _pthread_mutex_init(mutex, NULL);
268         else
269                 ret = 0;
270
271         _SPINUNLOCK(&static_init_lock);
272
273         return (ret);
274 }
275
276 static int
277 init_static_private(pthread_mutex_t *mutex)
278 {
279         int     ret;
280
281         _SPINLOCK(&static_init_lock);
282
283         if (*mutex == NULL)
284                 ret = _pthread_mutex_init(mutex, &static_mattr);
285         else
286                 ret = 0;
287
288         _SPINUNLOCK(&static_init_lock);
289
290         return (ret);
291 }
292
/*
 * Common non-blocking acquisition path shared by the libc and
 * application trylock entry points.  Returns 0 on success, EBUSY when
 * another thread owns the mutex, EINVAL for a bad protocol or a
 * priority-ceiling violation, or a type-specific result for locking a
 * mutex the caller already owns (see mutex_self_trylock()).
 */
static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in pthread_mutex_trylock_basic");

        /*
         * Defer signals to protect the scheduling queues from
         * access by the signal handler:
         */
        _thread_kern_sig_defer();

        /* Lock the mutex structure: */
        _SPINLOCK(&(*mutex)->lock);

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                TAILQ_INIT(&(*mutex)->m_queue);
                _MUTEX_INIT_LINK(*mutex);
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
        }

        /* Process according to mutex protocol: */
        switch ((*mutex)->m_protocol) {
        /* Default POSIX mutex: */
        case PTHREAD_PRIO_NONE: 
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        /* Add to the list of owned mutexes: */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        /* Recursive attempt; result depends on mutex type. */
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority inheritence mutex: */
        case PTHREAD_PRIO_INHERIT:
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The mutex takes on the attributes of the
                         * running thread when there are no waiters.
                         */
                        (*mutex)->m_prio = curthread->active_priority;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;

                        /* Add to the list of owned mutexes: */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority protection mutex: */
        case PTHREAD_PRIO_PROTECT:
                /* Check for a priority ceiling violation: */
                if (curthread->active_priority > (*mutex)->m_prio)
                        ret = EINVAL;

                /* Check if this mutex is not locked: */
                else if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The running thread inherits the ceiling
                         * priority of the mutex and executes at that
                         * priority.
                         */
                        curthread->active_priority = (*mutex)->m_prio;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;
                        curthread->inherited_priority =
                            (*mutex)->m_prio;

                        /* Add to the list of owned mutexes: */
                        _MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* Trap invalid mutex types: */
        default:
                /* Return an invalid argument error: */
                ret = EINVAL;
                break;
        }

        /* Unlock the mutex structure: */
        _SPINUNLOCK(&(*mutex)->lock);

        /*
         * Undefer and handle pending signals, yielding if
         * necessary:
         */
        _thread_kern_sig_undefer();

        /* Return the completion status: */
        return (ret);
}
425
426 int
427 __pthread_mutex_trylock(pthread_mutex_t *mutex)
428 {
429         int     ret = 0;
430
431         if (mutex == NULL)
432                 ret = EINVAL;
433
434         /*
435          * If the mutex is statically initialized, perform the dynamic
436          * initialization:
437          */
438         else if ((*mutex != NULL) || (ret = init_static(mutex)) == 0)
439                 ret = mutex_trylock_common(mutex);
440
441         return (ret);
442 }
443
444 int
445 _pthread_mutex_trylock(pthread_mutex_t *mutex)
446 {
447         int     ret = 0;
448
449         if (mutex == NULL)
450                 ret = EINVAL;
451
452         /*
453          * If the mutex is statically initialized, perform the dynamic
454          * initialization marking the mutex private (delete safe):
455          */
456         else if ((*mutex != NULL) || (ret = init_static_private(mutex)) == 0)
457                 ret = mutex_trylock_common(mutex);
458
459         return (ret);
460 }
461
462 static int
463 mutex_lock_common(pthread_mutex_t * mutex)
464 {
465         struct pthread  *curthread = _get_curthread();
466         int     ret = 0;
467
468         PTHREAD_ASSERT((mutex != NULL) && (*mutex != NULL),
469             "Uninitialized mutex in pthread_mutex_trylock_basic");
470
471         /* Reset the interrupted flag: */
472         curthread->interrupted = 0;
473
474         /*
475          * Enter a loop waiting to become the mutex owner.  We need a
476          * loop in case the waiting thread is interrupted by a signal
477          * to execute a signal handler.  It is not (currently) possible
478          * to remain in the waiting queue while running a handler.
479          * Instead, the thread is interrupted and backed out of the
480          * waiting queue prior to executing the signal handler.
481          */
482         do {
483                 /*
484                  * Defer signals to protect the scheduling queues from
485                  * access by the signal handler:
486                  */
487                 _thread_kern_sig_defer();
488
489                 /* Lock the mutex structure: */
490                 _SPINLOCK(&(*mutex)->lock);
491
492                 /*
493                  * If the mutex was statically allocated, properly
494                  * initialize the tail queue.
495                  */
496                 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
497                         TAILQ_INIT(&(*mutex)->m_queue);
498                         (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
499                         _MUTEX_INIT_LINK(*mutex);
500                 }
501
502                 /* Process according to mutex type: */
503                 switch ((*mutex)->m_protocol) {
504                 /* Default POSIX mutex: */
505                 case PTHREAD_PRIO_NONE:
506                         if ((*mutex)->m_owner == NULL) {
507                                 /* Lock the mutex for this thread: */
508                                 (*mutex)->m_owner = curthread;
509
510                                 /* Add to the list of owned mutexes: */
511                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
512                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
513                                     (*mutex), m_qe);
514
515                         } else if ((*mutex)->m_owner == curthread)
516                                 ret = mutex_self_lock(*mutex);
517                         else {
518                                 /*
519                                  * Join the queue of threads waiting to lock
520                                  * the mutex: 
521                                  */
522                                 mutex_queue_enq(*mutex, curthread);
523
524                                 /*
525                                  * Keep a pointer to the mutex this thread
526                                  * is waiting on:
527                                  */
528                                 curthread->data.mutex = *mutex;
529
530                                 /*
531                                  * Unlock the mutex structure and schedule the
532                                  * next thread:
533                                  */
534                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
535                                     &(*mutex)->lock, __FILE__, __LINE__);
536
537                                 /* Lock the mutex structure again: */
538                                 _SPINLOCK(&(*mutex)->lock);
539                         }
540                         break;
541
542                 /* POSIX priority inheritence mutex: */
543                 case PTHREAD_PRIO_INHERIT:
544                         /* Check if this mutex is not locked: */
545                         if ((*mutex)->m_owner == NULL) {
546                                 /* Lock the mutex for this thread: */
547                                 (*mutex)->m_owner = curthread;
548
549                                 /* Track number of priority mutexes owned: */
550                                 curthread->priority_mutex_count++;
551
552                                 /*
553                                  * The mutex takes on attributes of the
554                                  * running thread when there are no waiters.
555                                  */
556                                 (*mutex)->m_prio = curthread->active_priority;
557                                 (*mutex)->m_saved_prio =
558                                     curthread->inherited_priority;
559                                 curthread->inherited_priority =
560                                     (*mutex)->m_prio;
561
562                                 /* Add to the list of owned mutexes: */
563                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
564                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
565                                     (*mutex), m_qe);
566
567                         } else if ((*mutex)->m_owner == curthread)
568                                 ret = mutex_self_lock(*mutex);
569                         else {
570                                 /*
571                                  * Join the queue of threads waiting to lock
572                                  * the mutex: 
573                                  */
574                                 mutex_queue_enq(*mutex, curthread);
575
576                                 /*
577                                  * Keep a pointer to the mutex this thread
578                                  * is waiting on:
579                                  */
580                                 curthread->data.mutex = *mutex;
581
582                                 if (curthread->active_priority >
583                                     (*mutex)->m_prio)
584                                         /* Adjust priorities: */
585                                         mutex_priority_adjust(*mutex);
586
587                                 /*
588                                  * Unlock the mutex structure and schedule the
589                                  * next thread:
590                                  */
591                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
592                                     &(*mutex)->lock, __FILE__, __LINE__);
593
594                                 /* Lock the mutex structure again: */
595                                 _SPINLOCK(&(*mutex)->lock);
596                         }
597                         break;
598
599                 /* POSIX priority protection mutex: */
600                 case PTHREAD_PRIO_PROTECT:
601                         /* Check for a priority ceiling violation: */
602                         if (curthread->active_priority > (*mutex)->m_prio)
603                                 ret = EINVAL;
604
605                         /* Check if this mutex is not locked: */
606                         else if ((*mutex)->m_owner == NULL) {
607                                 /*
608                                  * Lock the mutex for the running
609                                  * thread:
610                                  */
611                                 (*mutex)->m_owner = curthread;
612
613                                 /* Track number of priority mutexes owned: */
614                                 curthread->priority_mutex_count++;
615
616                                 /*
617                                  * The running thread inherits the ceiling
618                                  * priority of the mutex and executes at that
619                                  * priority:
620                                  */
621                                 curthread->active_priority = (*mutex)->m_prio;
622                                 (*mutex)->m_saved_prio =
623                                     curthread->inherited_priority;
624                                 curthread->inherited_priority =
625                                     (*mutex)->m_prio;
626
627                                 /* Add to the list of owned mutexes: */
628                                 _MUTEX_ASSERT_NOT_OWNED(*mutex);
629                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
630                                     (*mutex), m_qe);
631                         } else if ((*mutex)->m_owner == curthread)
632                                 ret = mutex_self_lock(*mutex);
633                         else {
634                                 /*
635                                  * Join the queue of threads waiting to lock
636                                  * the mutex: 
637                                  */
638                                 mutex_queue_enq(*mutex, curthread);
639
640                                 /*
641                                  * Keep a pointer to the mutex this thread
642                                  * is waiting on:
643                                  */
644                                 curthread->data.mutex = *mutex;
645
646                                 /* Clear any previous error: */
647                                 curthread->error = 0;
648
649                                 /*
650                                  * Unlock the mutex structure and schedule the
651                                  * next thread:
652                                  */
653                                 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT,
654                                     &(*mutex)->lock, __FILE__, __LINE__);
655
656                                 /* Lock the mutex structure again: */
657                                 _SPINLOCK(&(*mutex)->lock);
658
659                                 /*
660                                  * The threads priority may have changed while
661                                  * waiting for the mutex causing a ceiling
662                                  * violation.
663                                  */
664                                 ret = curthread->error;
665                                 curthread->error = 0;
666                         }
667                         break;
668
669                 /* Trap invalid mutex types: */
670                 default:
671                         /* Return an invalid argument error: */
672                         ret = EINVAL;
673                         break;
674                 }
675
676                 /*
677                  * Check to see if this thread was interrupted and
678                  * is still in the mutex queue of waiting threads:
679                  */
680                 if (curthread->interrupted != 0)
681                         mutex_queue_remove(*mutex, curthread);
682
683                 /* Unlock the mutex structure: */
684                 _SPINUNLOCK(&(*mutex)->lock);
685
686                 /*
687                  * Undefer and handle pending signals, yielding if
688                  * necessary:
689                  */
690                 _thread_kern_sig_undefer();
691         } while (((*mutex)->m_owner != curthread) && (ret == 0) &&
692             (curthread->interrupted == 0));
693
694         if (curthread->interrupted != 0 &&
695             curthread->continuation != NULL)
696                 curthread->continuation((void *) curthread);
697
698         /* Return the completion status: */
699         return (ret);
700 }
701
702 int
703 __pthread_mutex_lock(pthread_mutex_t *mutex)
704 {
705         int     ret = 0;
706
707         if (_thread_initial == NULL)
708                 _thread_init();
709
710         if (mutex == NULL)
711                 ret = EINVAL;
712
713         /*
714          * If the mutex is statically initialized, perform the dynamic
715          * initialization:
716          */
717         else if ((*mutex != NULL) || ((ret = init_static(mutex)) == 0))
718                 ret = mutex_lock_common(mutex);
719
720         return (ret);
721 }
722
723 int
724 _pthread_mutex_lock(pthread_mutex_t *mutex)
725 {
726         int     ret = 0;
727
728         if (_thread_initial == NULL)
729                 _thread_init();
730
731         if (mutex == NULL)
732                 ret = EINVAL;
733
734         /*
735          * If the mutex is statically initialized, perform the dynamic
736          * initialization marking it private (delete safe):
737          */
738         else if ((*mutex != NULL) || ((ret = init_static_private(mutex)) == 0))
739                 ret = mutex_lock_common(mutex);
740
741         return (ret);
742 }
743
/*
 * Public unlock entry point: release the mutex without retaining a
 * condition-variable reference on it.
 */
int
_pthread_mutex_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 0));
}
749
/*
 * Unlock on behalf of a condition-variable wait: release the mutex
 * while bumping m_refcount so the mutex cannot be destroyed before
 * the waiter reacquires it.
 */
int
_mutex_cv_unlock(pthread_mutex_t * mutex)
{
        return (mutex_unlock_common(mutex, /* add reference */ 1));
}
755
756 int
757 _mutex_cv_lock(pthread_mutex_t * mutex)
758 {
759         int     ret;
760         if ((ret = _pthread_mutex_lock(mutex)) == 0)
761                 (*mutex)->m_refcount--;
762         return (ret);
763 }
764
/*
 * Handle a trylock on a mutex the calling thread already owns.
 * Recursive mutexes simply bump their count; all other types fail.
 */
static inline int
mutex_self_trylock(pthread_mutex_t mutex)
{
        int     ret = 0;

        switch (mutex->m_type) {

        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
                /*
                 * A trylock on an already-owned, non-recursive mutex
                 * simply fails with EBUSY; only a blocking lock
                 * reports the recursion as EDEADLK (see
                 * mutex_self_lock()).
                 */
                ret = EBUSY; 
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                mutex->m_data.m_count++;
                break;

        default:
                /* Trap invalid mutex types; */
                ret = EINVAL;
        }

        return (ret);
}
794
/*
 * Handle a blocking lock attempt on a mutex the calling thread
 * already owns.  Called with the mutex structure already locked.
 */
static inline int
mutex_self_lock(pthread_mutex_t mutex)
{
	int ret = 0;

	switch (mutex->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that error-checking mutexes should
		 * return EDEADLK if a recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		_thread_kern_sched_state_unlock(PS_DEADLOCK,
		    &mutex->lock, __FILE__, __LINE__);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		mutex->m_data.m_count++;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}
831
/*
 * Common unlock path shared by pthread_mutex_unlock() and the
 * condition variable code.  If add_reference is non-zero, the mutex
 * reference count is incremented after a successful unlock so the
 * mutex stays referenced while a condition variable hands it off.
 * Returns 0 on success, EINVAL for a bad/unowned mutex, or EPERM
 * when the caller is not the owner.
 */
static inline int
mutex_unlock_common(pthread_mutex_t * mutex, int add_reference)
{
	struct pthread	*curthread = _get_curthread();
	int	ret = 0;

	if (mutex == NULL || *mutex == NULL) {
		ret = EINVAL;
	} else {
		/*
		 * Defer signals to protect the scheduling queues from
		 * access by the signal handler:
		 */
		_thread_kern_sig_defer();

		/* Lock the mutex structure: */
		_SPINLOCK(&(*mutex)->lock);

		/* Process according to mutex type: */
		switch ((*mutex)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of
				 * threads waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) {
					/* Make the new owner runnable: */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);

					/*
					 * Add the mutex to the thread's list of
					 * owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}
			}
			break;

		/* POSIX priority inheritance mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Get the next thread from the queue of threads
				 * waiting on the mutex:
				 */
				if (((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) == NULL)
					/* This mutex has no priority. */
					(*mutex)->m_prio = 0;
				else {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Set the priority of the mutex.  Since
					 * our waiting threads are in descending
					 * priority order, the priority of the
					 * mutex becomes the active priority of
					 * the thread we just dequeued.
					 */
					(*mutex)->m_prio =
					    (*mutex)->m_owner->active_priority;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread's inherited priority
					 * now becomes its active priority (the
					 * priority of the mutex).
					 */
					(*mutex)->m_owner->inherited_priority =
						(*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*mutex)->m_owner != curthread) {
				/*
				 * Return an invalid argument error for no
				 * owner and a permission error otherwise:
				 */
				ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM;
			}
			else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*mutex)->m_data.m_count > 0)) {
				/* Decrement the count: */
				(*mutex)->m_data.m_count--;
			} else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*mutex)->m_data.m_count = 0;

				/*
				 * Restore the thread's inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the thread's base
				 * priority subsequent to locking the mutex).
				 */
				curthread->inherited_priority =
					(*mutex)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;

				/* Remove the mutex from the thread's queue. */
				_MUTEX_ASSERT_IS_OWNED(*mutex);
				TAILQ_REMOVE(&(*mutex)->m_owner->mutexq,
				    (*mutex), m_qe);
				_MUTEX_INIT_LINK(*mutex);

				/*
				 * Enter a loop to find a waiting thread whose
				 * active priority will not cause a ceiling
				 * violation:
				 */
				while ((((*mutex)->m_owner =
				    mutex_queue_deq(*mutex)) != NULL) &&
				    ((*mutex)->m_owner->active_priority >
				     (*mutex)->m_prio)) {
					/*
					 * Either the mutex ceiling priority has
					 * been lowered and/or this thread's
					 * priority has been raised subsequent
					 * to the thread being queued on the
					 * waiting list.  Wake it with EINVAL:
					 */
					(*mutex)->m_owner->error = EINVAL;
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
					/*
					 * The thread is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;
				}

				/* Check for a new owner: */
				if ((*mutex)->m_owner != NULL) {
					/*
					 * Track number of priority mutexes owned:
					 */
					(*mutex)->m_owner->priority_mutex_count++;

					/*
					 * Add the mutex to the thread's list
					 * of owned mutexes:
					 */
					TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq,
					    (*mutex), m_qe);

					/*
					 * The owner is no longer waiting for
					 * this mutex:
					 */
					(*mutex)->m_owner->data.mutex = NULL;

					/*
					 * Save the owning thread's inherited
					 * priority:
					 */
					(*mutex)->m_saved_prio =
						(*mutex)->m_owner->inherited_priority;

					/*
					 * The owning thread inherits the
					 * ceiling priority of the mutex and
					 * executes at that priority:
					 */
					(*mutex)->m_owner->inherited_priority =
					    (*mutex)->m_prio;
					(*mutex)->m_owner->active_priority =
					    (*mutex)->m_prio;

					/*
					 * Make the new owner runnable:
					 */
					PTHREAD_NEW_STATE((*mutex)->m_owner,
					    PS_RUNNING);
				}
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0)) {
			/* Increment the reference count: */
			(*mutex)->m_refcount++;
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&(*mutex)->lock);

		/*
		 * Undefer and handle pending signals, yielding if
		 * necessary:
		 */
		_thread_kern_sig_undefer();
	}

	/* Return the completion status: */
	return (ret);
}
1160
1161
/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritance mutex.  A change in a thread's base priority can effect
 * changes to the active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called while thread scheduling is deferred.
 */
void
_mutex_notify_priochange(pthread_t pthread)
{
	/* Adjust the priorities of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this thread's change
		 * in priority.  This has the side effect of changing
		 * the thread's active priority.
		 */
		mutex_rescan_owned(pthread, /* rescan all owned */ NULL);
	}

	/*
	 * If this thread is waiting on a priority inheritance mutex,
	 * check for priority adjustments.  A change in priority can
	 * also effect a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (pthread->state == PS_MUTEX_WAIT) {
		/* Lock the mutex structure: */
		_SPINLOCK(&pthread->data.mutex->lock);

		/*
		 * Check to make sure this thread is still in the same state
		 * (the spinlock above can yield the CPU to another thread):
		 */
		if (pthread->state == PS_MUTEX_WAIT) {
			/*
			 * Remove and reinsert this thread into the list of
			 * waiting threads to preserve decreasing priority
			 * order.
			 */
			mutex_queue_remove(pthread->data.mutex, pthread);
			mutex_queue_enq(pthread->data.mutex, pthread);

			if (pthread->data.mutex->m_protocol ==
			     PTHREAD_PRIO_INHERIT) {
				/* Adjust priorities: */
				mutex_priority_adjust(pthread->data.mutex);
			}
		}

		/* Unlock the mutex structure: */
		_SPINUNLOCK(&pthread->data.mutex->lock);
	}
}
1225
/*
 * Called when a new thread is added to the mutex waiting queue, or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 */
static void
mutex_priority_adjust(pthread_mutex_t mutex)
{
	pthread_t	pthread_next, pthread = mutex->m_owner;
	int		temp_prio;
	pthread_mutex_t m = mutex;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning thread's
	 * active priority(*).
	 *
	 * (*) Because the owning thread's current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the thread's saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Walk up the chain of mutex owners, propagating the raised
	 * priority until it stops changing or the chain ends:
	 */
	while (m != NULL) {
		/*
		 * Save the thread's priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all the mutexes this thread has
		 * locked since taking this mutex.  This also has a
		 * potential side-effect of changing the thread's priority.
		 */
		mutex_rescan_owned(pthread, m);

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the thread's new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    (pthread->state == PS_MUTEX_WAIT) &&
		    (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Grab the mutex this thread is waiting on: */
			m = pthread->data.mutex;

			/*
			 * The priority for this thread has changed.  Remove
			 * and reinsert this thread into the list of waiting
			 * threads to preserve decreasing priority order.
			 */
			mutex_queue_remove(m, pthread);
			mutex_queue_enq(m, pthread);

			/* Grab the waiting thread with highest priority: */
			pthread_next = TAILQ_FIRST(&m->m_queue);

			/*
			 * Calculate the mutex priority as the maximum of the
			 * highest active priority of any waiting threads and
			 * the owning thread's active priority.
			 */
			temp_prio = MAX(pthread_next->active_priority,
			    MAX(m->m_saved_prio, m->m_owner->base_priority));

			if (temp_prio != m->m_prio) {
				/*
				 * The priority needs to be propagated to the
				 * mutex this thread is waiting on and up to
				 * the owner of that mutex.
				 */
				m->m_prio = temp_prio;
				pthread = m->m_owner;
			}
			else
				/* We're done: */
				m = NULL;

		}
		else
			/* We're done: */
			m = NULL;
	}
}
1324
/*
 * Recalculate the priorities of the priority-inheritance mutexes
 * owned by a thread, and from them the thread's own inherited and
 * active priorities.  If mutex is NULL all owned mutexes are
 * rescanned; otherwise scanning starts with the mutex following it
 * in the thread's owned-mutex list.  Adjusts the thread's position
 * in the priority run queue when its active priority changes.
 */
static void
mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex)
{
	int		active_prio, inherited_prio;
	pthread_mutex_t m;
	pthread_t	pthread_next;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
	}
}
1437
1438 void
1439 _mutex_unlock_private(pthread_t pthread)
1440 {
1441         struct pthread_mutex    *m, *m_next;
1442
1443         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1444                 m_next = TAILQ_NEXT(m, m_qe);
1445                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1446                         _pthread_mutex_unlock(&m);
1447         }
1448 }
1449
1450 void
1451 _mutex_lock_backout(pthread_t pthread)
1452 {
1453         struct pthread_mutex    *mutex;
1454
1455         /*
1456          * Defer signals to protect the scheduling queues from
1457          * access by the signal handler:
1458          */
1459         _thread_kern_sig_defer();
1460         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1461                 mutex = pthread->data.mutex;
1462
1463                 /* Lock the mutex structure: */
1464                 _SPINLOCK(&mutex->lock);
1465
1466                 mutex_queue_remove(mutex, pthread);
1467
1468                 /* This thread is no longer waiting for the mutex: */
1469                 pthread->data.mutex = NULL;
1470
1471                 /* Unlock the mutex structure: */
1472                 _SPINUNLOCK(&mutex->lock);
1473
1474         }
1475         /*
1476          * Undefer and handle pending signals, yielding if
1477          * necessary:
1478          */
1479         _thread_kern_sig_undefer();
1480 }
1481
1482 /*
1483  * Dequeue a waiting thread from the head of a mutex queue in descending
1484  * priority order.
1485  */
1486 static inline pthread_t
1487 mutex_queue_deq(pthread_mutex_t mutex)
1488 {
1489         pthread_t pthread;
1490
1491         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1492                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1493                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1494
1495                 /*
1496                  * Only exit the loop if the thread hasn't been
1497                  * cancelled.
1498                  */
1499                 if (pthread->interrupted == 0)
1500                         break;
1501         }
1502
1503         return (pthread);
1504 }
1505
1506 /*
1507  * Remove a waiting thread from a mutex queue in descending priority order.
1508  */
1509 static inline void
1510 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1511 {
1512         if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
1513                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1514                 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;
1515         }
1516 }
1517
1518 /*
1519  * Enqueue a waiting thread to a queue in descending priority order.
1520  */
1521 static inline void
1522 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1523 {
1524         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1525
1526         PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
1527         /*
1528          * For the common case of all threads having equal priority,
1529          * we perform a quick check against the priority of the thread
1530          * at the tail of the queue.
1531          */
1532         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1533                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1534         else {
1535                 tid = TAILQ_FIRST(&mutex->m_queue);
1536                 while (pthread->active_priority <= tid->active_priority)
1537                         tid = TAILQ_NEXT(tid, sqe);
1538                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1539         }
1540         pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ;
1541 }
1542