/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)              do {            \
        (m)->m_qe.tqe_prev = NULL;                      \
        (m)->m_qe.tqe_next = NULL;                      \
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)        do {            \
        if ((m)->m_qe.tqe_prev == NULL)                 \
                PANIC("mutex is not on list");          \
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)       do {            \
        if (((m)->m_qe.tqe_prev != NULL) ||             \
            ((m)->m_qe.tqe_next != NULL))               \
                PANIC("mutex is on list");              \
} while (0)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)    do {            \
        THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
            "thread in syncq when it shouldn't be.");   \
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)      (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define MUTEX_DESTROY(m) do {           \
        _lock_destroy(&(m)->m_lock);    \
        free(m);                        \
} while (0)



/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
                            struct pthread_mutex *);
static inline int       mutex_self_trylock(pthread_mutex_t);
static inline int       mutex_self_lock(struct pthread *, pthread_mutex_t);
static int              mutex_unlock_common(pthread_mutex_t *, int);
static void             mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void             mutex_rescan_owned(struct pthread *, struct pthread *,
                            struct pthread_mutex *);
static inline pthread_t mutex_queue_deq(pthread_mutex_t);
static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
static void             mutex_lock_backout(void *arg);

int     __pthread_mutex_init(pthread_mutex_t *mutex,
            const pthread_mutexattr_t *mutex_attr);
int     __pthread_mutex_trylock(pthread_mutex_t *mutex);
int     __pthread_mutex_lock(pthread_mutex_t *m);
int     __pthread_mutex_timedlock(pthread_mutex_t *m,
            const struct timespec *abs_timeout);
int     _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
            void *(calloc_cb)(size_t, size_t));


static struct pthread_mutex_attr        static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t              static_mattr = &static_mutex_attr;

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
thr_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
{
        struct pthread_mutex *pmutex;
        enum pthread_mutextype type;
        int             protocol;
        int             ceiling;
        int             flags;
        int             ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /* Check if default mutex attributes: */
        else if (mutex_attr == NULL || *mutex_attr == NULL) {
                /* Default to an error-checking POSIX mutex: */
                type = PTHREAD_MUTEX_ERRORCHECK;
                protocol = PTHREAD_PRIO_NONE;
                ceiling = THR_MAX_PRIORITY;
                flags = 0;
        }

        /* Check mutex type: */
        else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
            ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
                /* Return an invalid argument error: */
                ret = EINVAL;

        /* Check mutex protocol (PTHREAD_PRIO_PROTECT is the highest value): */
        else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
            ((*mutex_attr)->m_protocol > PTHREAD_PRIO_PROTECT))
                /* Return an invalid argument error: */
                ret = EINVAL;

        else {
                /* Use the requested mutex type and protocol: */
                type = (*mutex_attr)->m_type;
                protocol = (*mutex_attr)->m_protocol;
                ceiling = (*mutex_attr)->m_ceiling;
                flags = (*mutex_attr)->m_flags;
        }

        /* Check no errors so far: */
        if (ret == 0) {
                if ((pmutex = (pthread_mutex_t)
                    calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
                        ret = ENOMEM;
                else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
                    _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
                        free(pmutex);
                        *mutex = NULL;
                        ret = ENOMEM;
                } else {
                        /* Set the mutex flags: */
                        pmutex->m_flags = flags;

                        /* Process according to mutex type: */
                        switch (type) {
                        /* case PTHREAD_MUTEX_DEFAULT: */
                        case PTHREAD_MUTEX_ERRORCHECK:
                        case PTHREAD_MUTEX_NORMAL:
                        case PTHREAD_MUTEX_ADAPTIVE_NP:
                                /* Nothing to do here. */
                                break;

                        /* Single UNIX Spec 2 recursive mutex: */
                        case PTHREAD_MUTEX_RECURSIVE:
                                /* Reset the mutex count: */
                                pmutex->m_count = 0;
                                break;

                        /* Trap invalid mutex types: */
                        default:
                                /* Return an invalid argument error: */
                                ret = EINVAL;
                                break;
                        }
                        if (ret == 0) {
                                /* Initialise the rest of the mutex: */
                                TAILQ_INIT(&pmutex->m_queue);
                                pmutex->m_flags |= MUTEX_FLAGS_INITED;
                                pmutex->m_owner = NULL;
                                pmutex->m_type = type;
                                pmutex->m_protocol = protocol;
                                pmutex->m_refcount = 0;
                                if (protocol == PTHREAD_PRIO_PROTECT)
                                        pmutex->m_prio = ceiling;
                                else
                                        pmutex->m_prio = -1;
                                pmutex->m_saved_prio = 0;
                                MUTEX_INIT_LINK(pmutex);
                                *mutex = pmutex;
                        } else {
                                /* Free the mutex lock structure: */
                                MUTEX_DESTROY(pmutex);
                                *mutex = NULL;
                        }
                }
        }
        /* Return the completion status: */
        return (ret);
}
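
/*
 * Illustrative sketch (not compiled into the library): how an
 * application reaches thr_mutex_init() through the standard POSIX
 * attribute API.  Everything below uses only standard pthread calls;
 * the function name example_init is hypothetical.
 */
#if 0
#include <pthread.h>

static void
example_init(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t recursive_m, default_m;

        /* Request a recursive mutex; thr_mutex_init() resets its count. */
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&recursive_m, &attr);
        pthread_mutexattr_destroy(&attr);

        /* A NULL attribute selects this library's error-checking default. */
        pthread_mutex_init(&default_m, NULL);

        pthread_mutex_destroy(&recursive_m);
        pthread_mutex_destroy(&default_m);
}
#endif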

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{

        return (thr_mutex_init(mutex, mutex_attr, calloc));
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
        struct pthread_mutex_attr mattr, *mattrp;

        if ((mutex_attr == NULL) || (*mutex_attr == NULL))
                return (__pthread_mutex_init(mutex, &static_mattr));
        else {
                mattr = **mutex_attr;
                mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
                mattrp = &mattr;
                return (__pthread_mutex_init(mutex, &mattrp));
        }
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
        static const struct pthread_mutex_attr attr = {
                .m_type = PTHREAD_MUTEX_NORMAL,
                .m_protocol = PTHREAD_PRIO_NONE,
                .m_ceiling = 0,
                .m_flags = 0
        };
        static const struct pthread_mutex_attr *pattr = &attr;

        return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
            calloc_cb));
}
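
/*
 * Hedged sketch of why the calloc callback exists: malloc(3) needs a
 * mutex before the allocator itself is usable, so it hands this file
 * its own bootstrap allocator instead of letting us call calloc().
 * The callback below is purely hypothetical (the real one lives inside
 * malloc); it only shows the required calloc-like contract: zeroed
 * memory, or NULL on failure.
 */
#if 0
static void *
bootstrap_calloc(size_t number, size_t size)
{
        static char pool[1024];         /* static, so already zeroed */
        static size_t used;
        size_t total = number * size;   /* overflow ignored in this sketch */
        void *p;

        total = (total + 15) & ~(size_t)15;     /* keep allocations aligned */
        if (total > sizeof(pool) - used)
                return (NULL);
        p = pool + used;
        used += total;
        return (p);
}

/* malloc would then do something like: */
/* _pthread_mutex_init_calloc_cb(&malloc_mutex, bootstrap_calloc); */
#endif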

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
        _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
            _thr_lock_wait, _thr_lock_wakeup);
        TAILQ_INIT(&(*mutex)->m_queue);
        (*mutex)->m_owner = NULL;
        (*mutex)->m_count = 0;
        (*mutex)->m_refcount = 0;
        (*mutex)->m_prio = 0;
        (*mutex)->m_saved_prio = 0;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        pthread_mutex_t m;
        int ret = 0;

        if (mutex == NULL || *mutex == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

                /*
                 * Check to see if this mutex is in use:
                 */
                if (((*mutex)->m_owner != NULL) ||
                    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
                    ((*mutex)->m_refcount != 0)) {
                        ret = EBUSY;

                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
                } else {
                        /*
                         * Save a pointer to the mutex so it can be freed
                         * and set the caller's pointer to NULL:
                         */
                        m = *mutex;
                        *mutex = NULL;

                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &m->m_lock);

                        /*
                         * Free the memory allocated for the mutex
                         * structure:
                         */
                        MUTEX_ASSERT_NOT_OWNED(m);
                        MUTEX_DESTROY(m);
                }
        }

        /* Return the completion status: */
        return (ret);
}
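
/*
 * Illustrative sketch (not compiled): destroy fails with EBUSY while
 * the mutex is owned, has queued waiters, or is referenced by a
 * condition-variable wait, exactly as checked above.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_destroy(void)
{
        pthread_mutex_t m;

        pthread_mutex_init(&m, NULL);
        pthread_mutex_lock(&m);
        assert(pthread_mutex_destroy(&m) == EBUSY);     /* still owned */
        pthread_mutex_unlock(&m);
        assert(pthread_mutex_destroy(&m) == 0);         /* now succeeds */
}
#endif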

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = _pthread_mutex_init(mutex, NULL);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
        int ret;

        THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

        if (*mutex == NULL)
                ret = _pthread_mutex_init(mutex, &static_mattr);
        else
                ret = 0;

        THR_LOCK_RELEASE(thread, &_mutex_static_lock);

        return (ret);
}
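
/*
 * Illustrative sketch (not compiled): a statically initialized mutex
 * leaves *mutex NULL until first use (assuming PTHREAD_MUTEX_INITIALIZER
 * expands to NULL, as it historically did in this library), so the lock
 * paths call init_static()/init_static_private() to do the deferred
 * initialization under _mutex_static_lock.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t static_m = PTHREAD_MUTEX_INITIALIZER;

static void
example_static(void)
{
        /* The first lock triggers the dynamic initialization above. */
        pthread_mutex_lock(&static_m);
        pthread_mutex_unlock(&static_m);
}
#endif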

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
        int private;
        int ret = 0;

        THR_ASSERT((mutex != NULL) && (*mutex != NULL),
            "Uninitialized mutex in pthread_mutex_trylock_basic");

        /* Lock the mutex structure: */
        THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
        private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

        /*
         * If the mutex was statically allocated, properly
         * initialize the tail queue.
         */
        if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                TAILQ_INIT(&(*mutex)->m_queue);
                MUTEX_INIT_LINK(*mutex);
                (*mutex)->m_flags |= MUTEX_FLAGS_INITED;
        }

        /* Process according to mutex type: */
        switch ((*mutex)->m_protocol) {
        /* Default POSIX mutex: */
        case PTHREAD_PRIO_NONE:
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority inheritance mutex: */
        case PTHREAD_PRIO_INHERIT:
                /* Check if this mutex is not locked: */
                if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        THR_SCHED_LOCK(curthread, curthread);
                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The mutex takes on the attributes of the
                         * running thread when there are no waiters.
                         */
                        (*mutex)->m_prio = curthread->active_priority;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;
                        curthread->inherited_priority = (*mutex)->m_prio;
                        THR_SCHED_UNLOCK(curthread, curthread);

                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* POSIX priority protection mutex: */
        case PTHREAD_PRIO_PROTECT:
                /* Check for a priority ceiling violation: */
                if (curthread->active_priority > (*mutex)->m_prio)
                        ret = EINVAL;

                /* Check if this mutex is not locked: */
                else if ((*mutex)->m_owner == NULL) {
                        /* Lock the mutex for the running thread: */
                        (*mutex)->m_owner = curthread;

                        THR_SCHED_LOCK(curthread, curthread);
                        /* Track number of priority mutexes owned: */
                        curthread->priority_mutex_count++;

                        /*
                         * The running thread inherits the ceiling
                         * priority of the mutex and executes at that
                         * priority.
                         */
                        curthread->active_priority = (*mutex)->m_prio;
                        (*mutex)->m_saved_prio =
                            curthread->inherited_priority;
                        curthread->inherited_priority =
                            (*mutex)->m_prio;
                        THR_SCHED_UNLOCK(curthread, curthread);
                        /* Add to the list of owned mutexes: */
                        MUTEX_ASSERT_NOT_OWNED(*mutex);
                        TAILQ_INSERT_TAIL(&curthread->mutexq,
                            (*mutex), m_qe);
                } else if ((*mutex)->m_owner == curthread)
                        ret = mutex_self_trylock(*mutex);
                else
                        /* Return a busy error: */
                        ret = EBUSY;
                break;

        /* Trap invalid mutex types: */
        default:
                /* Return an invalid argument error: */
                ret = EINVAL;
                break;
        }

        if (ret == 0 && private)
                THR_CRITICAL_ENTER(curthread);

        /* Unlock the mutex structure: */
        THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

        /* Return the completion status: */
        return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        struct pthread *curthread = _get_curthread();
        int ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*mutex != NULL) ||
            ((ret = init_static(curthread, mutex)) == 0))
                ret = mutex_trylock_common(curthread, mutex);

        return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();
        int     ret = 0;

        if (mutex == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking the mutex private (delete safe):
         */
        else if ((*mutex != NULL) ||
            ((ret = init_static_private(curthread, mutex)) == 0))
                ret = mutex_trylock_common(curthread, mutex);

        return (ret);
}
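
/*
 * Illustrative sketch (not compiled): the usual pattern an application
 * layers on top of mutex_trylock_common().  EBUSY means another thread
 * owns the mutex and the caller should do something else rather than
 * block; the function name example_trylock is hypothetical.
 */
#if 0
#include <errno.h>
#include <pthread.h>

static int
example_trylock(pthread_mutex_t *mp)
{
        int error;

        error = pthread_mutex_trylock(mp);
        if (error == EBUSY)
                return (0);             /* contended; skip the work */
        if (error != 0)
                return (error);         /* EINVAL, EDEADLK, ... */
        /* ... critical section ... */
        return (pthread_mutex_unlock(mp));
}
#endif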

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
        const struct timespec *abstime)
{
        int     private;
        int     ret = 0;

        THR_ASSERT((m != NULL) && (*m != NULL),
            "Uninitialized mutex in pthread_mutex_lock_basic");

        if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
            abstime->tv_nsec >= 1000000000))
                return (EINVAL);

        /* Reset the interrupted flag: */
        curthread->interrupted = 0;
        curthread->timeout = 0;
        curthread->wakeup_time.tv_sec = -1;

        private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;

        /*
         * Enter a loop waiting to become the mutex owner.  We need a
         * loop in case the waiting thread is interrupted by a signal
         * to execute a signal handler.  It is not (currently) possible
         * to remain in the waiting queue while running a handler.
         * Instead, the thread is interrupted and backed out of the
         * waiting queue prior to executing the signal handler.
         */
        do {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

                /*
                 * If the mutex was statically allocated, properly
                 * initialize the tail queue.
                 */
                if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
                        TAILQ_INIT(&(*m)->m_queue);
                        (*m)->m_flags |= MUTEX_FLAGS_INITED;
                        MUTEX_INIT_LINK(*m);
                }

                /* Process according to mutex type: */
                switch ((*m)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        if ((*m)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*m)->m_owner = curthread;

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;
                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }

                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /* Check if this mutex is not locked: */
                        if ((*m)->m_owner == NULL) {
                                /* Lock the mutex for this thread: */
                                (*m)->m_owner = curthread;

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The mutex takes on attributes of the
                                 * running thread when there are no waiters.
                                 * Make sure the thread's scheduling lock is
                                 * held while priorities are adjusted.
                                 */
                                (*m)->m_prio = curthread->active_priority;
                                (*m)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority = (*m)->m_prio;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;

                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */
                                if (curthread->active_priority > (*m)->m_prio)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(curthread, *m);

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }
                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;
                        }
                        break;

                /* POSIX priority protection mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /* Check for a priority ceiling violation: */
                        if (curthread->active_priority > (*m)->m_prio) {
                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                ret = EINVAL;
                        }
                        /* Check if this mutex is not locked: */
                        else if ((*m)->m_owner == NULL) {
                                /*
                                 * Lock the mutex for the running
                                 * thread:
                                 */
                                (*m)->m_owner = curthread;

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Track number of priority mutexes owned: */
                                curthread->priority_mutex_count++;

                                /*
                                 * The running thread inherits the ceiling
                                 * priority of the mutex and executes at that
                                 * priority.  Make sure the thread's
                                 * scheduling lock is held while priorities
                                 * are adjusted.
                                 */
                                curthread->active_priority = (*m)->m_prio;
                                (*m)->m_saved_prio =
                                    curthread->inherited_priority;
                                curthread->inherited_priority = (*m)->m_prio;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Add to the list of owned mutexes: */
                                MUTEX_ASSERT_NOT_OWNED(*m);
                                TAILQ_INSERT_TAIL(&curthread->mutexq,
                                    (*m), m_qe);
                                if (private)
                                        THR_CRITICAL_ENTER(curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else if ((*m)->m_owner == curthread) {
                                ret = mutex_self_lock(curthread, *m);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                        } else {
                                /*
                                 * Join the queue of threads waiting to lock
                                 * the mutex and save a pointer to the mutex.
                                 */
                                mutex_queue_enq(*m, curthread);
                                curthread->data.mutex = *m;
                                curthread->sigbackout = mutex_lock_backout;

                                /* Clear any previous error: */
                                curthread->error = 0;

                                /*
                                 * This thread is active and is in a critical
                                 * region (holding the mutex lock); we should
                                 * be able to safely set the state.
                                 */

                                THR_SCHED_LOCK(curthread, curthread);
                                /* Set the wakeup time: */
                                if (abstime) {
                                        curthread->wakeup_time.tv_sec =
                                                abstime->tv_sec;
                                        curthread->wakeup_time.tv_nsec =
                                                abstime->tv_nsec;
                                }
                                THR_SET_STATE(curthread, PS_MUTEX_WAIT);
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Unlock the mutex structure: */
                                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                                /* Schedule the next thread: */
                                _thr_sched_switch(curthread);

                                if (THR_IN_MUTEXQ(curthread)) {
                                        THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                                        mutex_queue_remove(*m, curthread);
                                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
                                }
                                /*
                                 * Only clear these after assuring the
                                 * thread is dequeued.
                                 */
                                curthread->data.mutex = NULL;
                                curthread->sigbackout = NULL;

                                /*
                                 * The thread's priority may have changed
                                 * while it waited for the mutex, causing a
                                 * ceiling violation.
                                 */
                                ret = curthread->error;
                                curthread->error = 0;
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

        } while (((*m)->m_owner != curthread) && (ret == 0) &&
            (curthread->interrupted == 0) && (curthread->timeout == 0));

        if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
                ret = ETIMEDOUT;

        /*
         * Check to see if this thread was interrupted and
         * is still in the mutex queue of waiting threads:
         */
        if (curthread->interrupted != 0) {
                /* Remove this thread from the mutex queue. */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                if (THR_IN_SYNCQ(curthread))
                        mutex_queue_remove(*m, curthread);
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                /* Check for asynchronous cancellation. */
                if (curthread->continuation != NULL)
                        curthread->continuation((void *) curthread);
        }

        /* Return the completion status: */
        return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);

        curthread = _get_curthread();
        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, NULL);

        return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);
        curthread = _get_curthread();

        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        else if ((*m != NULL) ||
            ((ret = init_static_private(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, NULL);

        return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
        const struct timespec *abs_timeout)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);

        curthread = _get_curthread();
        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization:
         */
        else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, abs_timeout);

        return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
        const struct timespec *abs_timeout)
{
        struct pthread *curthread;
        int     ret = 0;

        if (_thr_initial == NULL)
                _libpthread_init(NULL);
        curthread = _get_curthread();

        if (m == NULL)
                ret = EINVAL;

        /*
         * If the mutex is statically initialized, perform the dynamic
         * initialization marking it private (delete safe):
         */
        else if ((*m != NULL) ||
            ((ret = init_static_private(curthread, m)) == 0))
                ret = mutex_lock_common(curthread, m, abs_timeout);

        return (ret);
}
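
/*
 * Illustrative sketch (not compiled): pthread_mutex_timedlock() takes
 * an *absolute* deadline, and mutex_lock_common() rejects tv_nsec
 * outside [0, 1000000000).  A relative timeout must therefore be added
 * to the current time first; lock_with_timeout is a hypothetical name.
 */
#if 0
#include <errno.h>
#include <pthread.h>
#include <time.h>

static int
lock_with_timeout(pthread_mutex_t *mp, time_t rel_sec)
{
        struct timespec abstime;

        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += rel_sec;
        /* Returns 0 on success, ETIMEDOUT if the deadline passes. */
        return (pthread_mutex_timedlock(mp, &abstime));
}
#endif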

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
        return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
        return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
        struct  pthread *curthread;
        int     ret;

        curthread = _get_curthread();
        if ((ret = _pthread_mutex_lock(m)) == 0) {
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
                (*m)->m_refcount--;
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
        }
        return (ret);
}
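
/*
 * Note on the reference count protocol (a reading aid, not new
 * behavior): a condition-variable wait releases the mutex via
 * _mutex_cv_unlock(), which bumps m_refcount so that
 * _pthread_mutex_destroy() reports EBUSY while waiters still intend
 * to reacquire the mutex; _mutex_cv_lock() drops that reference once
 * the lock has been reacquired.
 */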

static inline int
mutex_self_trylock(pthread_mutex_t m)
{
        int     ret = 0;

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_NORMAL:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                ret = EBUSY;
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                m->m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return (ret);
}

static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
        int ret = 0;

        /*
         * Don't allow evil recursive mutexes for private use
         * in libc and libpthread.
         */
        if (m->m_flags & MUTEX_FLAGS_PRIVATE)
                PANIC("Recurse on a private mutex.");

        switch (m->m_type) {
        /* case PTHREAD_MUTEX_DEFAULT: */
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                /*
                 * POSIX specifies that mutexes should return EDEADLK if a
                 * recursive lock is detected.
                 */
                ret = EDEADLK;
                break;

        case PTHREAD_MUTEX_NORMAL:
                /*
                 * What SUSv2 defines as a 'normal' mutex: intentionally
                 * deadlock on attempts to take a lock you already own.
                 */

                THR_SCHED_LOCK(curthread, curthread);
                THR_SET_STATE(curthread, PS_DEADLOCK);
                THR_SCHED_UNLOCK(curthread, curthread);

                /* Unlock the mutex structure: */
                THR_LOCK_RELEASE(curthread, &m->m_lock);

                /* Schedule the next thread: */
                _thr_sched_switch(curthread);
                break;

        case PTHREAD_MUTEX_RECURSIVE:
                /* Increment the lock count: */
                m->m_count++;
                break;

        default:
                /* Trap invalid mutex types: */
                ret = EINVAL;
        }

        return (ret);
}
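
/*
 * Illustrative sketch (not compiled) of the self-lock semantics
 * implemented above: error-checking mutexes report EDEADLK, recursive
 * mutexes count, and PTHREAD_MUTEX_NORMAL deliberately deadlocks the
 * caller.  example_self_lock is a hypothetical name.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void
example_self_lock(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t m;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        pthread_mutex_lock(&m);
        assert(pthread_mutex_lock(&m) == EDEADLK);      /* relock detected */
        pthread_mutex_unlock(&m);

        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&attr);
}
#endif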

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
        struct pthread *curthread = _get_curthread();
        struct kse_mailbox *kmbx = NULL;
        int ret = 0;

        if (m == NULL || *m == NULL)
                ret = EINVAL;
        else {
                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

                /* Process according to mutex type: */
                switch ((*m)->m_protocol) {
                /* Default POSIX mutex: */
                case PTHREAD_PRIO_NONE:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* POSIX priority inheritance mutex: */
                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                curthread->inherited_priority =
                                        (*m)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* POSIX priority ceiling mutex: */
                case PTHREAD_PRIO_PROTECT:
                        /*
                         * Check if the running thread is not the owner of the
                         * mutex:
                         */
                        if ((*m)->m_owner != curthread)
                                ret = EPERM;
                        else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
                            ((*m)->m_count > 0))
                                /* Decrement the count: */
                                (*m)->m_count--;
                        else {
                                /*
                                 * Clear the count in case this is a recursive
                                 * mutex.
                                 */
                                (*m)->m_count = 0;

                                /*
                                 * Restore the thread's inherited priority and
                                 * recompute the active priority (being careful
                                 * not to override changes in the thread's base
                                 * priority subsequent to locking the mutex).
                                 */
                                THR_SCHED_LOCK(curthread, curthread);
                                curthread->inherited_priority =
                                        (*m)->m_saved_prio;
                                curthread->active_priority =
                                    MAX(curthread->inherited_priority,
                                    curthread->base_priority);

                                /*
                                 * This thread now owns one less priority mutex.
                                 */
                                curthread->priority_mutex_count--;
                                THR_SCHED_UNLOCK(curthread, curthread);

                                /* Remove the mutex from the thread's queue. */
                                MUTEX_ASSERT_IS_OWNED(*m);
                                TAILQ_REMOVE(&(*m)->m_owner->mutexq,
                                    (*m), m_qe);
                                MUTEX_INIT_LINK(*m);

                                /*
                                 * Hand off the mutex to the next waiting
                                 * thread:
                                 */
                                kmbx = mutex_handoff(curthread, *m);
                        }
                        break;

                /* Trap invalid mutex types: */
                default:
                        /* Return an invalid argument error: */
                        ret = EINVAL;
                        break;
                }

                if ((ret == 0) && (add_reference != 0))
                        /* Increment the reference count: */
                        (*m)->m_refcount++;

                /* Leave the critical region if this is a private mutex. */
                if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
                        THR_CRITICAL_LEAVE(curthread);

                /* Unlock the mutex structure: */
                THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

                if (kmbx != NULL)
                        kse_wakeup(kmbx);
        }

        /* Return the completion status: */
        return (ret);
}
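
/*
 * Illustrative sketch (not compiled) of the unlock rules enforced
 * above: only the owner may unlock (EPERM otherwise), and a recursive
 * mutex must be unlocked once per successful lock before ownership is
 * handed off.  The caller is assumed to pass a RECURSIVE mutex.
 */
#if 0
#include <pthread.h>

static void
example_recursive_unlock(pthread_mutex_t *mp)
{
        pthread_mutex_lock(mp);
        pthread_mutex_lock(mp);         /* m_count becomes 1 */
        pthread_mutex_unlock(mp);       /* m_count back to 0; still owned */
        pthread_mutex_unlock(mp);       /* ownership released/handed off */
}
#endif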
1243
1244
1245 /*
1246  * This function is called when a change in base priority occurs for
1247  * a thread that is holding or waiting for a priority protection or
1248  * inheritence mutex.  A change in a threads base priority can effect
1249  * changes to active priorities of other threads and to the ordering
1250  * of mutex locking by waiting threads.
1251  *
1252  * This must be called without the target thread's scheduling lock held.
1253  */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
        struct pthread_mutex *m;

        /* Adjust the priorities of any owned priority mutexes: */
        if (pthread->priority_mutex_count > 0) {
                /*
                 * Rescan the mutexes owned by this thread and correct
                 * their priorities to account for this thread's change
                 * in priority.  This has the side effect of changing
                 * the thread's active priority.
                 *
                 * Be sure to lock the first mutex in the list of owned
                 * mutexes.  This acts as a barrier against another
                 * simultaneous call to change the thread's priority
                 * and against the owning thread releasing the mutex.
                 */
                m = TAILQ_FIRST(&pthread->mutexq);
                if (m != NULL) {
                        THR_LOCK_ACQUIRE(curthread, &m->m_lock);
                        /*
                         * Make sure the thread still owns the lock.
                         */
                        if (m == TAILQ_FIRST(&pthread->mutexq))
                                mutex_rescan_owned(curthread, pthread,
                                    /* rescan all owned */ NULL);
                        THR_LOCK_RELEASE(curthread, &m->m_lock);
                }
        }

        /*
         * If this thread is waiting on a priority inheritance mutex,
         * check for priority adjustments.  A change in priority can
         * also cause a ceiling violation(*) for a thread waiting on
         * a priority protection mutex; we don't perform the check here
         * as it is done in pthread_mutex_unlock.
         *
         * (*) It should be noted that a priority change to a thread
         *     _after_ taking and owning a priority ceiling mutex
         *     does not affect ownership of that mutex; the ceiling
         *     priority is only checked before mutex ownership occurs.
         */
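        /*
         * Example (illustrative): if this thread is blocked on a
         * PTHREAD_PRIO_INHERIT mutex owned by thread O and its base
         * priority is raised from 10 to 20, both the mutex priority
         * and O's active priority must be re-evaluated so that O runs
         * at priority 20 until it releases the mutex.
         */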
        if (propagate_prio != 0) {
                /*
                 * Lock the thread's scheduling queue.  This is a bit
                 * convoluted; the "in synchronization queue flag" can
                 * only be cleared with both the thread's scheduling and
                 * mutex locks held.  The thread's pointer to the wanted
                 * mutex is guaranteed to be valid during this time.
                 */
                THR_SCHED_LOCK(curthread, pthread);

                if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
                    ((m = pthread->data.mutex) == NULL))
                        THR_SCHED_UNLOCK(curthread, pthread);
                else {
                        /*
                         * This thread is currently waiting on a mutex; unlock
                         * the scheduling queue lock and lock the mutex.  We
                         * can't hold both at the same time because the locking
                         * order could cause a deadlock.
                         */
                        THR_SCHED_UNLOCK(curthread, pthread);
                        THR_LOCK_ACQUIRE(curthread, &m->m_lock);

                        /*
                         * Check to make sure this thread is still in the
                         * same state (the lock above can yield the CPU to
                         * another thread or the thread may be running on
                         * another CPU).
                         */
                        if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
                            (pthread->data.mutex == m)) {
                                /*
                                 * Remove and reinsert this thread into
                                 * the list of waiting threads to preserve
                                 * decreasing priority order.
                                 */
                                mutex_queue_remove(m, pthread);
                                mutex_queue_enq(m, pthread);

                                if (m->m_protocol == PTHREAD_PRIO_INHERIT)
                                        /* Adjust priorities: */
                                        mutex_priority_adjust(curthread, m);
                        }

                        /* Unlock the mutex structure: */
                        THR_LOCK_RELEASE(curthread, &m->m_lock);
                }
        }
}
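
#if 0
/*
 * Illustrative sketch only (not part of the library): a caller that
 * changes a thread's base priority would be expected to drop the
 * target's scheduling lock before notifying the mutex code, since
 * _mutex_notify_priochange() must be called without that lock held.
 * The helper name example_set_base_priority() is hypothetical.
 */
static void
example_set_base_priority(struct pthread *curthread, struct pthread *thread,
    int prio)
{
        THR_SCHED_LOCK(curthread, thread);
        thread->base_priority = prio;
        THR_SCHED_UNLOCK(curthread, thread);

        /* Propagate the change to owned and awaited priority mutexes. */
        _mutex_notify_priochange(curthread, thread, 1);
}
#endif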

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when the priority of a thread already in the mutex waiting queue
 * changes.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
        pthread_mutex_t m = mutex;
        struct pthread  *pthread_next, *pthread = mutex->m_owner;
        int             done, temp_prio;

        /*
         * Calculate the mutex priority as the maximum of the highest
         * active priority of any waiting threads and the owning thread's
         * active priority(*).
         *
         * (*) Because the owning thread's current active priority may
         *     reflect priority inherited from this mutex (and the mutex
         *     priority may have changed) we must recalculate the active
         *     priority based on the thread's saved inherited priority
         *     and its base priority.
         */
        pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
        temp_prio = MAX(pthread_next->active_priority,
            MAX(m->m_saved_prio, pthread->base_priority));
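        /*
         * Worked example: with the highest waiter at active priority 20,
         * a saved mutex priority of 15 and an owner base priority of 10,
         * temp_prio = MAX(20, MAX(15, 10)) = 20.
         */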

        /* See if this mutex really needs adjusting: */
        if (temp_prio == m->m_prio)
                /* No need to propagate the priority: */
                return;

        /* Set new priority of the mutex: */
        m->m_prio = temp_prio;

        /*
         * Don't unlock the mutex passed in as an argument.  It is
         * expected to be locked and unlocked by the caller.
         */
        done = 1;
        do {
                /*
                 * Save the thread's priority before rescanning the
                 * owned mutexes:
                 */
                temp_prio = pthread->active_priority;

                /*
                 * Fix the priorities for all mutexes held by the owning
                 * thread since taking this mutex.  This also has a
                 * potential side effect of changing the thread's priority.
                 *
                 * At this point the mutex is locked by the current thread.
                 * The owning thread can't release the mutex until it is
                 * unlocked, so we should be able to safely walk its list
                 * of owned mutexes.
                 */
                mutex_rescan_owned(curthread, pthread, m);

                /*
                 * If this isn't the first time through the loop,
                 * the current mutex needs to be unlocked.
                 */
                if (done == 0)
                        THR_LOCK_RELEASE(curthread, &m->m_lock);

                /* Assume we're done unless told otherwise: */
                done = 1;

                /*
                 * If the thread is currently waiting on a mutex, check
                 * to see if the thread's new priority has affected the
                 * priority of the mutex.
                 */
                if ((temp_prio != pthread->active_priority) &&
                    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
                    ((m = pthread->data.mutex) != NULL) &&
                    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
                        /* Lock the mutex structure: */
                        THR_LOCK_ACQUIRE(curthread, &m->m_lock);

                        /*
                         * Make sure the thread is still waiting on the
                         * mutex:
                         */
                        if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
                            (m == pthread->data.mutex)) {
                                /*
                                 * The priority for this thread has changed.
                                 * Remove and reinsert this thread into the
                                 * list of waiting threads to preserve
                                 * decreasing priority order.
                                 */
                                mutex_queue_remove(m, pthread);
                                mutex_queue_enq(m, pthread);

                                /*
                                 * Grab the waiting thread with highest
                                 * priority:
                                 */
                                pthread_next = TAILQ_FIRST(&m->m_queue);

                                /*
                                 * Calculate the mutex priority as the maximum
                                 * of the highest active priority of any
                                 * waiting threads and the owning thread's
                                 * active priority.
                                 */
                                temp_prio = MAX(pthread_next->active_priority,
                                    MAX(m->m_saved_prio,
                                    m->m_owner->base_priority));

                                if (temp_prio != m->m_prio) {
                                        /*
                                         * The priority needs to be propagated
                                         * to the mutex this thread is waiting
                                         * on and up to the owner of that mutex.
                                         */
                                        m->m_prio = temp_prio;
                                        pthread = m->m_owner;

                                        /* We're not done yet: */
                                        done = 0;
                                }
                        }
                        /* Only release the mutex if we're done: */
                        if (done != 0)
                                THR_LOCK_RELEASE(curthread, &m->m_lock);
                }
        } while (done == 0);
}
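
/*
 * Example of the propagation chain handled above (illustrative): if
 * T1 blocks on mutex M1 owned by T2 while T2 is itself blocked on
 * mutex M2 owned by T3, raising T1's priority raises M1's priority,
 * which raises T2's active priority, which in turn may raise M2's
 * priority and T3's active priority on the next pass through the loop.
 */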

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
        struct pthread_mutex    *m;
        struct pthread          *pthread_next;
        int                     active_prio, inherited_prio;

        /*
         * Start walking the mutexes the thread has taken since
         * taking this mutex.
         */
        if (mutex == NULL) {
                /*
                 * A null mutex means start at the beginning of the owned
                 * mutex list.
                 */
                m = TAILQ_FIRST(&pthread->mutexq);

                /* There is no inherited priority yet. */
                inherited_prio = 0;
        } else {
                /*
                 * The caller wants to start after a specific mutex.  It
                 * is assumed that this mutex is a priority inheritance
                 * mutex and that its priority has been correctly
                 * calculated.
                 */
                m = TAILQ_NEXT(mutex, m_qe);

                /* Start inheriting priority from the specified mutex. */
                inherited_prio = mutex->m_prio;
        }
        active_prio = MAX(inherited_prio, pthread->base_priority);

        for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
                /*
                 * We only want to deal with priority inheritance
                 * mutexes.  This might be optimized by only placing
                 * priority inheritance mutexes into the owned mutex
                 * list, but it may prove to be useful having all
                 * owned mutexes in this list.  Consider a thread
                 * exiting while holding mutexes...
                 */
                if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
                        /*
                         * Fix the owner's saved (inherited) priority to
                         * reflect the priority of the previous mutex.
                         */
                        m->m_saved_prio = inherited_prio;

                        if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
                                /* Recalculate the priority of the mutex: */
                                m->m_prio = MAX(active_prio,
                                     pthread_next->active_priority);
                        else
                                m->m_prio = active_prio;

                        /* Recalculate new inherited and active priorities: */
                        inherited_prio = m->m_prio;
                        active_prio = MAX(m->m_prio, pthread->base_priority);
                }
        }
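
        /*
         * Worked example: for a thread with base priority 10 owning
         * PRIO_INHERIT mutexes M1 (highest waiter at 25) and M2 (no
         * waiters), the walk sets M1->m_prio = MAX(10, 25) = 25 and
         * then M2->m_prio = MAX(25, 10) = 25, leaving the thread with
         * an inherited priority of 25.
         */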

        /*
         * Fix the thread's inherited priority and recalculate its
         * active priority.
         */
        pthread->inherited_priority = inherited_prio;
        active_prio = MAX(inherited_prio, pthread->base_priority);

        if (active_prio != pthread->active_priority) {
                /* Lock the thread's scheduling queue: */
                THR_SCHED_LOCK(curthread, pthread);

                if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
                        /*
                         * This thread is not in a run queue.  Just set
                         * its active priority.
                         */
                        pthread->active_priority = active_prio;
                } else {
                        /*
                         * This thread is in a run queue.  Remove it from
                         * the queue before changing its priority:
                         */
                        THR_RUNQ_REMOVE(pthread);

                        /*
                         * POSIX states that if the priority is being
                         * lowered, the thread must be inserted at the
                         * head of the queue for its priority if it owns
                         * any priority protection or inheritance mutexes.
                         */
                        if ((active_prio < pthread->active_priority) &&
                            (pthread->priority_mutex_count > 0)) {
                                /* Set the new active priority. */
                                pthread->active_priority = active_prio;

                                THR_RUNQ_INSERT_HEAD(pthread);
                        } else {
                                /* Set the new active priority. */
                                pthread->active_priority = active_prio;

                                THR_RUNQ_INSERT_TAIL(pthread);
                        }
                }
                THR_SCHED_UNLOCK(curthread, pthread);
        }
}

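/*
 * Unlock any private mutexes still owned by the given thread.
 */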
void
_mutex_unlock_private(pthread_t pthread)
{
        struct pthread_mutex    *m, *m_next;

        for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
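                /* Save the next entry first; unlocking removes m from mutexq. */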
                m_next = TAILQ_NEXT(m, m_qe);
                if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
                        _pthread_mutex_unlock(&m);
        }
}

/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
static void
mutex_lock_backout(void *arg)
{
        struct pthread *curthread = (struct pthread *)arg;
        struct pthread_mutex *m;

        if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
                /*
                 * Any other thread may clear the "in sync queue flag",
                 * but only the current thread can clear the pointer
                 * to the mutex.  So if the flag is set, we can
                 * guarantee that the pointer to the mutex is valid.
                 * The only problem may be if the mutex is destroyed
                 * out from under us, but that should be considered
                 * an application bug.
                 */
                m = curthread->data.mutex;

                /* Lock the mutex structure: */
                THR_LOCK_ACQUIRE(curthread, &m->m_lock);

                /*
                 * Check to make sure this thread doesn't already own
                 * the mutex.  Since mutexes are unlocked with direct
                 * handoffs, it is possible the previous owner gave it
                 * to us after we checked the sync queue flag and before
                 * we locked the mutex structure.
                 */
                if (m->m_owner == curthread) {
                        THR_LOCK_RELEASE(curthread, &m->m_lock);
                        mutex_unlock_common(&m, /* add_reference */ 0);
                } else {
                        /*
                         * Remove ourselves from the mutex queue and
                         * clear the pointer to the mutex.  We may no
                         * longer be in the mutex queue, but the removal
                         * function will DTRT.
                         */
                        mutex_queue_remove(m, curthread);
                        curthread->data.mutex = NULL;
                        THR_LOCK_RELEASE(curthread, &m->m_lock);
                }
        }
        /* No need to call this again. */
        curthread->sigbackout = NULL;
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order and hand the mutex to it.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
        struct kse_mailbox *kmbx = NULL;
        struct pthread *pthread;

        /* Keep dequeueing until we find a valid thread: */
        mutex->m_owner = NULL;
        pthread = TAILQ_FIRST(&mutex->m_queue);
        while (pthread != NULL) {
                /* Take the thread's scheduling lock: */
                THR_SCHED_LOCK(curthread, pthread);

                /* Remove the thread from the mutex queue: */
                TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
                pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

                /*
                 * Set up ownership of the mutex according to its
                 * protocol:
                 */
                switch (mutex->m_protocol) {
                case PTHREAD_PRIO_NONE:
                        /*
                         * Assign the new owner and add the mutex to the
                         * thread's list of owned mutexes.
                         */
                        mutex->m_owner = pthread;
                        TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
                        break;

                case PTHREAD_PRIO_INHERIT:
                        /*
                         * Assign the new owner and add the mutex to the
                         * thread's list of owned mutexes.
                         */
                        mutex->m_owner = pthread;
                        TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

                        /* Track number of priority mutexes owned: */
                        pthread->priority_mutex_count++;

                        /*
                         * Set the priority of the mutex.  Since our waiting
                         * threads are in descending priority order, the
                         * priority of the mutex becomes the active priority
                         * of the thread we just dequeued.
                         */
                        mutex->m_prio = pthread->active_priority;

                        /* Save the owning thread's inherited priority: */
                        mutex->m_saved_prio = pthread->inherited_priority;

                        /*
                         * The owning thread's inherited priority now becomes
                         * its active priority (the priority of the mutex).
                         */
                        pthread->inherited_priority = mutex->m_prio;
                        break;

                case PTHREAD_PRIO_PROTECT:
                        if (pthread->active_priority > mutex->m_prio) {
                                /*
                                 * Either the mutex ceiling priority has
                                 * been lowered and/or this thread's priority
                                 * has been raised subsequent to the thread
                                 * being queued on the waiting list.
                                 */
                                pthread->error = EINVAL;
                        } else {
                                /*
                                 * Assign the new owner and add the mutex
                                 * to the thread's list of owned mutexes.
                                 */
                                mutex->m_owner = pthread;
                                TAILQ_INSERT_TAIL(&pthread->mutexq,
                                    mutex, m_qe);

                                /* Track number of priority mutexes owned: */
                                pthread->priority_mutex_count++;

                                /*
                                 * Save the owning thread's inherited
                                 * priority:
                                 */
                                mutex->m_saved_prio =
                                    pthread->inherited_priority;

                                /*
                                 * The owning thread inherits the ceiling
                                 * priority of the mutex and executes at
                                 * that priority:
                                 */
                                pthread->inherited_priority = mutex->m_prio;
                                pthread->active_priority = mutex->m_prio;
                        }
                        break;
                }

                /* Make the thread runnable and unlock the scheduling queue: */
                kmbx = _thr_setrunnable_unlocked(pthread);

                /* Add a preemption point. */
                if ((curthread->kseg == pthread->kseg) &&
                    (pthread->active_priority > curthread->active_priority))
                        curthread->critical_yield = 1;

                if (mutex->m_owner == pthread) {
                        /* We're done; a valid owner was found. */
                        if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
                                THR_CRITICAL_ENTER(pthread);
                        THR_SCHED_UNLOCK(curthread, pthread);
                        break;
                }
                THR_SCHED_UNLOCK(curthread, pthread);
                /* Get the next thread from the waiting queue: */
                pthread = TAILQ_NEXT(pthread, sqe);
        }

        if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
                /* No waiters remain; the mutex reverts to no priority: */
                mutex->m_prio = 0;
        return (kmbx);
}
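
/*
 * Example (illustrative): handing a PTHREAD_PRIO_INHERIT mutex to a
 * waiter with active priority 20 sets the mutex priority to 20 and
 * makes 20 the new owner's inherited priority; the owner's previous
 * inherited priority is kept in m_saved_prio so that it can be
 * restored when the mutex is released.
 */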

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static inline pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
        pthread_t pthread;

        while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
                TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
                pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

                /*
                 * Only exit the loop if the thread hasn't been
                 * cancelled.
                 */
                if (pthread->interrupted == 0)
                        break;
        }

        return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
        if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
                TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
                pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
        }
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
        pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

        THR_ASSERT_NOT_IN_SYNCQ(pthread);
        /*
         * For the common case of all threads having equal priority,
         * we perform a quick check against the priority of the thread
         * at the tail of the queue.
         */
        if ((tid == NULL) ||
            (pthread->active_priority <= tid->active_priority))
                TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
        else {
                tid = TAILQ_FIRST(&mutex->m_queue);
                while (pthread->active_priority <= tid->active_priority)
                        tid = TAILQ_NEXT(tid, sqe);
                TAILQ_INSERT_BEFORE(tid, pthread, sqe);
        }
        pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
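
/*
 * Worked example: inserting a thread of priority 20 into the queue
 * [30, 20, 20, 10] walks past the existing 20s (the "<=" test keeps
 * FIFO order among equal priorities) and inserts before the 10,
 * giving [30, 20, 20, 20*, 10].
 */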

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
        struct pthread  *curthread = _get_curthread();

        return ((*mutex)->m_owner == curthread);
}

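#if 0
/*
 * Illustrative sketch only (not part of the library): using the
 * ownership test to assert a locking protocol inside an internal
 * helper.  The helper name example_assert_owned() is hypothetical.
 */
static void
example_assert_owned(pthread_mutex_t *mp)
{
        if (_pthread_mutex_isowned_np(mp) == 0)
                PANIC("caller does not own the mutex");
}
#endif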