/* lib/libkse/thread/thr_mutex.c — FreeBSD releng/7.2 */
1 /*
2  * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #include <stdlib.h>
32 #include <errno.h>
33 #include <string.h>
34 #include <sys/param.h>
35 #include <sys/queue.h>
36 #include <pthread.h>
37 #include "thr_private.h"
38
39 #if defined(_PTHREADS_INVARIANTS)
40 #define MUTEX_INIT_LINK(m)              do {            \
41         (m)->m_qe.tqe_prev = NULL;                      \
42         (m)->m_qe.tqe_next = NULL;                      \
43 } while (0)
44 #define MUTEX_ASSERT_IS_OWNED(m)        do {            \
45         if ((m)->m_qe.tqe_prev == NULL)                 \
46                 PANIC("mutex is not on list");          \
47 } while (0)
48 #define MUTEX_ASSERT_NOT_OWNED(m)       do {            \
49         if (((m)->m_qe.tqe_prev != NULL) ||             \
50             ((m)->m_qe.tqe_next != NULL))               \
51                 PANIC("mutex is on list");              \
52 } while (0)
53 #define THR_ASSERT_NOT_IN_SYNCQ(thr)    do {            \
54         THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
55             "thread in syncq when it shouldn't be.");   \
56 } while (0);
57 #else
58 #define MUTEX_INIT_LINK(m)
59 #define MUTEX_ASSERT_IS_OWNED(m)
60 #define MUTEX_ASSERT_NOT_OWNED(m)
61 #define THR_ASSERT_NOT_IN_SYNCQ(thr)
62 #endif
63
64 #define THR_IN_MUTEXQ(thr)      (((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
65 #define MUTEX_DESTROY(m) do {           \
66         _lock_destroy(&(m)->m_lock);    \
67         free(m);                        \
68 } while (0)
69
70
71 /*
72  * Prototypes
73  */
74 static struct kse_mailbox *mutex_handoff(struct pthread *,
75                             struct pthread_mutex *);
76 static inline int       mutex_self_trylock(struct pthread *, pthread_mutex_t);
77 static inline int       mutex_self_lock(struct pthread *, pthread_mutex_t);
78 static int              mutex_unlock_common(pthread_mutex_t *, int);
79 static void             mutex_priority_adjust(struct pthread *, pthread_mutex_t);
80 static void             mutex_rescan_owned (struct pthread *, struct pthread *,
81                             struct pthread_mutex *);
82 static inline pthread_t mutex_queue_deq(pthread_mutex_t);
83 static inline void      mutex_queue_remove(pthread_mutex_t, pthread_t);
84 static inline void      mutex_queue_enq(pthread_mutex_t, pthread_t);
85 static void             mutex_lock_backout(void *arg);
86
87 static struct pthread_mutex_attr        static_mutex_attr =
88     PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
89 static pthread_mutexattr_t              static_mattr = &static_mutex_attr;
90
91 LT10_COMPAT_PRIVATE(__pthread_mutex_init);
92 LT10_COMPAT_PRIVATE(_pthread_mutex_init);
93 LT10_COMPAT_DEFAULT(pthread_mutex_init);
94 LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
95 LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
96 LT10_COMPAT_DEFAULT(pthread_mutex_lock);
97 LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
98 LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
99 LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
100 LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
101 LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
102 LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
103 LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
104 LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
105 LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
106 LT10_COMPAT_DEFAULT(pthread_mutex_unlock);
107
108 /* Single underscore versions provided for libc internal usage: */
109 __weak_reference(__pthread_mutex_init, pthread_mutex_init);
110 __weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
111 __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
112 __weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
113
114 /* No difference between libc and application usage of these: */
115 __weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
116 __weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);
117
118 static int
119 thr_mutex_init(pthread_mutex_t *mutex,
120     const pthread_mutexattr_t *mutex_attr, void *(calloc_cb)(size_t, size_t))
121 {
122         struct pthread_mutex *pmutex;
123         enum pthread_mutextype type;
124         int             protocol;
125         int             ceiling;
126         int             flags;
127         int             ret = 0;
128
129         if (mutex == NULL)
130                 ret = EINVAL;
131
132         /* Check if default mutex attributes: */
133         else if (mutex_attr == NULL || *mutex_attr == NULL) {
134                 /* Default to a (error checking) POSIX mutex: */
135                 type = PTHREAD_MUTEX_ERRORCHECK;
136                 protocol = PTHREAD_PRIO_NONE;
137                 ceiling = THR_MAX_PRIORITY;
138                 flags = 0;
139         }
140
141         /* Check mutex type: */
142         else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
143             ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
144                 /* Return an invalid argument error: */
145                 ret = EINVAL;
146
147         /* Check mutex protocol: */
148         else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
149             ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
150                 /* Return an invalid argument error: */
151                 ret = EINVAL;
152
153         else {
154                 /* Use the requested mutex type and protocol: */
155                 type = (*mutex_attr)->m_type;
156                 protocol = (*mutex_attr)->m_protocol;
157                 ceiling = (*mutex_attr)->m_ceiling;
158                 flags = (*mutex_attr)->m_flags;
159         }
160
161         /* Check no errors so far: */
162         if (ret == 0) {
163                 if ((pmutex = (pthread_mutex_t)
164                     calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
165                         ret = ENOMEM;
166                 else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
167                     _thr_lock_wait, _thr_lock_wakeup, calloc_cb) != 0) {
168                         free(pmutex);
169                         *mutex = NULL;
170                         ret = ENOMEM;
171                 } else {
172                         /* Set the mutex flags: */
173                         pmutex->m_flags = flags;
174
175                         /* Process according to mutex type: */
176                         switch (type) {
177                         /* case PTHREAD_MUTEX_DEFAULT: */
178                         case PTHREAD_MUTEX_ERRORCHECK:
179                         case PTHREAD_MUTEX_NORMAL:
180                         case PTHREAD_MUTEX_ADAPTIVE_NP:
181                                 /* Nothing to do here. */
182                                 break;
183
184                         /* Single UNIX Spec 2 recursive mutex: */
185                         case PTHREAD_MUTEX_RECURSIVE:
186                                 /* Reset the mutex count: */
187                                 pmutex->m_count = 0;
188                                 break;
189
190                         /* Trap invalid mutex types: */
191                         default:
192                                 /* Return an invalid argument error: */
193                                 ret = EINVAL;
194                                 break;
195                         }
196                         if (ret == 0) {
197                                 /* Initialise the rest of the mutex: */
198                                 TAILQ_INIT(&pmutex->m_queue);
199                                 pmutex->m_flags |= MUTEX_FLAGS_INITED;
200                                 pmutex->m_owner = NULL;
201                                 pmutex->m_type = type;
202                                 pmutex->m_protocol = protocol;
203                                 pmutex->m_refcount = 0;
204                                 if (protocol == PTHREAD_PRIO_PROTECT)
205                                         pmutex->m_prio = ceiling;
206                                 else
207                                         pmutex->m_prio = -1;
208                                 pmutex->m_saved_prio = 0;
209                                 MUTEX_INIT_LINK(pmutex);
210                                 *mutex = pmutex;
211                         } else {
212                                 /* Free the mutex lock structure: */
213                                 MUTEX_DESTROY(pmutex);
214                                 *mutex = NULL;
215                         }
216                 }
217         }
218         /* Return the completion status: */
219         return (ret);
220 }
221
222 int
223 __pthread_mutex_init(pthread_mutex_t *mutex,
224     const pthread_mutexattr_t *mutex_attr)
225 {
226
227         return (thr_mutex_init(mutex, mutex_attr, calloc));
228 }
229
230 int
231 _pthread_mutex_init(pthread_mutex_t *mutex,
232     const pthread_mutexattr_t *mutex_attr)
233 {
234         struct pthread_mutex_attr mattr, *mattrp;
235
236         if ((mutex_attr == NULL) || (*mutex_attr == NULL))
237                 return (__pthread_mutex_init(mutex, &static_mattr));
238         else {
239                 mattr = **mutex_attr;
240                 mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
241                 mattrp = &mattr;
242                 return (__pthread_mutex_init(mutex, &mattrp));
243         }
244 }
245
246 /* This function is used internally by malloc. */
247 int
248 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
249     void *(calloc_cb)(size_t, size_t))
250 {
251         static const struct pthread_mutex_attr attr = {
252                 .m_type = PTHREAD_MUTEX_NORMAL,
253                 .m_protocol = PTHREAD_PRIO_NONE,
254                 .m_ceiling = 0,
255                 .m_flags = 0
256         };
257         static const struct pthread_mutex_attr *pattr = &attr;
258
259         return (thr_mutex_init(mutex, (pthread_mutexattr_t *)&pattr,
260             calloc_cb));
261 }
262
263 void
264 _thr_mutex_reinit(pthread_mutex_t *mutex)
265 {
266         _lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
267             _thr_lock_wait, _thr_lock_wakeup);
268         TAILQ_INIT(&(*mutex)->m_queue);
269         (*mutex)->m_owner = NULL;
270         (*mutex)->m_count = 0;
271         (*mutex)->m_refcount = 0;
272         (*mutex)->m_prio = 0;
273         (*mutex)->m_saved_prio = 0;
274 }
275
276 int
277 _pthread_mutex_destroy(pthread_mutex_t *mutex)
278 {
279         struct pthread  *curthread = _get_curthread();
280         pthread_mutex_t m;
281         int ret = 0;
282
283         if (mutex == NULL || *mutex == NULL)
284                 ret = EINVAL;
285         else {
286                 /* Lock the mutex structure: */
287                 THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
288
289                 /*
290                  * Check to see if this mutex is in use:
291                  */
292                 if (((*mutex)->m_owner != NULL) ||
293                     (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
294                     ((*mutex)->m_refcount != 0)) {
295                         ret = EBUSY;
296
297                         /* Unlock the mutex structure: */
298                         THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
299                 } else {
300                         /*
301                          * Save a pointer to the mutex so it can be free'd
302                          * and set the caller's pointer to NULL:
303                          */
304                         m = *mutex;
305                         *mutex = NULL;
306
307                         /* Unlock the mutex structure: */
308                         THR_LOCK_RELEASE(curthread, &m->m_lock);
309
310                         /*
311                          * Free the memory allocated for the mutex
312                          * structure:
313                          */
314                         MUTEX_ASSERT_NOT_OWNED(m);
315                         MUTEX_DESTROY(m);
316                 }
317         }
318
319         /* Return the completion status: */
320         return (ret);
321 }
322
323 static int
324 init_static(struct pthread *thread, pthread_mutex_t *mutex)
325 {
326         int ret;
327
328         THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
329
330         if (*mutex == NULL)
331                 ret = pthread_mutex_init(mutex, NULL);
332         else
333                 ret = 0;
334
335         THR_LOCK_RELEASE(thread, &_mutex_static_lock);
336
337         return (ret);
338 }
339
340 static int
341 init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
342 {
343         int ret;
344
345         THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);
346
347         if (*mutex == NULL)
348                 ret = pthread_mutex_init(mutex, &static_mattr);
349         else
350                 ret = 0;
351
352         THR_LOCK_RELEASE(thread, &_mutex_static_lock);
353
354         return (ret);
355 }
356
/*
 * Guts of the trylock family: attempt to acquire *mutex on behalf of
 * curthread without blocking.
 *
 * Returns 0 on success, EBUSY if another thread owns the mutex,
 * EINVAL for an unknown protocol or a priority-ceiling violation, or
 * the type-dependent result of mutex_self_trylock() when curthread
 * already owns the mutex.  On success for a private mutex the thread
 * enters a critical region (left again on unlock).
 */
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int private;
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
	private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex protocol: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE: 
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			/* Recursive attempt; result depends on mutex type. */
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritance mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			/* Recursive attempt; result depends on mutex type. */
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			/* Recursive attempt; result depends on mutex type. */
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	/* Private mutexes make the holder delete-safe while held. */
	if (ret == 0 && private)
		THR_CRITICAL_ENTER(curthread);

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}
485
486 int
487 __pthread_mutex_trylock(pthread_mutex_t *mutex)
488 {
489         struct pthread *curthread = _get_curthread();
490         int ret = 0;
491
492         if (mutex == NULL)
493                 ret = EINVAL;
494
495         /*
496          * If the mutex is statically initialized, perform the dynamic
497          * initialization:
498          */
499         else if ((*mutex != NULL) ||
500             ((ret = init_static(curthread, mutex)) == 0))
501                 ret = mutex_trylock_common(curthread, mutex);
502
503         return (ret);
504 }
505
506 int
507 _pthread_mutex_trylock(pthread_mutex_t *mutex)
508 {
509         struct pthread  *curthread = _get_curthread();
510         int     ret = 0;
511
512         if (mutex == NULL)
513                 ret = EINVAL;
514
515         /*
516          * If the mutex is statically initialized, perform the dynamic
517          * initialization marking the mutex private (delete safe):
518          */
519         else if ((*mutex != NULL) ||
520             ((ret = init_static_private(curthread, mutex)) == 0))
521                 ret = mutex_trylock_common(curthread, mutex);
522
523         return (ret);
524 }
525
526 static int
527 mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
528         const struct timespec * abstime)
529 {
530         int     private;
531         int     ret = 0;
532
533         THR_ASSERT((m != NULL) && (*m != NULL),
534             "Uninitialized mutex in pthread_mutex_trylock_basic");
535
536         if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
537             abstime->tv_nsec >= 1000000000))
538                 return (EINVAL);
539
540         /* Reset the interrupted flag: */
541         curthread->interrupted = 0;
542         curthread->timeout = 0;
543         curthread->wakeup_time.tv_sec = -1;
544
545         private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;
546
547         /*
548          * Enter a loop waiting to become the mutex owner.  We need a
549          * loop in case the waiting thread is interrupted by a signal
550          * to execute a signal handler.  It is not (currently) possible
551          * to remain in the waiting queue while running a handler.
552          * Instead, the thread is interrupted and backed out of the
553          * waiting queue prior to executing the signal handler.
554          */
555         do {
556                 /* Lock the mutex structure: */
557                 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
558
559                 /*
560                  * If the mutex was statically allocated, properly
561                  * initialize the tail queue.
562                  */
563                 if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
564                         TAILQ_INIT(&(*m)->m_queue);
565                         (*m)->m_flags |= MUTEX_FLAGS_INITED;
566                         MUTEX_INIT_LINK(*m);
567                 }
568
569                 /* Process according to mutex type: */
570                 switch ((*m)->m_protocol) {
571                 /* Default POSIX mutex: */
572                 case PTHREAD_PRIO_NONE:
573                         if ((*m)->m_owner == NULL) {
574                                 /* Lock the mutex for this thread: */
575                                 (*m)->m_owner = curthread;
576
577                                 /* Add to the list of owned mutexes: */
578                                 MUTEX_ASSERT_NOT_OWNED(*m);
579                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
580                                     (*m), m_qe);
581                                 if (private)
582                                         THR_CRITICAL_ENTER(curthread);
583
584                                 /* Unlock the mutex structure: */
585                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
586                         } else if ((*m)->m_owner == curthread) {
587                                 ret = mutex_self_lock(curthread, *m);
588
589                                 /* Unlock the mutex structure: */
590                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
591                         } else {
592                                 /*
593                                  * Join the queue of threads waiting to lock
594                                  * the mutex and save a pointer to the mutex.
595                                  */
596                                 mutex_queue_enq(*m, curthread);
597                                 curthread->data.mutex = *m;
598                                 curthread->sigbackout = mutex_lock_backout;
599                                 /*
600                                  * This thread is active and is in a critical
601                                  * region (holding the mutex lock); we should
602                                  * be able to safely set the state.
603                                  */
604                                 THR_SCHED_LOCK(curthread, curthread);
605                                 /* Set the wakeup time: */
606                                 if (abstime) {
607                                         curthread->wakeup_time.tv_sec =
608                                                 abstime->tv_sec;
609                                         curthread->wakeup_time.tv_nsec =
610                                                 abstime->tv_nsec;
611                                 }
612
613                                 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
614                                 THR_SCHED_UNLOCK(curthread, curthread);
615
616                                 /* Unlock the mutex structure: */
617                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
618
619                                 /* Schedule the next thread: */
620                                 _thr_sched_switch(curthread);
621
622                                 if (THR_IN_MUTEXQ(curthread)) {
623                                         THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
624                                         mutex_queue_remove(*m, curthread);
625                                         THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
626                                 }
627                                 /*
628                                  * Only clear these after assuring the
629                                  * thread is dequeued.
630                                  */
631                                 curthread->data.mutex = NULL;
632                                 curthread->sigbackout = NULL;
633                         }
634                         break;
635
636                 /* POSIX priority inheritence mutex: */
637                 case PTHREAD_PRIO_INHERIT:
638                         /* Check if this mutex is not locked: */
639                         if ((*m)->m_owner == NULL) {
640                                 /* Lock the mutex for this thread: */
641                                 (*m)->m_owner = curthread;
642
643                                 THR_SCHED_LOCK(curthread, curthread);
644                                 /* Track number of priority mutexes owned: */
645                                 curthread->priority_mutex_count++;
646
647                                 /*
648                                  * The mutex takes on attributes of the
649                                  * running thread when there are no waiters.
650                                  * Make sure the thread's scheduling lock is
651                                  * held while priorities are adjusted.
652                                  */
653                                 (*m)->m_prio = curthread->active_priority;
654                                 (*m)->m_saved_prio =
655                                     curthread->inherited_priority;
656                                 curthread->inherited_priority = (*m)->m_prio;
657                                 THR_SCHED_UNLOCK(curthread, curthread);
658
659                                 /* Add to the list of owned mutexes: */
660                                 MUTEX_ASSERT_NOT_OWNED(*m);
661                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
662                                     (*m), m_qe);
663                                 if (private)
664                                         THR_CRITICAL_ENTER(curthread);
665
666                                 /* Unlock the mutex structure: */
667                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
668                         } else if ((*m)->m_owner == curthread) {
669                                 ret = mutex_self_lock(curthread, *m);
670
671                                 /* Unlock the mutex structure: */
672                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
673                         } else {
674                                 /*
675                                  * Join the queue of threads waiting to lock
676                                  * the mutex and save a pointer to the mutex.
677                                  */
678                                 mutex_queue_enq(*m, curthread);
679                                 curthread->data.mutex = *m;
680                                 curthread->sigbackout = mutex_lock_backout;
681
682                                 /*
683                                  * This thread is active and is in a critical
684                                  * region (holding the mutex lock); we should
685                                  * be able to safely set the state.
686                                  */
687                                 if (curthread->active_priority > (*m)->m_prio)
688                                         /* Adjust priorities: */
689                                         mutex_priority_adjust(curthread, *m);
690
691                                 THR_SCHED_LOCK(curthread, curthread);
692                                 /* Set the wakeup time: */
693                                 if (abstime) {
694                                         curthread->wakeup_time.tv_sec =
695                                                 abstime->tv_sec;
696                                         curthread->wakeup_time.tv_nsec =
697                                                 abstime->tv_nsec;
698                                 }
699                                 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
700                                 THR_SCHED_UNLOCK(curthread, curthread);
701
702                                 /* Unlock the mutex structure: */
703                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
704
705                                 /* Schedule the next thread: */
706                                 _thr_sched_switch(curthread);
707
708                                 if (THR_IN_MUTEXQ(curthread)) {
709                                         THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
710                                         mutex_queue_remove(*m, curthread);
711                                         THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
712                                 }
713                                 /*
714                                  * Only clear these after assuring the
715                                  * thread is dequeued.
716                                  */
717                                 curthread->data.mutex = NULL;
718                                 curthread->sigbackout = NULL;
719                         }
720                         break;
721
722                 /* POSIX priority protection mutex: */
723                 case PTHREAD_PRIO_PROTECT:
724                         /* Check for a priority ceiling violation: */
725                         if (curthread->active_priority > (*m)->m_prio) {
726                                 /* Unlock the mutex structure: */
727                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
728                                 ret = EINVAL;
729                         }
730                         /* Check if this mutex is not locked: */
731                         else if ((*m)->m_owner == NULL) {
732                                 /*
733                                  * Lock the mutex for the running
734                                  * thread:
735                                  */
736                                 (*m)->m_owner = curthread;
737
738                                 THR_SCHED_LOCK(curthread, curthread);
739                                 /* Track number of priority mutexes owned: */
740                                 curthread->priority_mutex_count++;
741
742                                 /*
743                                  * The running thread inherits the ceiling
744                                  * priority of the mutex and executes at that
745                                  * priority.  Make sure the thread's
746                                  * scheduling lock is held while priorities
747                                  * are adjusted.
748                                  */
749                                 curthread->active_priority = (*m)->m_prio;
750                                 (*m)->m_saved_prio =
751                                     curthread->inherited_priority;
752                                 curthread->inherited_priority = (*m)->m_prio;
753                                 THR_SCHED_UNLOCK(curthread, curthread);
754
755                                 /* Add to the list of owned mutexes: */
756                                 MUTEX_ASSERT_NOT_OWNED(*m);
757                                 TAILQ_INSERT_TAIL(&curthread->mutexq,
758                                     (*m), m_qe);
759                                 if (private)
760                                         THR_CRITICAL_ENTER(curthread);
761
762                                 /* Unlock the mutex structure: */
763                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
764                         } else if ((*m)->m_owner == curthread) {
765                                 ret = mutex_self_lock(curthread, *m);
766
767                                 /* Unlock the mutex structure: */
768                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
769                         } else {
770                                 /*
771                                  * Join the queue of threads waiting to lock
772                                  * the mutex and save a pointer to the mutex.
773                                  */
774                                 mutex_queue_enq(*m, curthread);
775                                 curthread->data.mutex = *m;
776                                 curthread->sigbackout = mutex_lock_backout;
777
778                                 /* Clear any previous error: */
779                                 curthread->error = 0;
780
781                                 /*
782                                  * This thread is active and is in a critical
783                                  * region (holding the mutex lock); we should
784                                  * be able to safely set the state.
785                                  */
786
787                                 THR_SCHED_LOCK(curthread, curthread);
788                                 /* Set the wakeup time: */
789                                 if (abstime) {
790                                         curthread->wakeup_time.tv_sec =
791                                                 abstime->tv_sec;
792                                         curthread->wakeup_time.tv_nsec =
793                                                 abstime->tv_nsec;
794                                 }
795                                 THR_SET_STATE(curthread, PS_MUTEX_WAIT);
796                                 THR_SCHED_UNLOCK(curthread, curthread);
797
798                                 /* Unlock the mutex structure: */
799                                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
800
801                                 /* Schedule the next thread: */
802                                 _thr_sched_switch(curthread);
803
804                                 if (THR_IN_MUTEXQ(curthread)) {
805                                         THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
806                                         mutex_queue_remove(*m, curthread);
807                                         THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
808                                 }
809                                 /*
810                                  * Only clear these after assuring the
811                                  * thread is dequeued.
812                                  */
813                                 curthread->data.mutex = NULL;
814                                 curthread->sigbackout = NULL;
815
816                                 /*
817                                  * The threads priority may have changed while
818                                  * waiting for the mutex causing a ceiling
819                                  * violation.
820                                  */
821                                 ret = curthread->error;
822                                 curthread->error = 0;
823                         }
824                         break;
825
826                 /* Trap invalid mutex types: */
827                 default:
828                         /* Unlock the mutex structure: */
829                         THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
830
831                         /* Return an invalid argument error: */
832                         ret = EINVAL;
833                         break;
834                 }
835
836         } while (((*m)->m_owner != curthread) && (ret == 0) &&
837             (curthread->interrupted == 0) && (curthread->timeout == 0));
838
839         if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
840                 ret = ETIMEDOUT;
841
842         /*
843          * Check to see if this thread was interrupted and
844          * is still in the mutex queue of waiting threads:
845          */
846         if (curthread->interrupted != 0) {
847                 /* Remove this thread from the mutex queue. */
848                 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
849                 if (THR_IN_SYNCQ(curthread))
850                         mutex_queue_remove(*m, curthread);
851                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
852
853                 /* Check for asynchronous cancellation. */
854                 if (curthread->continuation != NULL)
855                         curthread->continuation((void *) curthread);
856         }
857
858         /* Return the completion status: */
859         return (ret);
860 }
861
862 int
863 __pthread_mutex_lock(pthread_mutex_t *m)
864 {
865         struct pthread *curthread;
866         int     ret = 0;
867
868         if (_thr_initial == NULL)
869                 _libpthread_init(NULL);
870
871         curthread = _get_curthread();
872         if (m == NULL)
873                 ret = EINVAL;
874
875         /*
876          * If the mutex is statically initialized, perform the dynamic
877          * initialization:
878          */
879         else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
880                 ret = mutex_lock_common(curthread, m, NULL);
881
882         return (ret);
883 }
884
885 __strong_reference(__pthread_mutex_lock, _thr_mutex_lock);
886
887 int
888 _pthread_mutex_lock(pthread_mutex_t *m)
889 {
890         struct pthread *curthread;
891         int     ret = 0;
892
893         if (_thr_initial == NULL)
894                 _libpthread_init(NULL);
895         curthread = _get_curthread();
896
897         if (m == NULL)
898                 ret = EINVAL;
899
900         /*
901          * If the mutex is statically initialized, perform the dynamic
902          * initialization marking it private (delete safe):
903          */
904         else if ((*m != NULL) ||
905             ((ret = init_static_private(curthread, m)) == 0))
906                 ret = mutex_lock_common(curthread, m, NULL);
907
908         return (ret);
909 }
910
911 int
912 __pthread_mutex_timedlock(pthread_mutex_t *m,
913         const struct timespec *abs_timeout)
914 {
915         struct pthread *curthread;
916         int     ret = 0;
917
918         if (_thr_initial == NULL)
919                 _libpthread_init(NULL);
920
921         curthread = _get_curthread();
922         if (m == NULL)
923                 ret = EINVAL;
924
925         /*
926          * If the mutex is statically initialized, perform the dynamic
927          * initialization:
928          */
929         else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
930                 ret = mutex_lock_common(curthread, m, abs_timeout);
931
932         return (ret);
933 }
934
935 int
936 _pthread_mutex_timedlock(pthread_mutex_t *m,
937         const struct timespec *abs_timeout)
938 {
939         struct pthread *curthread;
940         int     ret = 0;
941
942         if (_thr_initial == NULL)
943                 _libpthread_init(NULL);
944         curthread = _get_curthread();
945
946         if (m == NULL)
947                 ret = EINVAL;
948
949         /*
950          * If the mutex is statically initialized, perform the dynamic
951          * initialization marking it private (delete safe):
952          */
953         else if ((*m != NULL) ||
954             ((ret = init_static_private(curthread, m)) == 0))
955                 ret = mutex_lock_common(curthread, m, abs_timeout);
956
957         return (ret);
958 }
959
960 int
961 _pthread_mutex_unlock(pthread_mutex_t *m)
962 {
963         return (mutex_unlock_common(m, /* add reference */ 0));
964 }
965
966 __strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);
967
968 int
969 _mutex_cv_unlock(pthread_mutex_t *m)
970 {
971         return (mutex_unlock_common(m, /* add reference */ 1));
972 }
973
974 int
975 _mutex_cv_lock(pthread_mutex_t *m)
976 {
977         struct  pthread *curthread;
978         int     ret;
979
980         curthread = _get_curthread();
981         if ((ret = _pthread_mutex_lock(m)) == 0) {
982                 THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
983                 (*m)->m_refcount--;
984                 THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
985         }
986         return (ret);
987 }
988
989 static inline int
990 mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
991 {
992         int     ret = 0;
993
994         switch (m->m_type) {
995         /* case PTHREAD_MUTEX_DEFAULT: */
996         case PTHREAD_MUTEX_ERRORCHECK:
997         case PTHREAD_MUTEX_NORMAL:
998         case PTHREAD_MUTEX_ADAPTIVE_NP:
999                 ret = EBUSY; 
1000                 break;
1001
1002         case PTHREAD_MUTEX_RECURSIVE:
1003                 /* Increment the lock count: */
1004                 m->m_count++;
1005                 break;
1006
1007         default:
1008                 /* Trap invalid mutex types; */
1009                 ret = EINVAL;
1010         }
1011
1012         return (ret);
1013 }
1014
1015 static inline int
1016 mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
1017 {
1018         int ret = 0;
1019
1020         /*
1021          * Don't allow evil recursive mutexes for private use
1022          * in libc and libpthread.
1023          */
1024         if (m->m_flags & MUTEX_FLAGS_PRIVATE)
1025                 PANIC("Recurse on a private mutex.");
1026
1027         switch (m->m_type) {
1028         /* case PTHREAD_MUTEX_DEFAULT: */
1029         case PTHREAD_MUTEX_ERRORCHECK:
1030         case PTHREAD_MUTEX_ADAPTIVE_NP:
1031                 /*
1032                  * POSIX specifies that mutexes should return EDEADLK if a
1033                  * recursive lock is detected.
1034                  */
1035                 ret = EDEADLK; 
1036                 break;
1037
1038         case PTHREAD_MUTEX_NORMAL:
1039                 /*
1040                  * What SS2 define as a 'normal' mutex.  Intentionally
1041                  * deadlock on attempts to get a lock you already own.
1042                  */
1043
1044                 THR_SCHED_LOCK(curthread, curthread);
1045                 THR_SET_STATE(curthread, PS_DEADLOCK);
1046                 THR_SCHED_UNLOCK(curthread, curthread);
1047
1048                 /* Unlock the mutex structure: */
1049                 THR_LOCK_RELEASE(curthread, &m->m_lock);
1050
1051                 /* Schedule the next thread: */
1052                 _thr_sched_switch(curthread);
1053                 break;
1054
1055         case PTHREAD_MUTEX_RECURSIVE:
1056                 /* Increment the lock count: */
1057                 m->m_count++;
1058                 break;
1059
1060         default:
1061                 /* Trap invalid mutex types; */
1062                 ret = EINVAL;
1063         }
1064
1065         return (ret);
1066 }
1067
/*
 * Common unlock path for all three mutex protocols.  If
 * "add_reference" is non-zero, the mutex's reference count is
 * incremented after a successful unlock; the condition variable
 * code uses this to keep the mutex alive while a waiter still has
 * to reacquire it.
 *
 * Returns 0 on success, EINVAL for a NULL/uninitialized mutex or
 * an unknown protocol, and EPERM if the caller is not the owner.
 */
static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;	/* KSE to wake after dropping m_lock */
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
					(*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Leave the critical region if this is a private mutex. */
		if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
			THR_CRITICAL_LEAVE(curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/*
		 * The wakeup is deferred until after m_lock is dropped so
		 * the awakened KSE never contends with us for that lock.
		 */
		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}
1245
1246
1247 /*
1248  * This function is called when a change in base priority occurs for
1249  * a thread that is holding or waiting for a priority protection or
1250  * inheritence mutex.  A change in a threads base priority can effect
1251  * changes to active priorities of other threads and to the ordering
1252  * of mutex locking by waiting threads.
1253  *
1254  * This must be called without the target thread's scheduling lock held.
1255  */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the threads priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 * (The owner may have released it between our
			 * TAILQ_FIRST read and acquiring m_lock.)
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			/* Not blocked on a mutex; nothing to propagate. */
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}
1349
1350 /*
1351  * Called when a new thread is added to the mutex waiting queue or
1352  * when a threads priority changes that is already in the mutex
1353  * waiting queue.
1354  *
1355  * This must be called with the mutex locked by the current thread.
1356  */
1357 static void
1358 mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
1359 {
1360         pthread_mutex_t m = mutex;
1361         struct pthread  *pthread_next, *pthread = mutex->m_owner;
1362         int             done, temp_prio;
1363
1364         /*
1365          * Calculate the mutex priority as the maximum of the highest
1366          * active priority of any waiting threads and the owning threads
1367          * active priority(*).
1368          *
1369          * (*) Because the owning threads current active priority may
1370          *     reflect priority inherited from this mutex (and the mutex
1371          *     priority may have changed) we must recalculate the active
1372          *     priority based on the threads saved inherited priority
1373          *     and its base priority.
1374          */
1375         pthread_next = TAILQ_FIRST(&m->m_queue);  /* should never be NULL */
1376         temp_prio = MAX(pthread_next->active_priority,
1377             MAX(m->m_saved_prio, pthread->base_priority));
1378
1379         /* See if this mutex really needs adjusting: */
1380         if (temp_prio == m->m_prio)
1381                 /* No need to propagate the priority: */
1382                 return;
1383
1384         /* Set new priority of the mutex: */
1385         m->m_prio = temp_prio;
1386
1387         /*
1388          * Don't unlock the mutex passed in as an argument.  It is
1389          * expected to be locked and unlocked by the caller.
1390          */
1391         done = 1;
1392         do {
1393                 /*
1394                  * Save the threads priority before rescanning the
1395                  * owned mutexes:
1396                  */
1397                 temp_prio = pthread->active_priority;
1398
1399                 /*
1400                  * Fix the priorities for all mutexes held by the owning
1401                  * thread since taking this mutex.  This also has a
1402                  * potential side-effect of changing the threads priority.
1403                  *
1404                  * At this point the mutex is locked by the current thread.
1405                  * The owning thread can't release the mutex until it is
1406                  * unlocked, so we should be able to safely walk its list
1407                  * of owned mutexes.
1408                  */
1409                 mutex_rescan_owned(curthread, pthread, m);
1410
1411                 /*
1412                  * If this isn't the first time through the loop,
1413                  * the current mutex needs to be unlocked.
1414                  */
1415                 if (done == 0)
1416                         THR_LOCK_RELEASE(curthread, &m->m_lock);
1417
1418                 /* Assume we're done unless told otherwise: */
1419                 done = 1;
1420
1421                 /*
1422                  * If the thread is currently waiting on a mutex, check
1423                  * to see if the threads new priority has affected the
1424                  * priority of the mutex.
1425                  */
1426                 if ((temp_prio != pthread->active_priority) &&
1427                     ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1428                     ((m = pthread->data.mutex) != NULL) &&
1429                     (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
1430                         /* Lock the mutex structure: */
1431                         THR_LOCK_ACQUIRE(curthread, &m->m_lock);
1432
1433                         /*
1434                          * Make sure the thread is still waiting on the
1435                          * mutex:
1436                          */
1437                         if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
1438                             (m == pthread->data.mutex)) {
1439                                 /*
1440                                  * The priority for this thread has changed.
1441                                  * Remove and reinsert this thread into the
1442                                  * list of waiting threads to preserve
1443                                  * decreasing priority order.
1444                                  */
1445                                 mutex_queue_remove(m, pthread);
1446                                 mutex_queue_enq(m, pthread);
1447
1448                                 /*
1449                                  * Grab the waiting thread with highest
1450                                  * priority:
1451                                  */
1452                                 pthread_next = TAILQ_FIRST(&m->m_queue);
1453
1454                                 /*
1455                                  * Calculate the mutex priority as the maximum
1456                                  * of the highest active priority of any
1457                                  * waiting threads and the owning threads
1458                                  * active priority.
1459                                  */
1460                                 temp_prio = MAX(pthread_next->active_priority,
1461                                     MAX(m->m_saved_prio,
1462                                     m->m_owner->base_priority));
1463
1464                                 if (temp_prio != m->m_prio) {
1465                                         /*
1466                                          * The priority needs to be propagated
1467                                          * to the mutex this thread is waiting
1468                                          * on and up to the owner of that mutex.
1469                                          */
1470                                         m->m_prio = temp_prio;
1471                                         pthread = m->m_owner;
1472
1473                                         /* We're not done yet: */
1474                                         done = 0;
1475                                 }
1476                         }
1477                         /* Only release the mutex if we're done: */
1478                         if (done != 0)
1479                                 THR_LOCK_RELEASE(curthread, &m->m_lock);
1480                 }
1481         } while (done == 0);
1482 }
1483
/*
 * Recompute the priorities of the priority-inheritance mutexes owned
 * by "pthread", then the thread's own inherited and active priorities.
 *
 * The walk starts after "mutex" on the thread's owned-mutex list, or
 * at the head of the list when "mutex" is NULL.  Per the call site's
 * comment, the caller holds the lock of the mutex that anchors the
 * rescan, which keeps the owned list stable while we walk it.  The
 * target thread's scheduling queue is locked only if its active
 * priority actually changes and it must be requeued.
 */
static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritance
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritance
		 * mutexes.  This might be optimized by only placing
		 * priority inheritance mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owner's saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/*
				 * Recalculate the priority of the mutex
				 * from the highest-priority waiter (the
				 * queue head) and the running priority:
				 */
				m->m_prio = MAX(active_prio,
				     pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the thread's inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritance mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}
1595
1596 void
1597 _mutex_unlock_private(pthread_t pthread)
1598 {
1599         struct pthread_mutex    *m, *m_next;
1600
1601         for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
1602                 m_next = TAILQ_NEXT(m, m_qe);
1603                 if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
1604                         pthread_mutex_unlock(&m);
1605         }
1606 }
1607
/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.  "arg" is the current
 * thread itself (it is installed as curthread->sigbackout, which this
 * function clears before returning).
 */
static void
mutex_lock_backout(void *arg)
{
	struct pthread *curthread = (struct pthread *)arg;
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			/* We were handed the mutex; give it back. */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
	/* No need to call this again. */
	curthread->sigbackout = NULL;
}
1659
/*
 * Hand the mutex off to the highest-priority valid waiter, dequeueing
 * from the head of the mutex queue (which is kept in descending
 * priority order).
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 *
 * Returns the KSE mailbox of the thread made runnable (from
 * _thr_setrunnable_unlocked), or NULL if the queue was empty.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Assign ownership according to the mutex protocol.
		 * For PTHREAD_PRIO_PROTECT the handoff can be refused
		 * (m_owner left NULL), in which case the loop below
		 * moves on to the next waiter.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning thread's inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning thread's inherited priority now becomes
			 * its active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this thread's priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning thread's inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		if (mutex->m_owner == pthread) {
			/* We're done; a valid owner was found. */
			if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
				THR_CRITICAL_ENTER(pthread);
			THR_SCHED_UNLOCK(curthread, pthread);
			break;
		}
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Get the next thread from the waiting queue: */
		pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}
1795
1796 /*
1797  * Dequeue a waiting thread from the head of a mutex queue in descending
1798  * priority order.
1799  */
1800 static inline pthread_t
1801 mutex_queue_deq(struct pthread_mutex *mutex)
1802 {
1803         pthread_t pthread;
1804
1805         while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
1806                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1807                 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1808
1809                 /*
1810                  * Only exit the loop if the thread hasn't been
1811                  * cancelled.
1812                  */
1813                 if (pthread->interrupted == 0)
1814                         break;
1815         }
1816
1817         return (pthread);
1818 }
1819
1820 /*
1821  * Remove a waiting thread from a mutex queue in descending priority order.
1822  */
1823 static inline void
1824 mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
1825 {
1826         if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
1827                 TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
1828                 pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
1829         }
1830 }
1831
1832 /*
1833  * Enqueue a waiting thread to a queue in descending priority order.
1834  */
1835 static inline void
1836 mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
1837 {
1838         pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);
1839
1840         THR_ASSERT_NOT_IN_SYNCQ(pthread);
1841         /*
1842          * For the common case of all threads having equal priority,
1843          * we perform a quick check against the priority of the thread
1844          * at the tail of the queue.
1845          */
1846         if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
1847                 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
1848         else {
1849                 tid = TAILQ_FIRST(&mutex->m_queue);
1850                 while (pthread->active_priority <= tid->active_priority)
1851                         tid = TAILQ_NEXT(tid, sqe);
1852                 TAILQ_INSERT_BEFORE(tid, pthread, sqe);
1853         }
1854         pthread->sflags |= THR_FLAGS_IN_SYNCQ;
1855 }