2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Private thread definitions for the uthread kernel.
37 #ifndef _THR_PRIVATE_H
38 #define _THR_PRIVATE_H
46 #include <sys/queue.h>
47 #include <sys/types.h>
49 #include <sys/cdefs.h>
55 #include <pthread_np.h>
59 #include "pthread_md.h"
63 * Evaluate the storage class specifier.
65 #ifdef GLOBAL_PTHREAD_PRIVATE
67 #define SCLASS_PRESET(x...) = x
70 #define SCLASS_PRESET(x...)
74 * Kernel fatal error handler macro.
76 #define PANIC(string) _thr_exit(__FILE__,__LINE__,string)
79 /* Output debug messages like this: */
80 #define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
81 #define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args)
83 #define DBG_MUTEX 0x0001
84 #define DBG_SIG 0x0002
86 #ifdef _PTHREADS_INVARIANTS
87 #define THR_ASSERT(cond, msg) do { \
92 #define THR_ASSERT(cond, msg)
96 * State change macro without scheduling queue change:
98 #define THR_SET_STATE(thrd, newstate) do { \
99 (thrd)->state = newstate; \
100 (thrd)->fname = __FILE__; \
101 (thrd)->lineno = __LINE__; \
105 /* Normalize after addition: tv_nsec must stay in [0, 1000000000).
105  * BUGFIX: use >= so that exactly one full second of nanoseconds also
105  * carries into tv_sec (matches the inverse check in TIMESPEC_SUB). */
105 #define TIMESPEC_ADD(dst, src, val) \
107 (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
108 (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
109 if ((dst)->tv_nsec >= 1000000000) { \
111 (dst)->tv_nsec -= 1000000000; \
115 #define TIMESPEC_SUB(dst, src, val) \
117 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
118 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
119 if ((dst)->tv_nsec < 0) { \
121 (dst)->tv_nsec += 1000000000; \
128 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
130 typedef struct pq_list {
131 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
132 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
133 int pl_prio; /* the priority of this list */
134 int pl_queued; /* is this in the priority queue */
137 typedef struct pq_queue {
138 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
139 pq_list_t *pq_lists; /* array of all priority lists */
140 int pq_size; /* number of priority lists */
141 #define PQF_ACTIVE 0x0001
147 * Each KSEG has a scheduling queue. For now, threads that exist in their
148 * own KSEG (system scope) will get a full priority queue. In the future
149 * this can be optimized for the single thread per KSEG case.
153 TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
156 typedef struct kse_thr_mailbox *kse_critical_t;
160 #define MAX_KSE_LOCKLEVEL 5
162 /* -- location and order specific items for gdb -- */
164 struct pthread *k_curthread; /* current thread */
165 struct kse_group *k_kseg; /* parent KSEG */
166 struct sched_queue *k_schedq; /* scheduling queue */
167 /* -- end of location and order specific items -- */
168 TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */
169 TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */
171 * Items that are only modified by the kse, or that otherwise
172 * don't need to be locked when accessed
175 struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL];
179 #define KF_STARTED 0x0001 /* kernel kse created */
180 #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
181 #define KF_TERMINATED 0x0004 /* kse is terminated */
182 #define KF_IDLE 0x0008 /* kse is idle */
183 #define KF_SWITCH 0x0010 /* thread switch in UTS */
184 int k_error; /* syscall errno in critical */
185 int k_cpu; /* CPU ID when bound */
186 int k_sigseqno; /* signal buffered count */
189 /* Convenience accessors for the KF_IDLE and KF_SWITCH bits of k_flags. */
189 #define KSE_SET_IDLE(kse) ((kse)->k_flags |= KF_IDLE)
190 #define KSE_CLEAR_IDLE(kse) ((kse)->k_flags &= ~KF_IDLE)
191 #define KSE_IS_IDLE(kse) (((kse)->k_flags & KF_IDLE) != 0)
192 #define KSE_SET_SWITCH(kse) ((kse)->k_flags |= KF_SWITCH)
193 #define KSE_CLEAR_SWITCH(kse) ((kse)->k_flags &= ~KF_SWITCH)
194 #define KSE_IS_SWITCH(kse) (((kse)->k_flags & KF_SWITCH) != 0)
197 * Each KSE group contains one or more KSEs in which threads can run.
198 * At least for now, there is one scheduling queue per KSE group; KSEs
199 * within the same KSE group compete for threads from the same scheduling
200 * queue. A scope system thread has one KSE in one KSE group; the group
201 * does not use its scheduling queue.
204 TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */
205 TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */
206 TAILQ_ENTRY(kse_group) kg_qe; /* link entry */
207 struct sched_queue kg_schedq; /* scheduling queue */
209 int kg_threadcount; /* # of assigned threads */
210 int kg_ksecount; /* # of assigned KSEs */
213 #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */
214 #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
218 * Add/remove threads from a KSE's scheduling queue.
219 * For now the scheduling queue is hung off the KSEG.
221 #define KSEG_THRQ_ADD(kseg, thr) \
223 TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
224 (kseg)->kg_threadcount++; \
227 #define KSEG_THRQ_REMOVE(kseg, thr) \
229 TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \
230 (kseg)->kg_threadcount--; \
235 * Lock acquire and release for KSEs.
237 #define KSE_LOCK_ACQUIRE(kse, lck) \
239 if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) { \
240 (kse)->k_locklevel++; \
241 _lock_acquire((lck), \
242 &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \
245 PANIC("Exceeded maximum lock level"); \
248 #define KSE_LOCK_RELEASE(kse, lck) \
250 if ((kse)->k_locklevel > 0) { \
251 _lock_release((lck), \
252 &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \
253 (kse)->k_locklevel--; \
260 #define KSE_LOCK(curkse) \
261 KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
262 #define KSE_UNLOCK(curkse) \
263 KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)
266 * Lock a potentially different KSEG.
268 #define KSE_SCHED_LOCK(curkse, kseg) \
269 KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
270 #define KSE_SCHED_UNLOCK(curkse, kseg) \
271 KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)
274 * Waiting queue manipulation macros (using pqe link):
276 #define KSE_WAITQ_REMOVE(kse, thrd) \
278 if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
279 TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
280 (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
283 #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd)
284 #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)
286 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx)
289 * TailQ initialization values.
291 #define TAILQ_INITIALIZER { NULL, NULL }
294 * lock initialization values.
296 #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT }
298 struct pthread_mutex {
300 * Lock for accesses to this structure.
303 enum pthread_mutextype m_type;
305 TAILQ_HEAD(mutex_head, pthread) m_queue;
306 struct pthread *m_owner;
312 * Used for priority inheritance and protection.
314 * m_prio - For priority inheritance, the highest active
315 * priority (threads locking the mutex inherit
316 * this priority). For priority protection, the
317 * ceiling priority of this mutex.
318 * m_saved_prio - mutex owners inherited priority before
319 * taking the mutex, restored when the owner
326 * Link for list of all mutexes a thread currently owns.
328 TAILQ_ENTRY(pthread_mutex) m_qe;
334 #define MUTEX_FLAGS_PRIVATE 0x01
335 #define MUTEX_FLAGS_INITED 0x02
336 #define MUTEX_FLAGS_BUSY 0x04
339 * Static mutex initialization values.
341 #define PTHREAD_MUTEX_STATIC_INITIALIZER \
342 { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \
343 TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \
346 struct pthread_mutex_attr {
347 enum pthread_mutextype m_type;
353 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
354 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
357 * Condition variable definitions.
359 enum pthread_cond_type {
364 struct pthread_cond {
366 * Lock for accesses to this structure.
369 enum pthread_cond_type c_type;
370 TAILQ_HEAD(cond_head, pthread) c_queue;
371 struct pthread_mutex *c_mutex;
376 struct pthread_cond_attr {
377 enum pthread_cond_type c_type;
381 struct pthread_barrier {
382 pthread_mutex_t b_lock;
383 pthread_cond_t b_cond;
389 struct pthread_barrierattr {
393 struct pthread_spinlock {
399 * Flags for condition variables.
401 #define COND_FLAGS_PRIVATE 0x01
402 #define COND_FLAGS_INITED 0x02
403 #define COND_FLAGS_BUSY 0x04
406 * Static cond initialization values.
408 #define PTHREAD_COND_STATIC_INITIALIZER \
409 { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \
413 * Cleanup definitions.
415 struct pthread_cleanup {
416 struct pthread_cleanup *next;
422 #define THR_CLEANUP_PUSH(td, func, arg) { \
423 struct pthread_cleanup __cup; \
425 __cup.routine = func; \
426 __cup.routine_arg = arg; \
428 __cup.next = (td)->cleanup; \
429 (td)->cleanup = &__cup;
431 #define THR_CLEANUP_POP(td, exec) \
432 (td)->cleanup = __cup.next; \
434 __cup.routine(__cup.routine_arg); \
437 struct pthread_atfork {
438 TAILQ_ENTRY(pthread_atfork) qe;
439 void (*prepare)(void);
440 void (*parent)(void);
444 struct pthread_attr {
450 #define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
451 #define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */
454 void (*cleanup_attr) ();
455 void *stackaddr_attr;
456 size_t stacksize_attr;
457 size_t guardsize_attr;
461 * Thread creation state attributes.
463 #define THR_CREATE_RUNNING 0
464 #define THR_CREATE_SUSPENDED 1
467 * Miscellaneous definitions.
469 #define THR_STACK32_DEFAULT (1 * 1024 * 1024)
470 #define THR_STACK64_DEFAULT (2 * 1024 * 1024)
473 * Maximum size of initial thread's stack. This perhaps deserves to be larger
474 * than the stacks of other threads, since many applications are likely to run
475 * almost entirely on this stack.
477 #define THR_STACK32_INITIAL (2 * 1024 * 1024)
478 #define THR_STACK64_INITIAL (4 * 1024 * 1024)
481 * Define the different priority ranges. All applications have thread
482 * priorities constrained within 0-31. The threads library raises the
483 * priority when delivering signals in order to ensure that signal
484 * delivery happens (from the POSIX spec) "as soon as possible".
485 * In the future, the threads library will also be able to map specific
486 * threads into real-time (cooperating) processes or kernel threads.
487 * The RT and SIGNAL priorities will be used internally and added to
488 * thread base priorities so that the scheduling queue can handle both
489 * normal and RT priority threads with and without signal handling.
491 * The approach taken is that, within each class, signal delivery
492 * always has priority over thread execution.
494 #define THR_DEFAULT_PRIORITY 15
495 #define THR_MIN_PRIORITY 0
496 #define THR_MAX_PRIORITY 31 /* 0x1F */
497 #define THR_SIGNAL_PRIORITY 32 /* 0x20 */
498 #define THR_RT_PRIORITY 64 /* 0x40 */
499 #define THR_FIRST_PRIORITY THR_MIN_PRIORITY
500 #define THR_LAST_PRIORITY \
501 (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
502 #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY)
505 * Clock resolution in microseconds.
507 #define CLOCK_RES_USEC 10000
510 * Time slice period in microseconds.
512 #define TIMESLICE_USEC 20000
515 * XXX - Define a thread-safe macro to get the current time of day
516 * which is updated at regular intervals by something.
518 * For now, we just make the system call to get the time.
520 #define KSE_GET_TOD(curkse, tsp) \
522 *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \
523 if ((tsp)->tv_sec == 0) \
524 clock_gettime(CLOCK_REALTIME, tsp); \
527 struct pthread_rwlockattr {
531 struct pthread_rwlock {
532 pthread_mutex_t lock; /* monitor lock */
533 pthread_cond_t read_signal;
534 pthread_cond_t write_signal;
535 int state; /* 0 = idle >0 = # of readers -1 = writer */
557 struct sigwait_data {
559 siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */
562 union pthread_wait_data {
563 pthread_mutex_t mutex;
566 struct sigwait_data *sigwait;
570 * Define a continuation routine that can be used to perform a
571 * transfer of control:
573 typedef void (*thread_continuation_t) (void *);
576 * This stores a thread's state prior to running a signal handler.
577 * It is used when a signal is delivered to a thread blocked in
578 * userland. If the signal handler returns normally, the thread's
579 * state is restored from here.
581 struct pthread_sigframe {
588 enum pthread_state psf_state;
589 union pthread_wait_data psf_wait_data;
590 struct timespec psf_wakeup_time;
592 sigset_t psf_sigmask;
594 thread_continuation_t psf_continuation;
598 struct pthread *thread;
603 struct pthread_specific_elem {
609 volatile int allocated;
612 void (*destructor) (void *);
615 #define MAX_THR_LOCKLEVEL 5
620 /* Thread control block */
624 * Magic value to help recognize a valid thread structure
625 * from an invalid one:
627 #define THR_MAGIC ((u_int32_t) 0xd09ba115)
630 u_int64_t uniqueid; /* for gdb */
632 /* Queue entry for list of all threads: */
633 TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
634 TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */
636 /* Queue entry for GC lists: */
637 TAILQ_ENTRY(pthread) gcle;
639 /* Hash queue entry */
640 LIST_ENTRY(pthread) hle;
643 * Lock for accesses to this thread structure.
646 struct lockuser lockusers[MAX_THR_LOCKLEVEL];
648 kse_critical_t critical[MAX_KSE_LOCKLEVEL];
650 struct kse_group *kseg;
653 * Thread start routine, argument, stack pointer and thread
656 void *(*start_routine)(void *);
658 struct pthread_attr attr;
660 int active; /* thread running */
661 int blocked; /* thread blocked in kernel */
665 * Used for tracking delivery of signal handlers.
668 thread_continuation_t sigbackout;
671 * Cancelability flags - the lower 2 bits are used by cancel
672 * definitions in pthread.h
674 #define THR_AT_CANCEL_POINT 0x0004
675 #define THR_CANCELLING 0x0008
676 #define THR_CANCEL_NEEDED 0x0010
679 thread_continuation_t continuation;
682 * The thread's base and pending signal masks. The active
683 * signal mask is stored in the thread's context (in mailbox).
687 sigset_t *oldsigmask;
688 volatile int check_pending;
692 enum pthread_state state;
693 volatile int lock_switch;
696 * Number of microseconds accumulated by this thread when
697 * time slicing is active.
702 * Time to wake up thread. This is used for sleeping threads and
703 * for any operation which may time out (such as select).
705 struct timespec wakeup_time;
707 /* TRUE if operation has timed out. */
711 * Error variable used instead of errno. The function __error()
712 * returns a pointer to this.
717 * The joiner is the thread that is joining to this thread. The
718 * join status keeps track of a join operation to another thread.
720 struct pthread *joiner;
721 struct join_status join_status;
724 * The current thread can belong to only one scheduling queue at
725 * a time (ready or waiting queue). It can also belong to:
727 * o A queue of threads waiting for a mutex
728 * o A queue of threads waiting for a condition variable
730 * It is possible for a thread to belong to more than one of the
731 * above queues if it is handling a signal. A thread may only
732 * enter a mutex or condition variable queue when it is not
733 * being called from a signal handler. If a thread is a member
734 * of one of these queues when a signal handler is invoked, it
735 * must be removed from the queue before invoking the handler
736 * and then added back to the queue after return from the handler.
738 * Use pqe for the scheduling queue link (both ready and waiting),
739 * sqe for synchronization (mutex, condition variable, and join)
740 * queue links, and qe for all other links.
742 TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */
743 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
746 union pthread_wait_data data;
749 * Set to TRUE if a blocking operation was
750 * interrupted by a signal:
755 * Set to non-zero when this thread has entered a critical
756 * region. We allow for recursive entries into critical regions.
761 * Set to TRUE if this thread should yield after leaving a
762 * critical region to check for signals, messages, etc.
767 #define THR_FLAGS_IN_SYNCQ 0x0001
769 /* Miscellaneous flags; only set with scheduling lock held. */
771 #define THR_FLAGS_PRIVATE 0x0001
772 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */
773 #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */
774 #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */
775 #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */
777 /* Thread list flags; only set with thread list lock held. */
778 #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
779 #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
780 #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
784 * Base priority is the user settable and retrievable priority
785 * of the thread. It is only affected by explicit calls to
786 * set thread priority and upon thread creation via a thread
787 * attribute or default priority.
792 * Inherited priority is the priority a thread inherits by
793 * taking a priority inheritance or protection mutex. It
794 * is not affected by base priority changes. Inherited
795 * priority defaults to and remains 0 until a mutex is taken
796 * that is being waited on by any other thread whose priority
799 char inherited_priority;
802 * Active priority is always the maximum of the threads base
803 * priority and inherited priority. When there is a change
804 * in either the base or inherited priority, the active
805 * priority must be recalculated.
807 char active_priority;
809 /* Number of priority ceiling or protection mutexes owned. */
810 int priority_mutex_count;
812 /* Number rwlocks rdlocks held. */
816 * Queue of currently owned mutexes.
818 TAILQ_HEAD(, pthread_mutex) mutexq;
821 struct pthread_specific_elem *specific;
822 int specific_data_count;
824 /* Alternative stack for sigaltstack() */
828 * Current locks bitmap for rtld.
832 /* Cleanup handlers Link List */
833 struct pthread_cleanup *cleanup;
834 char *fname; /* Ptr to source file name */
835 int lineno; /* Source line number. */
839 * Critical regions can also be detected by looking at the threads
840 * current lock level. Ensure these macros increment and decrement
841 * the lock levels such that locks can not be held with a lock level
844 #define THR_IN_CRITICAL(thrd) \
845 (((thrd)->locklevel > 0) || \
846 ((thrd)->critical_count > 0))
848 #define THR_YIELD_CHECK(thrd) \
850 if (!THR_IN_CRITICAL(thrd)) { \
851 if (__predict_false(_libkse_debug)) \
852 _thr_debug_check_yield(thrd); \
853 if ((thrd)->critical_yield != 0) \
854 _thr_sched_switch(thrd); \
855 if ((thrd)->check_pending != 0) \
856 _thr_sig_check_pending(thrd); \
860 #define THR_LOCK_ACQUIRE(thrd, lck) \
862 if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \
863 THR_DEACTIVATE_LAST_LOCK(thrd); \
864 (thrd)->locklevel++; \
865 _lock_acquire((lck), \
866 &(thrd)->lockusers[(thrd)->locklevel - 1], \
867 (thrd)->active_priority); \
869 PANIC("Exceeded maximum lock level"); \
872 #define THR_LOCK_RELEASE(thrd, lck) \
874 if ((thrd)->locklevel > 0) { \
875 _lock_release((lck), \
876 &(thrd)->lockusers[(thrd)->locklevel - 1]); \
877 (thrd)->locklevel--; \
878 THR_ACTIVATE_LAST_LOCK(thrd); \
879 if ((thrd)->locklevel == 0) \
880 THR_YIELD_CHECK(thrd); \
884 #define THR_ACTIVATE_LAST_LOCK(thrd) \
886 if ((thrd)->locklevel > 0) \
887 _lockuser_setactive( \
888 &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \
891 #define THR_DEACTIVATE_LAST_LOCK(thrd) \
893 if ((thrd)->locklevel > 0) \
894 _lockuser_setactive( \
895 &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \
899 * For now, threads will have their own lock separate from their
900 * KSE scheduling lock.
902 #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock)
903 #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock)
904 #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
905 #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
908 * Priority queue manipulation macros (using pqe link). We use
909 * the thread's kseg link instead of the kse link because a thread
910 * does not (currently) have a statically assigned kse.
912 #define THR_RUNQ_INSERT_HEAD(thrd) \
913 _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
914 #define THR_RUNQ_INSERT_TAIL(thrd) \
915 _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
916 #define THR_RUNQ_REMOVE(thrd) \
917 _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
920 * Macros to insert/remove threads to the all thread list and
923 #define THR_LIST_ADD(thrd) do { \
924 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
925 TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
926 _thr_hash_add(thrd); \
927 (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
930 #define THR_LIST_REMOVE(thrd) do { \
931 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
932 TAILQ_REMOVE(&_thread_list, thrd, tle); \
933 _thr_hash_remove(thrd); \
934 (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
937 #define THR_GCLIST_ADD(thrd) do { \
938 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
939 TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
940 (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
944 #define THR_GCLIST_REMOVE(thrd) do { \
945 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
946 TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
947 (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
952 #define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5)
955 * Locking the scheduling queue for another thread uses that thread's
958 #define THR_SCHED_LOCK(curthr, thr) do { \
959 (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
960 (curthr)->locklevel++; \
961 KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \
964 #define THR_SCHED_UNLOCK(curthr, thr) do { \
965 KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \
966 (curthr)->locklevel--; \
967 _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
970 /* Take the scheduling lock with the intent to call the scheduler. */
971 #define THR_LOCK_SWITCH(curthr) do { \
972 (void)_kse_critical_enter(); \
973 KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
975 #define THR_UNLOCK_SWITCH(curthr) do { \
976 KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
979 #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
980 #define THR_CRITICAL_LEAVE(thr) do { \
981 (thr)->critical_count--; \
982 if (((thr)->critical_yield != 0) && \
983 ((thr)->critical_count == 0)) { \
984 (thr)->critical_yield = 0; \
985 _thr_sched_switch(thr); \
989 /* True iff the thread is the one currently running on its KSE.
989  * BUGFIX: wrap the whole expansion in parentheses so uses such as
989  * !THR_IS_ACTIVE(t) or THR_IS_ACTIVE(t) || x bind correctly. */
989 #define THR_IS_ACTIVE(thrd) \
990 (((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))
992 #define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
994 #define THR_IS_SUSPENDED(thrd) \
995 (((thrd)->state == PS_SUSPENDED) || \
996 (((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
997 #define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)
998 #define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
1001 extern int __isthreaded;
1004 _kse_isthreaded(void)
1006 return (__isthreaded != 0);
1010 * Global variables for the pthread kernel.
1013 SCLASS void *_usrstack SCLASS_PRESET(NULL);
1014 SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
1015 SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
1017 SCLASS int _libkse_debug SCLASS_PRESET(0);
1018 SCLASS int _thread_activated SCLASS_PRESET(0);
1019 SCLASS int _thread_scope_system SCLASS_PRESET(0);
1021 /* List of all threads: */
1022 SCLASS TAILQ_HEAD(, pthread) _thread_list
1023 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
1025 /* List of threads needing GC: */
1026 SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
1027 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
1029 SCLASS int _thread_active_threads SCLASS_PRESET(1);
1031 SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
1032 SCLASS pthread_mutex_t _thr_atfork_mutex;
1034 /* Default thread attributes: */
1035 SCLASS struct pthread_attr _pthread_attr_default
1037 SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
1038 THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
1039 NULL, NULL, /* stacksize */0, /* guardsize */0
1042 /* Default mutex attributes: */
1043 SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
1044 SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
1046 /* Default condition variable attributes: */
1047 SCLASS struct pthread_cond_attr _pthread_condattr_default
1048 SCLASS_PRESET({COND_TYPE_FAST, 0});
1050 /* Clock resolution in usec. */
1051 SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
1053 /* Array of signal actions for this process: */
1054 SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG];
1057 * Lock for above count of dummy handlers and for the process signal
1058 * mask and pending signal sets.
1060 SCLASS struct lock _thread_signal_lock;
1062 /* Pending signals and mask for this process: */
1063 SCLASS sigset_t _thr_proc_sigpending;
1064 SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG];
1066 SCLASS pid_t _thr_pid SCLASS_PRESET(0);
1068 /* Garbage collector lock. */
1069 SCLASS struct lock _gc_lock;
1070 SCLASS int _gc_check SCLASS_PRESET(0);
1071 SCLASS int _gc_count SCLASS_PRESET(0);
1073 SCLASS struct lock _mutex_static_lock;
1074 SCLASS struct lock _rwlock_static_lock;
1075 SCLASS struct lock _keytable_lock;
1076 SCLASS struct lock _thread_list_lock;
1077 SCLASS int _thr_guard_default;
1078 SCLASS int _thr_stack_default;
1079 SCLASS int _thr_stack_initial;
1080 SCLASS int _thr_page_size;
1081 SCLASS pthread_t _thr_sig_daemon;
1082 SCLASS int _thr_debug_flags SCLASS_PRESET(0);
1084 /* Undefine the storage class and preset specifiers: */
1086 #undef SCLASS_PRESET
1090 * Function prototype definitions.
1093 int _cond_reinit(pthread_cond_t *);
1094 struct kse *_kse_alloc(struct pthread *, int sys_scope);
1095 kse_critical_t _kse_critical_enter(void);
1096 void _kse_critical_leave(kse_critical_t);
1097 int _kse_in_critical(void);
1098 void _kse_free(struct pthread *, struct kse *);
1100 struct kse_group *_kseg_alloc(struct pthread *);
1101 void _kse_lock_wait(struct lock *, struct lockuser *lu);
1102 void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
1103 void _kse_single_thread(struct pthread *);
1104 int _kse_setthreaded(int);
1105 void _kseg_free(struct kse_group *);
1106 int _mutex_cv_lock(pthread_mutex_t *);
1107 int _mutex_cv_unlock(pthread_mutex_t *);
1108 void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
1109 int _mutex_reinit(struct pthread_mutex *);
1110 void _mutex_unlock_private(struct pthread *);
1111 void _libpthread_init(struct pthread *);
1112 int _pq_alloc(struct pq_queue *, int, int);
1113 void _pq_free(struct pq_queue *);
1114 int _pq_init(struct pq_queue *);
1115 void _pq_remove(struct pq_queue *pq, struct pthread *);
1116 void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1117 void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1118 struct pthread *_pq_first(struct pq_queue *pq);
1119 struct pthread *_pq_first_debug(struct pq_queue *pq);
1120 void *_pthread_getspecific(pthread_key_t);
1121 int _pthread_key_create(pthread_key_t *, void (*) (void *));
1122 int _pthread_key_delete(pthread_key_t);
1123 int _pthread_mutex_destroy(pthread_mutex_t *);
1124 int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1125 int _pthread_mutex_lock(pthread_mutex_t *);
1126 int _pthread_mutex_trylock(pthread_mutex_t *);
1127 int _pthread_mutex_unlock(pthread_mutex_t *);
1128 int _pthread_mutexattr_init(pthread_mutexattr_t *);
1129 int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1130 int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1131 int _pthread_once(pthread_once_t *, void (*) (void));
1132 int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
1133 int _pthread_rwlock_destroy (pthread_rwlock_t *);
1134 struct pthread *_pthread_self(void);
1135 int _pthread_setspecific(pthread_key_t, const void *);
1136 void _pthread_yield(void);
1137 void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
1138 void _pthread_cleanup_pop(int execute);
1139 struct pthread *_thr_alloc(struct pthread *);
1140 void _thr_exit(char *, int, char *);
1141 void _thr_exit_cleanup(void);
1142 void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
1143 void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
1144 void _thr_mutex_reinit(pthread_mutex_t *);
1145 int _thr_ref_add(struct pthread *, struct pthread *, int);
1146 void _thr_ref_delete(struct pthread *, struct pthread *);
1147 void _thr_rtld_init(void);
1148 void _thr_rtld_fini(void);
1149 int _thr_schedule_add(struct pthread *, struct pthread *);
1150 void _thr_schedule_remove(struct pthread *, struct pthread *);
1151 void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
1152 struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
1153 struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
1154 void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
1155 int _thr_stack_alloc(struct pthread_attr *);
1156 void _thr_stack_free(struct pthread_attr *);
1157 void _thr_exit_cleanup(void);
1158 void _thr_free(struct pthread *, struct pthread *);
1159 void _thr_gc(struct pthread *);
1160 void _thr_panic_exit(char *, int, char *);
1161 void _thread_cleanupspecific(void);
1162 void _thread_dump_info(void);
1163 void _thread_printf(int, const char *, ...);
1164 void _thr_sched_switch(struct pthread *);
1165 void _thr_sched_switch_unlocked(struct pthread *);
1166 void _thr_set_timeout(const struct timespec *);
1167 void _thr_seterrno(struct pthread *, int);
1168 void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
1169 void _thr_sig_check_pending(struct pthread *);
1170 void _thr_sig_rundown(struct pthread *, ucontext_t *);
1171 void _thr_sig_send(struct pthread *pthread, int sig);
1172 void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
1173 void _thr_spinlock_init(void);
1174 void _thr_cancel_enter(struct pthread *);
1175 void _thr_cancel_leave(struct pthread *, int);
1176 int _thr_setconcurrency(int new_level);
1177 int _thr_setmaxconcurrency(void);
1178 void _thr_critical_enter(struct pthread *);
1179 void _thr_critical_leave(struct pthread *);
1180 int _thr_start_sig_daemon(void);
1181 int _thr_getprocsig(int sig, siginfo_t *siginfo);
1182 int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
1183 void _thr_signal_init(void);
1184 void _thr_signal_deinit(void);
1185 void _thr_hash_add(struct pthread *);
1186 void _thr_hash_remove(struct pthread *);
1187 struct pthread *_thr_hash_find(struct pthread *);
1188 void _thr_finish_cancellation(void *arg);
1189 int _thr_sigonstack(void *sp);
1190 void _thr_debug_check_yield(struct pthread *);
1193 * Aliases for _pthread functions. Should be called instead of
1194 * originals if PLT relocation is unwanted at runtime.
1196 int _thr_cond_broadcast(pthread_cond_t *);
1197 int _thr_cond_signal(pthread_cond_t *);
1198 int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
1199 int _thr_mutex_lock(pthread_mutex_t *);
1200 int _thr_mutex_unlock(pthread_mutex_t *);
1201 int _thr_rwlock_rdlock (pthread_rwlock_t *);
1202 int _thr_rwlock_wrlock (pthread_rwlock_t *);
1203 int _thr_rwlock_unlock (pthread_rwlock_t *);
1205 /* #include <sys/aio.h> */
1207 int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1210 /* #include <fcntl.h> */
1211 #ifdef _SYS_FCNTL_H_
1212 int __sys_fcntl(int, int, ...);
1213 int __sys_open(const char *, int, ...);
1216 /* #include <sys/ioctl.h> */
1217 #ifdef _SYS_IOCTL_H_
1218 int __sys_ioctl(int, unsigned long, ...);
1221 /* #include <sched.h> */
1223 int __sys_sched_yield(void);
1226 /* #include <signal.h> */
1228 int __sys_kill(pid_t, int);
1229 int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1230 int __sys_sigpending(sigset_t *);
1231 int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1232 int __sys_sigsuspend(const sigset_t *);
1233 int __sys_sigreturn(ucontext_t *);
1234 int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1237 /* #include <sys/socket.h> */
1238 #ifdef _SYS_SOCKET_H_
1239 int __sys_accept(int, struct sockaddr *, socklen_t *);
1240 int __sys_connect(int, const struct sockaddr *, socklen_t);
1241 int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
1245 /* #include <sys/uio.h> */
1247 ssize_t __sys_readv(int, const struct iovec *, int);
1248 ssize_t __sys_writev(int, const struct iovec *, int);
1251 /* #include <time.h> */
1253 int __sys_nanosleep(const struct timespec *, struct timespec *);
1256 /* #include <unistd.h> */
1258 int __sys_close(int);
1259 int __sys_execve(const char *, char * const *, char * const *);
1260 int __sys_fork(void);
1261 int __sys_fsync(int);
1262 pid_t __sys_getpid(void);
1263 int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1264 ssize_t __sys_read(int, void *, size_t);
1265 ssize_t __sys_write(int, const void *, size_t);
1266 void __sys_exit(int);
1267 int __sys_sigwait(const sigset_t *, int *);
1268 int __sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *);
1271 /* #include <poll.h> */
1273 int __sys_poll(struct pollfd *, unsigned, int);
1276 /* #include <sys/mman.h> */
1278 int __sys_msync(void *, size_t, int);
1281 #endif /* !_THR_PRIVATE_H */