2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Private thread definitions for the uthread kernel.
34 #ifndef _THR_PRIVATE_H
35 #define _THR_PRIVATE_H
43 #include <sys/queue.h>
44 #include <sys/types.h>
46 #include <sys/cdefs.h>
52 #include <pthread_np.h>
56 #include "pthread_md.h"
61 * Evaluate the storage class specifier.
63 #ifdef GLOBAL_PTHREAD_PRIVATE
65 #define SCLASS_PRESET(x...) = x
68 #define SCLASS_PRESET(x...)
72 * Kernel fatal error handler macro.
74 #define PANIC(string) _thr_exit(__FILE__, __LINE__, string)
77 /* Output debug messages like this: */
79 #define stdout_debug(...) _thread_printf(STDOUT_FILENO, __VA_ARGS__)
82 #define stderr_debug(...) _thread_printf(STDERR_FILENO, __VA_ARGS__)
/*
 * Debug output category bits for _thr_debug_flags.  DBG_INFO_DUMP is
 * tested by _thr_dump_enabled() below; DBG_MUTEX and DBG_SIG presumably
 * gate mutex- and signal-related debug output -- confirm at call sites.
 */
85 #define DBG_MUTEX 0x0001
86 #define DBG_SIG 0x0002
87 #define DBG_INFO_DUMP 0x0004
89 #ifdef _PTHREADS_INVARIANTS
90 #define THR_ASSERT(cond, msg) do { \
95 #define THR_ASSERT(cond, msg)
99 * State change macro without scheduling queue change:
101 #define THR_SET_STATE(thrd, newstate) do { \
102 (thrd)->state = newstate; \
103 (thrd)->fname = __FILE__; \
104 (thrd)->lineno = __LINE__; \
108 #define TIMESPEC_ADD(dst, src, val) \
110 (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \
111 (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
112 if ((dst)->tv_nsec >= 1000000000) { \
114 (dst)->tv_nsec -= 1000000000; \
118 #define TIMESPEC_SUB(dst, src, val) \
120 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
121 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
122 if ((dst)->tv_nsec < 0) { \
124 (dst)->tv_nsec += 1000000000; \
131 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
133 typedef struct pq_list {
134 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
135 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
136 int pl_prio; /* the priority of this list */
137 int pl_queued; /* is this in the priority queue */
140 typedef struct pq_queue {
141 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
142 pq_list_t *pq_lists; /* array of all priority lists */
143 int pq_size; /* number of priority lists */
144 #define PQF_ACTIVE 0x0001
150 * Each KSEG has a scheduling queue. For now, threads that exist in their
151 * own KSEG (system scope) will get a full priority queue. In the future
152 * this can be optimized for the single thread per KSEG case.
156 TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */
159 typedef struct kse_thr_mailbox *kse_critical_t;
163 #define MAX_KSE_LOCKLEVEL 5
165 /* -- location and order specific items for gdb -- */
167 struct pthread *k_curthread; /* current thread */
168 struct kse_group *k_kseg; /* parent KSEG */
169 struct sched_queue *k_schedq; /* scheduling queue */
170 /* -- end of location and order specific items -- */
171 TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */
172 TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */
174 * Items that are only modified by the kse, or that otherwise
175 * don't need to be locked when accessed
178 struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL];
182 #define KF_STARTED 0x0001 /* kernel kse created */
183 #define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
184 #define KF_TERMINATED 0x0004 /* kse is terminated */
185 #define KF_IDLE 0x0008 /* kse is idle */
186 #define KF_SWITCH 0x0010 /* thread switch in UTS */
187 int k_error; /* syscall errno in critical */
188 int k_cpu; /* CPU ID when bound */
189 int k_sigseqno; /* signal buffered count */
/*
 * Set/clear/test helpers for the KF_IDLE and KF_SWITCH bits in a
 * KSE's k_flags word.
 */
192 #define KSE_SET_IDLE(kse) ((kse)->k_flags |= KF_IDLE)
193 #define KSE_CLEAR_IDLE(kse) ((kse)->k_flags &= ~KF_IDLE)
194 #define KSE_IS_IDLE(kse) (((kse)->k_flags & KF_IDLE) != 0)
195 #define KSE_SET_SWITCH(kse) ((kse)->k_flags |= KF_SWITCH)
196 #define KSE_CLEAR_SWITCH(kse) ((kse)->k_flags &= ~KF_SWITCH)
197 #define KSE_IS_SWITCH(kse) (((kse)->k_flags & KF_SWITCH) != 0)
200 * Each KSE group contains one or more KSEs in which threads can run.
201 * At least for now, there is one scheduling queue per KSE group; KSEs
202 * within the same KSE group compete for threads from the same scheduling
203 * queue. A scope system thread has one KSE in one KSE group; the group
204 * does not use its scheduling queue.
207 TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */
208 TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */
209 TAILQ_ENTRY(kse_group) kg_qe; /* link entry */
210 struct sched_queue kg_schedq; /* scheduling queue */
212 int kg_threadcount; /* # of assigned threads */
213 int kg_ksecount; /* # of assigned KSEs */
216 #define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */
217 #define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */
221 * Add/remove threads from a KSE's scheduling queue.
222 * For now the scheduling queue is hung off the KSEG.
224 #define KSEG_THRQ_ADD(kseg, thr) \
226 TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
227 (kseg)->kg_threadcount++; \
230 #define KSEG_THRQ_REMOVE(kseg, thr) \
232 TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \
233 (kseg)->kg_threadcount--; \
238 * Lock acquire and release for KSEs.
240 #define KSE_LOCK_ACQUIRE(kse, lck) \
242 if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) { \
243 (kse)->k_locklevel++; \
244 _lock_acquire((lck), \
245 &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \
248 PANIC("Exceeded maximum lock level"); \
251 #define KSE_LOCK_RELEASE(kse, lck) \
253 if ((kse)->k_locklevel > 0) { \
254 _lock_release((lck), \
255 &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \
256 (kse)->k_locklevel--; \
263 #define KSE_LOCK(curkse) \
264 KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
265 #define KSE_UNLOCK(curkse) \
266 KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)
269 * Lock a potentially different KSEG.
271 #define KSE_SCHED_LOCK(curkse, kseg) \
272 KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
273 #define KSE_SCHED_UNLOCK(curkse, kseg) \
274 KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)
277 * Waiting queue manipulation macros (using pqe link):
279 #define KSE_WAITQ_REMOVE(kse, thrd) \
281 if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
282 TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
283 (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
286 #define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd)
287 #define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)
289 #define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx)
292 * TailQ initialization values.
294 #define TAILQ_INITIALIZER { NULL, NULL }
297 * lock initialization values.
299 #define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT }
301 struct pthread_mutex {
303 * Lock for accesses to this structure.
306 enum pthread_mutextype m_type;
308 TAILQ_HEAD(mutex_head, pthread) m_queue;
309 struct pthread *m_owner;
315 * Used for priority inheritance and protection.
317 * m_prio - For priority inheritance, the highest active
318 * priority (threads locking the mutex inherit
319 * this priority). For priority protection, the
320 * ceiling priority of this mutex.
321 * m_saved_prio - mutex owners inherited priority before
322 * taking the mutex, restored when the owner
329 * Link for list of all mutexes a thread currently owns.
331 TAILQ_ENTRY(pthread_mutex) m_qe;
/*
 * Flag bits for struct pthread_mutex.  MUTEX_FLAGS_PRIVATE is the
 * default used by PTHREAD_MUTEX_STATIC_INITIALIZER below; the other
 * bits presumably track init/busy state -- confirm at use sites.
 */
337 #define MUTEX_FLAGS_PRIVATE 0x01
338 #define MUTEX_FLAGS_INITED 0x02
339 #define MUTEX_FLAGS_BUSY 0x04
342 * Static mutex initialization values.
344 #define PTHREAD_MUTEX_STATIC_INITIALIZER \
345 { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \
346 TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \
349 struct pthread_mutex_attr {
350 enum pthread_mutextype m_type;
356 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
357 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
360 * Condition variable definitions.
362 enum pthread_cond_type {
367 struct pthread_cond {
369 * Lock for accesses to this structure.
372 enum pthread_cond_type c_type;
373 TAILQ_HEAD(cond_head, pthread) c_queue;
374 struct pthread_mutex *c_mutex;
379 struct pthread_cond_attr {
380 enum pthread_cond_type c_type;
384 struct pthread_barrier {
385 pthread_mutex_t b_lock;
386 pthread_cond_t b_cond;
392 struct pthread_barrierattr {
396 struct pthread_spinlock {
402 * Flags for condition variables.
404 #define COND_FLAGS_PRIVATE 0x01
405 #define COND_FLAGS_INITED 0x02
406 #define COND_FLAGS_BUSY 0x04
409 * Static cond initialization values.
411 #define PTHREAD_COND_STATIC_INITIALIZER \
412 { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \
416 * Cleanup definitions.
418 struct pthread_cleanup {
419 struct pthread_cleanup *next;
420 void (*routine) (void *);
425 #define THR_CLEANUP_PUSH(td, func, arg) { \
426 struct pthread_cleanup __cup; \
428 __cup.routine = func; \
429 __cup.routine_arg = arg; \
431 __cup.next = (td)->cleanup; \
432 (td)->cleanup = &__cup;
434 #define THR_CLEANUP_POP(td, exec) \
435 (td)->cleanup = __cup.next; \
437 __cup.routine(__cup.routine_arg); \
440 struct pthread_atfork {
441 TAILQ_ENTRY(pthread_atfork) qe;
442 void (*prepare)(void);
443 void (*parent)(void);
447 struct pthread_attr {
453 #define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */
454 #define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */
457 void (*cleanup_attr) (void *);
458 void *stackaddr_attr;
459 size_t stacksize_attr;
460 size_t guardsize_attr;
464 * Thread creation state attributes.
/*
 * Thread creation states.  THR_CREATE_RUNNING is the default used in
 * _pthread_attr_default below.
 */
466 #define THR_CREATE_RUNNING 0
467 #define THR_CREATE_SUSPENDED 1
470 * Miscellaneous definitions.
/* Default thread stack sizes; 32/64 presumably refer to 32- and 64-bit
 * platforms -- confirm against pthread_md.h. */
472 #define THR_STACK32_DEFAULT (1 * 1024 * 1024)
473 #define THR_STACK64_DEFAULT (2 * 1024 * 1024)
476 * Maximum size of initial thread's stack. This perhaps deserves to be larger
477 * than the stacks of other threads, since many applications are likely to run
478 * almost entirely on this stack.
480 #define THR_STACK32_INITIAL (2 * 1024 * 1024)
481 #define THR_STACK64_INITIAL (4 * 1024 * 1024)
484 * Define the different priority ranges. All applications have thread
485 * priorities constrained within 0-31. The threads library raises the
486 * priority when delivering signals in order to ensure that signal
487 * delivery happens (from the POSIX spec) "as soon as possible".
488 * In the future, the threads library will also be able to map specific
489 * threads into real-time (cooperating) processes or kernel threads.
490 * The RT and SIGNAL priorities will be used internally and added to
491 * thread base priorities so that the scheduling queue can handle both
492 * normal and RT priority threads with and without signal handling.
494 * The approach taken is that, within each class, signal delivery
495 * always has priority over thread execution.
/*
 * Priority constants.  User-visible priorities occupy 0-31; the signal
 * (0x20) and RT (0x40) boosts are separate bits added on top, so
 * THR_LAST_PRIORITY evaluates to 127 and THR_BASE_PRIORITY() masks the
 * boost bits off again.
 */
497 #define THR_DEFAULT_PRIORITY 15
498 #define THR_MIN_PRIORITY 0
499 #define THR_MAX_PRIORITY 31 /* 0x1F */
500 #define THR_SIGNAL_PRIORITY 32 /* 0x20 */
501 #define THR_RT_PRIORITY 64 /* 0x40 */
502 #define THR_FIRST_PRIORITY THR_MIN_PRIORITY
503 #define THR_LAST_PRIORITY \
504 (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
505 #define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) /* strip boost bits */
508 * Clock resolution in microseconds.
510 #define CLOCK_RES_USEC 10000
513 * Time slice period in microseconds.
515 #define TIMESLICE_USEC 20000
518 * XXX - Define a thread-safe macro to get the current time of day
519 * which is updated at regular intervals by something.
521 * For now, we just make the system call to get the time.
523 #define KSE_GET_TOD(curkse, tsp) \
525 *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \
526 if ((tsp)->tv_sec == 0) \
527 clock_gettime(CLOCK_REALTIME, tsp); \
530 struct pthread_rwlockattr {
534 struct pthread_rwlock {
535 pthread_mutex_t lock; /* monitor lock */
536 pthread_cond_t read_signal;
537 pthread_cond_t write_signal;
538 int state; /* 0 = idle >0 = # of readers -1 = writer */
560 struct sigwait_data {
562 siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */
565 union pthread_wait_data {
566 pthread_mutex_t mutex;
569 struct sigwait_data *sigwait;
573 * Define a continuation routine that can be used to perform a
574 * transfer of control:
576 typedef void (*thread_continuation_t) (void *);
579 * This stores a thread's state prior to running a signal handler.
580 * It is used when a signal is delivered to a thread blocked in
581 * userland. If the signal handler returns normally, the thread's
582 * state is restored from here.
584 struct pthread_sigframe {
591 enum pthread_state psf_state;
592 union pthread_wait_data psf_wait_data;
593 struct timespec psf_wakeup_time;
595 sigset_t psf_sigmask;
597 thread_continuation_t psf_continuation;
601 struct pthread *thread;
606 struct pthread_specific_elem {
611 typedef void (*const_key_destructor_t)(const void *);
612 typedef void (*key_destructor_t)(void *);
615 volatile int allocated;
618 key_destructor_t destructor;
621 #define MAX_THR_LOCKLEVEL 5
626 /* Thread control block */
630 * Magic value to help recognize a valid thread structure
631 * from an invalid one:
633 #define THR_MAGIC ((u_int32_t) 0xd09ba115)
636 u_int64_t uniqueid; /* for gdb */
638 /* Queue entry for list of all threads: */
639 TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
640 TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */
642 /* Queue entry for GC lists: */
643 TAILQ_ENTRY(pthread) gcle;
645 /* Hash queue entry */
646 LIST_ENTRY(pthread) hle;
649 * Lock for accesses to this thread structure.
652 struct lockuser lockusers[MAX_THR_LOCKLEVEL];
654 kse_critical_t critical[MAX_KSE_LOCKLEVEL];
656 struct kse_group *kseg;
659 * Thread start routine, argument, stack pointer and thread
662 void *(*start_routine)(void *);
664 struct pthread_attr attr;
666 int active; /* thread running */
667 int blocked; /* thread blocked in kernel */
671 * Used for tracking delivery of signal handlers.
674 thread_continuation_t sigbackout;
677 * Cancelability flags - the lower 2 bits are used by cancel
678 * definitions in pthread.h
680 #define THR_AT_CANCEL_POINT 0x0004
681 #define THR_CANCELLING 0x0008
682 #define THR_CANCEL_NEEDED 0x0010
685 thread_continuation_t continuation;
688 * The thread's base and pending signal masks. The active
689 * signal mask is stored in the thread's context (in mailbox).
693 sigset_t *oldsigmask;
694 volatile int check_pending;
698 enum pthread_state state;
699 volatile int lock_switch;
702 * Number of microseconds accumulated by this thread when
703 * time slicing is active.
708 * Time to wake up thread. This is used for sleeping threads and
709 * for any operation which may time out (such as select).
711 struct timespec wakeup_time;
713 /* TRUE if operation has timed out. */
717 * Error variable used instead of errno. The function __error()
718 * returns a pointer to this.
723 * The joiner is the thread that is joining to this thread. The
724 * join status keeps track of a join operation to another thread.
726 struct pthread *joiner;
727 struct join_status join_status;
730 * The current thread can belong to only one scheduling queue at
731 * a time (ready or waiting queue). It can also belong to:
733 * o A queue of threads waiting for a mutex
734 * o A queue of threads waiting for a condition variable
736 * It is possible for a thread to belong to more than one of the
737 * above queues if it is handling a signal. A thread may only
738 * enter a mutex or condition variable queue when it is not
739 * being called from a signal handler. If a thread is a member
740 * of one of these queues when a signal handler is invoked, it
741 * must be removed from the queue before invoking the handler
742 * and then added back to the queue after return from the handler.
744 * Use pqe for the scheduling queue link (both ready and waiting),
745 * sqe for synchronization (mutex, condition variable, and join)
746 * queue links, and qe for all other links.
748 TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */
749 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
752 union pthread_wait_data data;
755 * Set to TRUE if a blocking operation was
756 * interrupted by a signal:
761 * Set to non-zero when this thread has entered a critical
762 * region. We allow for recursive entries into critical regions.
767 * Set to TRUE if this thread should yield after leaving a
768 * critical region to check for signals, messages, etc.
773 #define THR_FLAGS_IN_SYNCQ 0x0001 /* on a mutex/cv sync queue; tested by THR_IN_SYNCQ */
775 /* Miscellaneous flags; only set with scheduling lock held. */
777 #define THR_FLAGS_PRIVATE 0x0001 /* presumably process-private thread -- confirm */
778 #define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */
779 #define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */
780 #define THR_FLAGS_EXITING 0x0008 /* thread is exiting */
781 #define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */
783 /* Thread list flags; only set with thread list lock held. */
784 #define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
785 #define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
786 #define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
790 * Base priority is the user setable and retrievable priority
791 * of the thread. It is only affected by explicit calls to
792 * set thread priority and upon thread creation via a thread
793 * attribute or default priority.
798 * Inherited priority is the priority a thread inherits by
799 * taking a priority inheritance or protection mutex. It
800 * is not affected by base priority changes. Inherited
801 * priority defaults to and remains 0 until a mutex is taken
802 * that is being waited on by any other thread whose priority
805 char inherited_priority;
808 * Active priority is always the maximum of the threads base
809 * priority and inherited priority. When there is a change
810 * in either the base or inherited priority, the active
811 * priority must be recalculated.
813 char active_priority;
815 /* Number of priority ceiling or protection mutexes owned. */
816 int priority_mutex_count;
818 /* Number rwlocks rdlocks held. */
822 * Queue of currently owned mutexes.
824 TAILQ_HEAD(, pthread_mutex) mutexq;
827 struct pthread_specific_elem *specific;
828 int specific_data_count;
830 /* Alternative stack for sigaltstack() */
834 * Current locks bitmap for rtld.
838 /* Cleanup handlers Link List */
839 struct pthread_cleanup *cleanup;
840 const char *fname; /* Ptr to source file name */
841 int lineno; /* Source line number. */
845 * Critical regions can also be detected by looking at the threads
846 * current lock level. Ensure these macros increment and decrement
847 * the lock levels such that locks can not be held with a lock level
/*
 * A thread is in a critical region while it holds any low-level lock
 * (locklevel) or has explicitly entered a critical region
 * (critical_count).
 */
#define THR_IN_CRITICAL(thrd) \
	((thrd)->locklevel > 0 || (thrd)->critical_count > 0)
854 #define THR_YIELD_CHECK(thrd) \
856 if (!THR_IN_CRITICAL(thrd)) { \
857 if (__predict_false(_libkse_debug)) \
858 _thr_debug_check_yield(thrd); \
859 if ((thrd)->critical_yield != 0) \
860 _thr_sched_switch(thrd); \
861 if ((thrd)->check_pending != 0) \
862 _thr_sig_check_pending(thrd); \
866 #define THR_LOCK_ACQUIRE(thrd, lck) \
868 if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \
869 THR_DEACTIVATE_LAST_LOCK(thrd); \
870 (thrd)->locklevel++; \
871 _lock_acquire((lck), \
872 &(thrd)->lockusers[(thrd)->locklevel - 1], \
873 (thrd)->active_priority); \
875 PANIC("Exceeded maximum lock level"); \
878 #define THR_LOCK_RELEASE(thrd, lck) \
880 if ((thrd)->locklevel > 0) { \
881 _lock_release((lck), \
882 &(thrd)->lockusers[(thrd)->locklevel - 1]); \
883 (thrd)->locklevel--; \
884 THR_ACTIVATE_LAST_LOCK(thrd); \
885 if ((thrd)->locklevel == 0) \
886 THR_YIELD_CHECK(thrd); \
890 #define THR_ACTIVATE_LAST_LOCK(thrd) \
892 if ((thrd)->locklevel > 0) \
893 _lockuser_setactive( \
894 &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \
897 #define THR_DEACTIVATE_LAST_LOCK(thrd) \
899 if ((thrd)->locklevel > 0) \
900 _lockuser_setactive( \
901 &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \
905 * For now, threads will have their own lock separate from their
906 * KSE scheduling lock.
908 #define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock)
909 #define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock)
910 #define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
911 #define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
914 * Priority queue manipulation macros (using pqe link). We use
915 * the thread's kseg link instead of the kse link because a thread
916 * does not (currently) have a statically assigned kse.
/*
 * Insert/remove a thread in the run queue of its KSEG's scheduling
 * queue (the thread's kseg link is used, not a kse link).
 */
918 #define THR_RUNQ_INSERT_HEAD(thrd) \
919 _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
920 #define THR_RUNQ_INSERT_TAIL(thrd) \
921 _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
922 #define THR_RUNQ_REMOVE(thrd) \
923 _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
926 * Macros to insert/remove threads to the all thread list and
929 #define THR_LIST_ADD(thrd) do { \
930 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
931 TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
932 _thr_hash_add(thrd); \
933 (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
936 #define THR_LIST_REMOVE(thrd) do { \
937 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
938 TAILQ_REMOVE(&_thread_list, thrd, tle); \
939 _thr_hash_remove(thrd); \
940 (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
943 #define THR_GCLIST_ADD(thrd) do { \
944 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
945 TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
946 (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
950 #define THR_GCLIST_REMOVE(thrd) do { \
951 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
952 TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
953 (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
958 #define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5)
961 * Locking the scheduling queue for another thread uses that thread's
964 #define THR_SCHED_LOCK(curthr, thr) do { \
965 (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
966 (curthr)->locklevel++; \
967 KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \
970 #define THR_SCHED_UNLOCK(curthr, thr) do { \
971 KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \
972 (curthr)->locklevel--; \
973 _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
976 /* Take the scheduling lock with the intent to call the scheduler. */
977 #define THR_LOCK_SWITCH(curthr) do { \
978 (void)_kse_critical_enter(); \
979 KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
981 #define THR_UNLOCK_SWITCH(curthr) do { \
982 KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
985 #define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
986 #define THR_CRITICAL_LEAVE(thr) do { \
987 (thr)->critical_count--; \
988 if (((thr)->critical_yield != 0) && \
989 ((thr)->critical_count == 0)) { \
990 (thr)->critical_yield = 0; \
991 _thr_sched_switch(thr); \
/*
 * Thread state predicates.
 *
 * THR_IS_ACTIVE: a thread is active when it is the current thread of
 * its assigned KSE.  The expansion is parenthesized as a whole so the
 * macro composes safely in larger expressions: previously
 * "!THR_IS_ACTIVE(t)" parsed as "(!(t->kse != NULL)) && (t->kse->...)",
 * which both inverted the wrong term and dereferenced a NULL kse.
 */
#define THR_IS_ACTIVE(thrd) \
	(((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))

/* Thread is on a mutex/condvar synchronization queue (sqe link). */
#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

/* Suspended either by scheduler state or by the suspend flag. */
#define THR_IS_SUSPENDED(thrd) \
	(((thrd)->state == PS_SUSPENDED) || \
	(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)
1004 #define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
1007 extern int __isthreaded;
1010 _kse_isthreaded(void)
1012 return (__isthreaded != 0);
1016 * Global variables for the pthread kernel.
1019 SCLASS void *_usrstack SCLASS_PRESET(NULL);
1020 SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
1021 SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
1023 SCLASS int _libkse_debug SCLASS_PRESET(0);
1024 SCLASS int _thread_activated SCLASS_PRESET(0);
1025 SCLASS int _thread_scope_system SCLASS_PRESET(0);
1027 /* List of all threads: */
1028 SCLASS TAILQ_HEAD(, pthread) _thread_list
1029 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
1031 /* List of threads needing GC: */
1032 SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
1033 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
1035 SCLASS int _thread_active_threads SCLASS_PRESET(1);
1037 SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
1038 SCLASS pthread_mutex_t _thr_atfork_mutex;
1040 /* Default thread attributes: */
1041 SCLASS struct pthread_attr _pthread_attr_default
1043 SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
1044 THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
1045 NULL, NULL, /* stacksize */0, /* guardsize */0
1048 /* Default mutex attributes: */
1049 SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
1050 SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
1052 /* Default condition variable attributes: */
1053 SCLASS struct pthread_cond_attr _pthread_condattr_default
1054 SCLASS_PRESET({COND_TYPE_FAST, 0});
1056 /* Clock resolution in usec. */
1057 SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
1059 /* Array of signal actions for this process: */
1060 SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG];
1063 * Lock for above count of dummy handlers and for the process signal
1064 * mask and pending signal sets.
1066 SCLASS struct lock _thread_signal_lock;
1068 /* Pending signals and mask for this process: */
1069 SCLASS sigset_t _thr_proc_sigpending;
1070 SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG];
1072 SCLASS pid_t _thr_pid SCLASS_PRESET(0);
1074 /* Garbage collector lock. */
1075 SCLASS struct lock _gc_lock;
1076 SCLASS int _gc_check SCLASS_PRESET(0);
1077 SCLASS int _gc_count SCLASS_PRESET(0);
1079 SCLASS struct lock _mutex_static_lock;
1080 SCLASS struct lock _rwlock_static_lock;
1081 SCLASS struct lock _keytable_lock;
1082 SCLASS struct lock _thread_list_lock;
1083 SCLASS size_t _thr_guard_default;
1084 SCLASS size_t _thr_stack_default;
1085 SCLASS size_t _thr_stack_initial;
1086 SCLASS int _thr_page_size;
1087 SCLASS pthread_t _thr_sig_daemon;
1088 SCLASS int _thr_debug_flags SCLASS_PRESET(0);
1090 /* Undefine the storage class and preset specifiers: */
1092 #undef SCLASS_PRESET
1096 * Function prototype definitions.
1099 int _cond_reinit(pthread_cond_t *);
1100 struct kse *_kse_alloc(struct pthread *, int sys_scope);
1101 kse_critical_t _kse_critical_enter(void);
1102 void _kse_critical_leave(kse_critical_t);
1103 int _kse_in_critical(void);
1104 void _kse_free(struct pthread *, struct kse *);
1105 void _kse_init(void);
1106 struct kse_group *_kseg_alloc(struct pthread *);
1107 void _kse_lock_wait(struct lock *, struct lockuser *lu);
1108 void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
1109 void _kse_single_thread(struct pthread *);
1110 int _kse_setthreaded(int);
1111 void _kseg_free(struct kse_group *);
1112 int _mutex_cv_lock(pthread_mutex_t *);
1113 int _mutex_cv_unlock(pthread_mutex_t *);
1114 void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
1115 int _mutex_reinit(struct pthread_mutex *);
1116 void _mutex_unlock_private(struct pthread *);
1117 void _libpthread_init(struct pthread *);
1118 int _pq_alloc(struct pq_queue *, int, int);
1119 void _pq_free(struct pq_queue *);
1120 int _pq_init(struct pq_queue *);
1121 void _pq_remove(struct pq_queue *pq, struct pthread *);
1122 void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1123 void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1124 struct pthread *_pq_first(struct pq_queue *pq);
1125 struct pthread *_pq_first_debug(struct pq_queue *pq);
1126 void *_pthread_getspecific(pthread_key_t);
1127 int _pthread_key_create(pthread_key_t *, void (*) (void *));
1128 int _pthread_key_delete(pthread_key_t);
1129 int _pthread_mutex_destroy(pthread_mutex_t *);
1130 int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1131 int _pthread_mutex_lock(pthread_mutex_t *);
1132 int _pthread_mutex_trylock(pthread_mutex_t *);
1133 int _pthread_mutex_unlock(pthread_mutex_t *);
1134 int _pthread_mutexattr_init(pthread_mutexattr_t *);
1135 int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1136 int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1137 int _pthread_once(pthread_once_t *, void (*) (void));
1138 int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
1139 int _pthread_rwlock_destroy (pthread_rwlock_t *);
1140 struct pthread *_pthread_self(void);
1141 int _pthread_setspecific(pthread_key_t, const void *);
1142 void _pthread_yield(void);
1143 void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
1144 void _pthread_cleanup_pop(int execute);
1145 struct pthread *_thr_alloc(struct pthread *);
1146 void _thr_exit(const char *, int, const char *) __dead2;
1147 void _thr_exit_cleanup(void);
1148 void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
1149 void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
1150 void _thr_mutex_reinit(pthread_mutex_t *);
1151 int _thr_ref_add(struct pthread *, struct pthread *, int);
1152 void _thr_ref_delete(struct pthread *, struct pthread *);
1153 void _thr_rtld_init(void);
1154 void _thr_rtld_fini(void);
1155 int _thr_schedule_add(struct pthread *, struct pthread *);
1156 void _thr_schedule_remove(struct pthread *, struct pthread *);
1157 void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
1158 struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
1159 struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
1160 void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
1161 int _thr_stack_alloc(struct pthread_attr *);
1162 void _thr_stack_free(struct pthread_attr *);
1163 void _thr_exit_cleanup(void);
1164 void _thr_free(struct pthread *, struct pthread *);
1165 void _thr_gc(struct pthread *);
1166 void _thr_panic_exit(char *, int, char *);
1167 void _thread_cleanupspecific(void);
1168 void _thread_dump_info(void);
1169 void _thread_printf(int, const char *, ...);
1170 void _thr_sched_switch(struct pthread *);
1171 void _thr_sched_switch_unlocked(struct pthread *);
1172 void _thr_set_timeout(const struct timespec *);
1173 void _thr_seterrno(struct pthread *, int);
1174 void _thr_sig_handler(int, siginfo_t *, void *);
1175 void _thr_sig_check_pending(struct pthread *);
1176 void _thr_sig_rundown(struct pthread *, ucontext_t *);
1177 void _thr_sig_send(struct pthread *pthread, int sig);
1178 void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
1179 void _thr_spinlock_init(void);
1180 void _thr_cancel_enter(struct pthread *);
1181 void _thr_cancel_leave(struct pthread *, int);
1182 int _thr_setconcurrency(int new_level);
1183 int _thr_setmaxconcurrency(void);
1184 void _thr_critical_enter(struct pthread *);
1185 void _thr_critical_leave(struct pthread *);
1186 int _thr_start_sig_daemon(void);
1187 int _thr_getprocsig(int sig, siginfo_t *siginfo);
1188 int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
1189 void _thr_signal_init(void);
1190 void _thr_signal_deinit(void);
1191 void _thr_hash_add(struct pthread *);
1192 void _thr_hash_remove(struct pthread *);
1193 struct pthread *_thr_hash_find(struct pthread *);
1194 void _thr_finish_cancellation(void *arg);
1195 int _thr_sigonstack(void *sp);
1196 void _thr_debug_check_yield(struct pthread *);
1199 * Aliases for _pthread functions. Should be called instead of
1200 * originals if PLT relocation is unwanted at runtime.
1202 int _thr_cond_broadcast(pthread_cond_t *);
1203 int _thr_cond_signal(pthread_cond_t *);
1204 int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
1205 int _thr_mutex_lock(pthread_mutex_t *);
1206 int _thr_mutex_unlock(pthread_mutex_t *);
1207 int _thr_rwlock_rdlock (pthread_rwlock_t *);
1208 int _thr_rwlock_wrlock (pthread_rwlock_t *);
1209 int _thr_rwlock_unlock (pthread_rwlock_t *);
1211 /* #include <sys/aio.h> */
1213 int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1216 /* #include <fcntl.h> */
1217 #ifdef _SYS_FCNTL_H_
1218 int __sys_fcntl(int, int, ...);
1219 int __sys_open(const char *, int, ...);
1222 /* #include <sys/ioctl.h> */
1223 #ifdef _SYS_IOCTL_H_
1224 int __sys_ioctl(int, unsigned long, ...);
1227 /* #include <sched.h> */
1229 int __sys_sched_yield(void);
1232 /* #include <signal.h> */
1234 int __sys_kill(pid_t, int);
1235 int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1236 int __sys_sigpending(sigset_t *);
1237 int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1238 int __sys_sigsuspend(const sigset_t *);
1239 int __sys_sigreturn(ucontext_t *);
1240 int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1243 /* #include <sys/socket.h> */
1244 #ifdef _SYS_SOCKET_H_
1245 int __sys_accept(int, struct sockaddr *, socklen_t *);
1246 int __sys_connect(int, const struct sockaddr *, socklen_t);
1247 int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
1251 /* #include <sys/uio.h> */
1253 ssize_t __sys_readv(int, const struct iovec *, int);
1254 ssize_t __sys_writev(int, const struct iovec *, int);
1257 /* #include <time.h> */
1259 int __sys_nanosleep(const struct timespec *, struct timespec *);
1262 /* #include <unistd.h> */
1264 int __sys_close(int);
1265 int __sys_execve(const char *, char * const *, char * const *);
1266 int __sys_fork(void);
1267 int __sys_fsync(int);
1268 pid_t __sys_getpid(void);
1269 int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1270 ssize_t __sys_read(int, void *, size_t);
1271 ssize_t __sys_write(int, const void *, size_t);
1272 void __sys_exit(int);
1273 int __sys_sigwait(const sigset_t *, int *);
1274 int __sys_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *);
1277 /* #include <poll.h> */
1279 int __sys_poll(struct pollfd *, unsigned, int);
1282 /* #include <sys/mman.h> */
1284 int __sys_msync(void *, size_t, int);
1288 _thr_dump_enabled(void)
1291 return ((_thr_debug_flags & DBG_INFO_DUMP) != 0);
1294 #endif /* !_THR_PRIVATE_H */