2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Private thread definitions for the uthread kernel.
34 #ifndef _PTHREAD_PRIVATE_H
35 #define _PTHREAD_PRIVATE_H
38 * Evaluate the storage class specifier.
40 #ifdef GLOBAL_PTHREAD_PRIVATE
52 #include <sys/param.h>
53 #include <sys/queue.h>
54 #include <sys/types.h>
56 #include <sys/cdefs.h>
60 #include <pthread_np.h>
62 #include <vm/vm_param.h>
64 #include <vm/vm_map.h>
67 * Define machine dependent macros to get and set the stack pointer
68 * from the supported contexts. Also define a macro to set the return
69 * address in a jmp_buf context.
71 * XXX - These need to be moved into architecture dependent support files.
72 * XXX - These need to be documented so porters know what's required.
75 #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
76 #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
77 #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
78 #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
79 #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
80 #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
81 #define FP_SAVE_UC(ucp) do { \
83 fdata = (char *) (ucp)->uc_mcontext.mc_fpstate; \
84 __asm__("fnsave %0": :"m"(*fdata)); \
86 #define FP_RESTORE_UC(ucp) do { \
88 fdata = (char *) (ucp)->uc_mcontext.mc_fpstate; \
89 __asm__("frstor %0": :"m"(*fdata)); \
91 #define SET_RETURN_ADDR_JB(jb, ra) do { \
92 (jb)[0]._jb[0] = (int)(ra); \
95 #elif defined(__amd64__)
96 #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
97 #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
98 #define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_rsp))
99 #define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (long)(stk)
100 #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (long)(stk)
101 #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_rsp = (long)(stk)
102 #define FP_SAVE_UC(ucp) do { \
104 fdata = (char *) (ucp)->uc_mcontext.mc_fpstate; \
105 __asm__("fxsave %0": :"m"(*fdata)); \
107 #define FP_RESTORE_UC(ucp) do { \
109 fdata = (char *) (ucp)->uc_mcontext.mc_fpstate; \
110 __asm__("fxrstor %0": :"m"(*fdata)); \
112 #define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (long)(ra)
113 #elif defined(__alpha__)
114 #include <machine/reg.h>
115 #define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
116 #define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
117 #define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP])
118 #define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
119 #define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
120 #define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
121 #define FP_SAVE_UC(ucp)
122 #define FP_RESTORE_UC(ucp)
123 #define SET_RETURN_ADDR_JB(jb, ra) do { \
124 (jb)[0]._jb[2] = (long)(ra); \
125 (jb)[0]._jb[R_RA + 4] = (long)(ra); \
126 (jb)[0]._jb[R_T12 + 4] = (long)(ra); \
128 #elif defined(__ia64__)
129 #define GET_BSP_JB(jb) (*((unsigned long*)JMPBUF_ADDR_OF(jb,J_BSP)))
130 #define GET_STACK_JB(jb) (*((unsigned long*)JMPBUF_ADDR_OF(jb,J_SP)))
131 #define GET_STACK_SJB(sjb) GET_STACK_JB(sjb)
132 #define SET_RETURN_ADDR_JB(jb, ra) \
134 *((unsigned long*)JMPBUF_ADDR_OF(jb,J_B0)) = ((long*)(ra))[0]; \
135 *((unsigned long*)JMPBUF_ADDR_OF(jb,J_GP)) = ((long*)(ra))[1]; \
136 *((unsigned long*)JMPBUF_ADDR_OF(jb,J_PFS)) &= ~0x1FFFFFFFFFUL; \
138 #define SET_STACK_JB(jb, stk, sz) \
140 UPD_STACK_JB(jb, stk + sz - 16); \
141 GET_BSP_JB(jb) = (long)(stk); \
143 #define UPD_STACK_JB(jb, stk) GET_STACK_JB(jb) = (long)(stk)
144 #elif defined(__sparc64__)
145 #include <machine/frame.h>
147 #define CCFSZ sizeof (struct frame)
149 #define GET_STACK_JB(jb) \
150 ((unsigned long)((jb)[0]._jb[_JB_SP]) + SPOFF)
151 #define GET_STACK_SJB(sjb) \
152 ((unsigned long)((sjb)[0]._sjb[_JB_SP]) + SPOFF)
153 #define GET_STACK_UC(ucp) \
154 ((ucp)->uc_mcontext.mc_sp + SPOFF)
156 * XXX: sparc64 _longjmp() expects a register window on the stack
157 * at the given position, so we must make sure that the space where
158 * it is expected is readable. Subtracting the frame size here works
159 * because the SET_STACK macros are only used to set up new stacks
160 * or signal stacks, but it is a bit dirty.
162 #define SET_STACK_JB(jb, stk) \
163 (jb)[0]._jb[_JB_SP] = (long)(stk) - SPOFF - CCFSZ
164 #define SET_STACK_SJB(sjb, stk) \
165 (sjb)[0]._sjb[_JB_SP] = (long)(stk) - SPOFF - CCFSZ
166 #define SET_STACK_UC(ucp, stk) \
167 (ucp)->uc_mcontext.mc_sp = (unsigned long)(stk) - SPOFF - CCFSZ
168 #define FP_SAVE_UC(ucp) /* XXX */
169 #define FP_RESTORE_UC(ucp) /* XXX */
170 #define SET_RETURN_ADDR_JB(jb, ra) \
171 (jb)[0]._jb[_JB_PC] = (long)(ra) - 8
173 #error "Don't recognize this architecture!"
177 * Kernel fatal error handler macro.
/*
 * Fatal-error handler for the uthread kernel: records the source file and
 * line and terminates via _thread_exit() (declared __dead2 below, so this
 * does not return).
 */
179 #define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
182 /* Output debug messages like this: */
183 #define stdout_debug(args...) do { \
185 snprintf(buf, sizeof(buf), ##args); \
186 __sys_write(1, buf, strlen(buf)); \
188 #define stderr_debug(args...) do { \
190 snprintf(buf, sizeof(buf), ##args); \
191 __sys_write(2, buf, strlen(buf)); \
197 * Priority queue manipulation macros (using pqe link):
/*
 * Ready-queue wrappers: all four operate on the global _readyq priority
 * queue through the _pq_* primitives (prototyped near the end of this file).
 */
199 #define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
200 #define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
201 #define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
202 #define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
205 * Waiting queue manipulation macros (using pqe link):
/* Waiting-queue wrappers over the _waitq_* helpers (threads use the pqe link). */
207 #define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
208 #define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
210 #if defined(_PTHREADS_INVARIANTS)
211 #define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
212 #define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
214 #define PTHREAD_WAITQ_CLEARACTIVE()
215 #define PTHREAD_WAITQ_SETACTIVE()
219 * Work queue manipulation macros (using qe link):
221 #define PTHREAD_WORKQ_INSERT(thrd) do { \
222 TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
223 (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
225 #define PTHREAD_WORKQ_REMOVE(thrd) do { \
226 TAILQ_REMOVE(&_workq,thrd,qe); \
227 (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
232 * State change macro without scheduling queue change:
234 #define PTHREAD_SET_STATE(thrd, newstate) do { \
235 (thrd)->state = newstate; \
236 (thrd)->fname = __FILE__; \
237 (thrd)->lineno = __LINE__; \
241 * State change macro with scheduling queue change - This must be
242 * called with preemption deferred (see thread_kern_sched_[un]defer).
244 #if defined(_PTHREADS_INVARIANTS)
246 #define PTHREAD_ASSERT(cond, msg) do { \
250 #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
251 PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
252 "Illegal call from signal handler");
253 #define PTHREAD_NEW_STATE(thrd, newstate) do { \
254 if (_thread_kern_new_state != 0) \
255 PANIC("Recursive PTHREAD_NEW_STATE"); \
256 _thread_kern_new_state = 1; \
257 if ((thrd)->state != newstate) { \
258 if ((thrd)->state == PS_RUNNING) { \
259 PTHREAD_PRIOQ_REMOVE(thrd); \
260 PTHREAD_SET_STATE(thrd, newstate); \
261 PTHREAD_WAITQ_INSERT(thrd); \
262 } else if (newstate == PS_RUNNING) { \
263 PTHREAD_WAITQ_REMOVE(thrd); \
264 PTHREAD_SET_STATE(thrd, newstate); \
265 PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
268 _thread_kern_new_state = 0; \
271 #define PTHREAD_ASSERT(cond, msg)
272 #define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
273 #define PTHREAD_NEW_STATE(thrd, newstate) do { \
274 if ((thrd)->state != newstate) { \
275 if ((thrd)->state == PS_RUNNING) { \
276 PTHREAD_PRIOQ_REMOVE(thrd); \
277 PTHREAD_WAITQ_INSERT(thrd); \
278 } else if (newstate == PS_RUNNING) { \
279 PTHREAD_WAITQ_REMOVE(thrd); \
280 PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
283 PTHREAD_SET_STATE(thrd, newstate); \
288 * Define the signals to be used for scheduling.
290 #if defined(_PTHREADS_COMPAT_SCHED)
291 #define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
292 #define _SCHED_SIGNAL SIGVTALRM
294 #define _ITIMER_SCHED_TIMER ITIMER_PROF
295 #define _SCHED_SIGNAL SIGPROF
301 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
303 typedef struct pq_list {
304 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
305 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
306 int pl_prio; /* the priority of this list */
307 int pl_queued; /* is this in the priority queue */
310 typedef struct pq_queue {
311 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
312 pq_list_t *pq_lists; /* array of all priority lists */
313 int pq_size; /* number of priority lists */
318 * TailQ initialization values.
320 #define TAILQ_INITIALIZER { NULL, NULL }
325 union pthread_mutex_data {
330 struct pthread_mutex {
331 enum pthread_mutextype m_type;
333 TAILQ_HEAD(mutex_head, pthread) m_queue;
334 struct pthread *m_owner;
335 union pthread_mutex_data m_data;
340 * Used for priority inheritance and protection.
342 * m_prio - For priority inheritance, the highest active
343 * priority (threads locking the mutex inherit
344 * this priority). For priority protection, the
345 * ceiling priority of this mutex.
346 * m_saved_prio - mutex owners inherited priority before
347 * taking the mutex, restored when the owner
354 * Link for list of all mutexes a thread currently owns.
356 TAILQ_ENTRY(pthread_mutex) m_qe;
359 * Lock for accesses to this structure.
/*
 * Mutex flag bits. PRIVATE is used by the static initializers below;
 * NOTE(review): INITED/BUSY semantics are not visible in this header --
 * confirm their use in uthread_mutex.c.
 */
367 #define MUTEX_FLAGS_PRIVATE 0x01
368 #define MUTEX_FLAGS_INITED 0x02
369 #define MUTEX_FLAGS_BUSY 0x04
372 * Static mutex initialization values.
374 #define PTHREAD_MUTEX_STATIC_INITIALIZER \
375 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
376 NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
377 _SPINLOCK_INITIALIZER }
379 struct pthread_mutex_attr {
380 enum pthread_mutextype m_type;
386 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
387 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
390 * Condition variable definitions.
392 enum pthread_cond_type {
397 struct pthread_cond {
398 enum pthread_cond_type c_type;
399 TAILQ_HEAD(cond_head, pthread) c_queue;
400 pthread_mutex_t c_mutex;
406 * Lock for accesses to this structure.
411 struct pthread_cond_attr {
412 enum pthread_cond_type c_type;
417 * Flags for condition variables.
/* Condition-variable flag bits; names and values parallel MUTEX_FLAGS_* above. */
419 #define COND_FLAGS_PRIVATE 0x01
420 #define COND_FLAGS_INITED 0x02
421 #define COND_FLAGS_BUSY 0x04
424 * Static cond initialization values.
426 #define PTHREAD_COND_STATIC_INITIALIZER \
427 { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
428 0, 0, _SPINLOCK_INITIALIZER }
431 * Semaphore definitions.
434 #define SEM_MAGIC ((u_int32_t) 0x09fa4012)
436 pthread_mutex_t lock;
437 pthread_cond_t gtzero;
443 * Cleanup definitions.
445 struct pthread_cleanup {
446 struct pthread_cleanup *next;
451 struct pthread_atfork {
452 TAILQ_ENTRY(pthread_atfork) qe;
453 void (*prepare)(void);
454 void (*parent)(void);
458 struct pthread_attr {
466 void (*cleanup_attr) ();
467 void *stackaddr_attr;
468 size_t stacksize_attr;
469 size_t guardsize_attr;
473 * Thread creation state attributes.
/* Values for the thread-creation-state attribute (see comment above). */
475 #define PTHREAD_CREATE_RUNNING 0
476 #define PTHREAD_CREATE_SUSPENDED 1
479 * Miscellaneous definitions.
/*
 * Default thread stack sizes: 1 MB and 2 MB. The 32/64 suffix presumably
 * selects by platform word size -- confirm against uthread_init.c.
 */
481 #define PTHREAD_STACK32_DEFAULT (1 * 1024 * 1024)
482 #define PTHREAD_STACK64_DEFAULT (2 * 1024 * 1024)
485 * Size of default red zone at the end of each stack. In actuality, this "red
486 * zone" is merely an unmapped region, except in the case of the initial stack.
487 * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
488 * region, an unmapped gap between thread stacks achieves the same effect as
489 * explicitly mapped red zones.
490 * This is declared and initialized in uthread_init.c.
492 extern int _pthread_guard_default;
494 extern int _pthread_page_size;
496 extern int _pthread_stack_default;
498 extern int _pthread_stack_initial;
501 * Maximum size of initial thread's stack. This perhaps deserves to be larger
502 * than the stacks of other threads, since many applications are likely to run
503 * almost entirely on this stack.
/*
 * Initial (main) thread stack sizes -- double the per-thread defaults,
 * per the rationale in the comment above.
 */
505 #define PTHREAD_STACK32_INITIAL (2 * 1024 * 1024)
506 #define PTHREAD_STACK64_INITIAL (4 * 1024 * 1024)
509 * Define the different priority ranges. All applications have thread
510 * priorities constrained within 0-31. The threads library raises the
511 * priority when delivering signals in order to ensure that signal
512 * delivery happens (from the POSIX spec) "as soon as possible".
513 * In the future, the threads library will also be able to map specific
514 * threads into real-time (cooperating) processes or kernel threads.
515 * The RT and SIGNAL priorities will be used internally and added to
516 * thread base priorities so that the scheduling queue can handle both
517 * normal and RT priority threads with and without signal handling.
519 * The approach taken is that, within each class, signal delivery
520 * always has priority over thread execution.
/*
 * Priority ranges. User-visible thread priorities are constrained to
 * 0-31; the SIGNAL and RT offsets are added internally so a single
 * scheduling queue can order normal and elevated threads together
 * (see the larger comment above). PTHREAD_BASE_PRIORITY masks those
 * offsets back off.
 */
522 #define PTHREAD_DEFAULT_PRIORITY 15
523 #define PTHREAD_MIN_PRIORITY 0
524 #define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
525 #define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
526 #define PTHREAD_RT_PRIORITY 64 /* 0x40 */
527 #define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
528 #define PTHREAD_LAST_PRIORITY \
529 (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
530 #define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
533 * Clock resolution in microseconds.
/*
 * Scheduling clock resolution, in microseconds. NOTE(review): the _MIN
 * value is presumably a lower bound applied at init time -- confirm in
 * uthread_init.c, which sets _clock_res_usec.
 */
535 #define CLOCK_RES_USEC 10000
536 #define CLOCK_RES_USEC_MIN 1000
539 * Time slice period in microseconds.
541 #define TIMESLICE_USEC 20000
544 * Define a thread-safe macro to get the current time of day
545 * which is updated at regular intervals by the scheduling signal
548 #define GET_CURRENT_TOD(tv) \
550 tv.tv_sec = _sched_tod.tv_sec; \
551 tv.tv_usec = _sched_tod.tv_usec; \
552 } while (tv.tv_sec != _sched_tod.tv_sec)
555 struct pthread_rwlockattr {
559 struct pthread_rwlock {
560 pthread_mutex_t lock; /* monitor lock */
561 int state; /* 0 = idle >0 = # of readers -1 = writer */
562 pthread_cond_t read_signal;
563 pthread_cond_t write_signal;
596 * File descriptor locking definitions.
600 #define FD_RDWR (FD_READ | FD_WRITE)
603 * File descriptor table structure.
605 struct fd_table_entry {
607 * Lock for accesses to this file descriptor table
608 * entry. This is passed to _spinlock() to provide atomic
609 * access to this structure. It does *not* represent the
610 * state of the lock on the file descriptor.
613 TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
614 TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
615 struct pthread *r_owner; /* Ptr to thread owning read lock. */
616 struct pthread *w_owner; /* Ptr to thread owning write lock. */
617 char *r_fname; /* Ptr to read lock source file name */
618 int r_lineno; /* Read lock source line number. */
619 char *w_fname; /* Ptr to write lock source file name */
620 int w_lineno; /* Write lock source line number. */
621 int r_lockcount; /* Count for FILE read locks. */
622 int w_lockcount; /* Count for FILE write locks. */
623 int flags; /* Flags used in open. */
626 struct pthread_poll_data {
631 union pthread_wait_data {
632 pthread_mutex_t mutex;
634 const sigset_t *sigwait; /* Waiting on a signal in sigwait */
636 short fd; /* Used when thread waiting on fd */
637 short branch; /* Line number, for debugging. */
638 char *fname; /* Source file name for debugging.*/
641 struct pthread_poll_data *poll_data;
642 spinlock_t *spinlock;
643 struct pthread *thread;
647 * Define a continuation routine that can be used to perform a
648 * transfer of control:
650 typedef void (*thread_continuation_t) (void *);
652 struct pthread_signal_frame;
654 struct pthread_state_data {
655 struct pthread_signal_frame *psd_curframe;
656 sigset_t psd_sigmask;
657 struct timespec psd_wakeup_time;
658 union pthread_wait_data psd_wait_data;
659 enum pthread_state psd_state;
663 int psd_sigmask_seqno;
665 int psd_sig_defer_count;
666 /* XXX - What about thread->timeout and/or thread->error? */
670 struct pthread *thread;
676 * The frame that is added to the top of a thread's stack when setting
677 * up the thread to run a signal handler.
679 struct pthread_signal_frame {
681 * This stores the thread's state before the signal.
683 struct pthread_state_data saved_state;
686 * Threads return context; we use only jmp_buf's for now.
692 int signo; /* signal, arg 1 to sighandler */
693 int sig_has_args; /* use signal args if true */
698 struct pthread_specific_elem {
708 * Magic value to help recognize a valid thread structure
709 * from an invalid one:
711 #define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
714 u_int64_t uniqueid; /* for gdb */
717 * Lock for accesses to this thread structure.
721 /* Queue entry for list of all threads: */
722 TAILQ_ENTRY(pthread) tle;
724 /* Queue entry for list of dead threads: */
725 TAILQ_ENTRY(pthread) dle;
728 * Thread start routine, argument, stack pointer and thread
731 void *(*start_routine)(void *);
734 struct pthread_attr attr;
737 * Threads return context; we use only jmp_buf's for now.
745 * Used for tracking delivery of signal handlers.
747 struct pthread_signal_frame *curframe;
750 * Cancelability flags - the lower 2 bits are used by cancel
751 * definitions in pthread.h
/*
 * Cancellation flag bits. Values start at 0x0004 because the lower two
 * bits are taken by the cancel definitions in pthread.h (per the comment
 * above).
 */
753 #define PTHREAD_AT_CANCEL_POINT 0x0004
754 #define PTHREAD_CANCELLING 0x0008
755 #define PTHREAD_CANCEL_NEEDED 0x0010
758 thread_continuation_t continuation;
761 * Current signal mask and pending signals.
769 enum pthread_state state;
771 /* Scheduling clock when this thread was last made active. */
774 /* Scheduling clock when this thread was last made inactive. */
778 * Number of microseconds accumulated by this thread when
779 * time slicing is active.
784 * Time to wake up thread. This is used for sleeping threads and
785 * for any operation which may time out (such as select).
787 struct timespec wakeup_time;
789 /* TRUE if operation has timed out. */
793 * Error variable used instead of errno. The function __error()
794 * returns a pointer to this.
799 * The joiner is the thread that is joining to this thread. The
800 * join status keeps track of a join operation to another thread.
802 struct pthread *joiner;
803 struct join_status join_status;
806 * The current thread can belong to only one scheduling queue at
807 * a time (ready or waiting queue). It can also belong to:
809 * o A queue of threads waiting for a mutex
810 * o A queue of threads waiting for a condition variable
811 * o A queue of threads waiting for a file descriptor lock
812 * o A queue of threads needing work done by the kernel thread
813 * (waiting for a spinlock or file I/O)
815 * A thread can also be joining a thread (the joiner field above).
817 * It must not be possible for a thread to belong to any of the
818 * above queues while it is handling a signal. Signal handlers
819 * may longjmp back to previous stack frames circumventing normal
820 * control flow. This could corrupt queue integrity if the thread
821 * retains membership in the queue. Therefore, if a thread is a
822 * member of one of these queues when a signal handler is invoked,
823 * it must remove itself from the queue before calling the signal
824 * handler and reinsert itself after normal return of the handler.
826 * Use pqe for the scheduling queue link (both ready and waiting),
827 * sqe for synchronization (mutex and condition variable) queue
828 * links, and qe for all other links.
830 TAILQ_ENTRY(pthread) pqe; /* priority queue link */
831 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
832 TAILQ_ENTRY(pthread) qe; /* all other queues link */
835 union pthread_wait_data data;
838 * Allocated for converting select into poll.
840 struct pthread_poll_data poll_data;
843 * Set to TRUE if a blocking operation was
844 * interrupted by a signal:
848 /* Signal number when in state PS_SIGWAIT: */
852 * Set to non-zero when this thread has deferred signals.
853 * We allow for recursive deferral.
858 * Set to TRUE if this thread should yield after undeferring
861 int yield_on_sig_undefer;
863 /* Miscellaneous flags; only set with signals deferred. */
/*
 * Per-thread flag bits. Each *_IN_*Q bit records which queue the thread
 * currently sits on and which TAILQ link (pqe, sqe, or qe) that queue
 * uses; IN_SYNCQ combines the two sqe-link queues (mutex + condvar).
 */
865 #define PTHREAD_FLAGS_PRIVATE 0x0001
866 #define PTHREAD_EXITING 0x0002
867 #define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
868 #define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
869 #define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
870 #define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
871 #define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
872 #define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
873 #define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
874 #define PTHREAD_FLAGS_SUSPENDED 0x0200 /* thread is suspended */
875 #define PTHREAD_FLAGS_TRACE 0x0400 /* for debugging purposes */
876 #define PTHREAD_FLAGS_IN_SYNCQ \
877 (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
880 * Base priority is the user-settable and retrievable priority
881 * of the thread. It is only affected by explicit calls to
882 * set thread priority and upon thread creation via a thread
883 * attribute or default priority.
888 * Inherited priority is the priority a thread inherits by
889 * taking a priority inheritance or protection mutex. It
890 * is not affected by base priority changes. Inherited
891 * priority defaults to and remains 0 until a mutex is taken
892 * that is being waited on by any other thread whose priority
895 char inherited_priority;
898 * Active priority is always the maximum of the threads base
899 * priority and inherited priority. When there is a change
900 * in either the base or inherited priority, the active
901 * priority must be recalculated.
903 char active_priority;
905 /* Number of priority ceiling or protection mutexes owned. */
906 int priority_mutex_count;
908 /* Number rwlocks rdlocks held. */
912 * Queue of currently owned mutexes.
914 TAILQ_HEAD(, pthread_mutex) mutexq;
917 struct pthread_specific_elem *specific;
918 int specific_data_count;
920 /* Cleanup handlers Link List */
921 struct pthread_cleanup *cleanup;
922 char *fname; /* Ptr to source file name */
923 int lineno; /* Source line number. */
927 * Global variables for the uthread kernel.
930 SCLASS void *_usrstack
931 #ifdef GLOBAL_PTHREAD_PRIVATE
937 /* Kernel thread structure used when there are no running threads: */
938 SCLASS struct pthread _thread_kern_thread;
940 /* Ptr to the thread structure for the running thread: */
941 SCLASS struct pthread * volatile _thread_run
942 #ifdef GLOBAL_PTHREAD_PRIVATE
943 = &_thread_kern_thread;
948 /* Ptr to the thread structure for the last user thread to run: */
949 SCLASS struct pthread * volatile _last_user_thread
950 #ifdef GLOBAL_PTHREAD_PRIVATE
951 = &_thread_kern_thread;
956 /* List of all threads: */
957 SCLASS TAILQ_HEAD(, pthread) _thread_list
958 #ifdef GLOBAL_PTHREAD_PRIVATE
959 = TAILQ_HEAD_INITIALIZER(_thread_list);
965 * Array of kernel pipe file descriptors that are used to ensure that
966 * no signals are missed in calls to _select.
968 SCLASS int _thread_kern_pipe[2]
969 #ifdef GLOBAL_PTHREAD_PRIVATE
977 SCLASS int volatile _queue_signals
978 #ifdef GLOBAL_PTHREAD_PRIVATE
983 SCLASS int _thread_kern_in_sched
984 #ifdef GLOBAL_PTHREAD_PRIVATE
990 SCLASS int _sig_in_handler
991 #ifdef GLOBAL_PTHREAD_PRIVATE
997 /* Time of day at last scheduling timer signal: */
998 SCLASS struct timeval volatile _sched_tod
999 #ifdef GLOBAL_PTHREAD_PRIVATE
1006 * Current scheduling timer ticks; used as resource usage.
1008 SCLASS unsigned int volatile _sched_ticks
1009 #ifdef GLOBAL_PTHREAD_PRIVATE
1016 SCLASS TAILQ_HEAD(, pthread) _dead_list
1017 #ifdef GLOBAL_PTHREAD_PRIVATE
1018 = TAILQ_HEAD_INITIALIZER(_dead_list);
1023 /* Initial thread: */
1024 SCLASS struct pthread *_thread_initial
1025 #ifdef GLOBAL_PTHREAD_PRIVATE
1031 SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _atfork_list;
1032 SCLASS pthread_mutex_t _atfork_mutex;
1034 /* Default thread attributes: */
1035 SCLASS struct pthread_attr _pthread_attr_default
1036 #ifdef GLOBAL_PTHREAD_PRIVATE
1037 = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
1038 PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
1044 /* Default mutex attributes: */
1045 #define PTHREAD_MUTEXATTR_DEFAULT \
1046 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }
1048 /* Default condition variable attributes: */
1049 #define PTHREAD_CONDATTR_DEFAULT { COND_TYPE_FAST, 0 }
1052 * Standard I/O file descriptors need special flag treatment since
1053 * setting one to non-blocking does all on *BSD. Sigh. This array
1054 * is used to store the initial flag settings.
1056 SCLASS int _pthread_stdio_flags[3];
1058 /* File table information: */
1059 SCLASS struct fd_table_entry **_thread_fd_table
1060 #ifdef GLOBAL_PTHREAD_PRIVATE
1066 /* Table for polling file descriptors: */
1067 SCLASS struct pollfd *_thread_pfd_table
1068 #ifdef GLOBAL_PTHREAD_PRIVATE
1074 SCLASS const int dtablecount
1075 #ifdef GLOBAL_PTHREAD_PRIVATE
1076 = 4096/sizeof(struct fd_table_entry);
1080 SCLASS int _thread_dtablesize /* Descriptor table size. */
1081 #ifdef GLOBAL_PTHREAD_PRIVATE
1087 SCLASS int _clock_res_usec /* Clock resolution in usec. */
1088 #ifdef GLOBAL_PTHREAD_PRIVATE
1094 /* Garbage collector mutex and condition variable. */
1095 SCLASS pthread_mutex_t _gc_mutex
1096 #ifdef GLOBAL_PTHREAD_PRIVATE
1100 SCLASS pthread_cond_t _gc_cond
1101 #ifdef GLOBAL_PTHREAD_PRIVATE
1107 * Array of signal actions for this process.
1109 SCLASS struct sigaction _thread_sigact[NSIG];
1112 * Array of counts of dummy handlers for SIG_DFL signals. This is used to
1113 * assure that there is always a dummy signal handler installed while there is a
1114 * thread sigwait()ing on the corresponding signal.
1116 SCLASS int _thread_dfl_count[NSIG];
1119 * Pending signals and mask for this process:
1121 SCLASS sigset_t _process_sigpending;
1122 SCLASS sigset_t _process_sigmask
1123 #ifdef GLOBAL_PTHREAD_PRIVATE
1129 * Scheduling queues:
1131 SCLASS pq_queue_t _readyq;
1132 SCLASS TAILQ_HEAD(, pthread) _waitingq;
1137 SCLASS TAILQ_HEAD(, pthread) _workq;
1139 /* Tracks the number of threads blocked while waiting for a spinlock. */
1140 SCLASS volatile int _spinblock_count
1141 #ifdef GLOBAL_PTHREAD_PRIVATE
1146 /* Used to maintain pending and active signals: */
1148 int pending; /* Is this a pending signal? */
1150 * A handler is currently active for
1151 * this signal; ignore subsequent
1152 * signals until the handler is done.
1154 int signo; /* arg 1 to signal handler */
1155 siginfo_t siginfo; /* arg 2 to signal handler */
1156 ucontext_t uc; /* arg 3 to signal handler */
1159 SCLASS struct sigstatus _thread_sigq[NSIG];
1161 /* Indicates that the signal queue needs to be checked. */
1162 SCLASS volatile int _sigq_check_reqd
1163 #ifdef GLOBAL_PTHREAD_PRIVATE
1168 /* Thread switch hook. */
1169 SCLASS pthread_switch_routine_t _sched_switch_hook
1170 #ifdef GLOBAL_PTHREAD_PRIVATE
1176 * Declare the kernel scheduler jump buffer and stack:
1178 SCLASS jmp_buf _thread_kern_sched_jb;
1180 SCLASS void * _thread_kern_sched_stack
1181 #ifdef GLOBAL_PTHREAD_PRIVATE
1187 /* Used for _PTHREADS_INVARIANTS checking. */
1188 SCLASS int _thread_kern_new_state
1189 #ifdef GLOBAL_PTHREAD_PRIVATE
1194 /* Undefine the storage class specifier: */
1198 #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \
1199 _ts, __FILE__, __LINE__)
1200 #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \
1203 #define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts)
1204 #define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type)
1208 * Function prototype definitions.
1211 char *__ttyname_basic(int);
1212 void _cond_wait_backout(pthread_t);
1213 void _fd_lock_backout(pthread_t);
1214 int _find_thread(pthread_t);
1215 struct pthread *_get_curthread(void);
1216 void _set_curthread(struct pthread *);
1217 void *_thread_stack_alloc(size_t, size_t);
1218 void _thread_stack_free(void *, size_t, size_t);
1219 int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
1220 int _mutex_cv_lock(pthread_mutex_t *);
1221 int _mutex_cv_unlock(pthread_mutex_t *);
1222 void _mutex_lock_backout(pthread_t);
1223 void _mutex_notify_priochange(pthread_t);
1224 int _mutex_reinit(pthread_mutex_t *);
1225 void _mutex_unlock_private(pthread_t);
1226 int _cond_reinit(pthread_cond_t *);
/*
 * Priority-queue primitives backing the PTHREAD_PRIOQ_* macros above
 * (see the XXX note near the pq_list/pq_queue definitions: these belong
 * in uthread_priority_queue.[ch]).
 */
1227 int _pq_alloc(struct pq_queue *, int, int);
1228 int _pq_init(struct pq_queue *);
1229 void _pq_remove(struct pq_queue *pq, struct pthread *);
1230 void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1231 void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1232 struct pthread *_pq_first(struct pq_queue *pq);
1233 void *_pthread_getspecific(pthread_key_t);
1234 int _pthread_key_create(pthread_key_t *, void (*) (void *));
1235 int _pthread_key_delete(pthread_key_t);
1236 int _pthread_mutex_destroy(pthread_mutex_t *);
1237 int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1238 int _pthread_mutex_lock(pthread_mutex_t *);
1239 int _pthread_mutex_trylock(pthread_mutex_t *);
1240 int _pthread_mutex_unlock(pthread_mutex_t *);
1241 int _pthread_mutexattr_init(pthread_mutexattr_t *);
1242 int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1243 int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1244 int _pthread_once(pthread_once_t *, void (*) (void));
1245 pthread_t _pthread_self(void);
1246 int _pthread_setspecific(pthread_key_t, const void *);
1247 void _waitq_insert(pthread_t pthread);
1248 void _waitq_remove(pthread_t pthread);
1249 #if defined(_PTHREADS_INVARIANTS)
1250 void _waitq_setactive(void);
1251 void _waitq_clearactive(void);
1253 void _thread_exit(char *, int, char *) __dead2;
1254 void _thread_exit_cleanup(void);
1255 int _thread_fd_getflags(int);
1256 int _thread_fd_lock(int, int, struct timespec *);
1257 int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
1258 void _thread_fd_setflags(int, int);
1259 int _thread_fd_table_init(int fd);
1260 void _thread_fd_unlock(int, int);
1261 void _thread_fd_unlock_debug(int, int, char *, int);
1262 void _thread_fd_unlock_owned(pthread_t);
1263 void *_thread_cleanup(pthread_t);
1264 void _thread_cleanupspecific(void);
1265 void _thread_dump_info(void);
1266 void _thread_init(void);
1267 void _thread_kern_sched(ucontext_t *);
1268 void _thread_kern_scheduler(void);
1269 void _thread_kern_sched_frame(struct pthread_signal_frame *psf);
1270 void _thread_kern_sched_sig(void);
1271 void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
1272 void _thread_kern_sched_state_unlock(enum pthread_state state,
1273 spinlock_t *lock, char *fname, int lineno);
1274 void _thread_kern_set_timeout(const struct timespec *);
1275 void _thread_kern_sig_defer(void);
1276 void _thread_kern_sig_undefer(void);
1277 void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
1278 void _thread_sig_check_pending(struct pthread *pthread);
1279 void _thread_sig_handle_pending(void);
1280 void _thread_sig_send(struct pthread *pthread, int sig);
1281 void _thread_sig_wrapper(void);
1282 void _thread_sigframe_restore(struct pthread *thread,
1283 struct pthread_signal_frame *psf);
1284 void _thread_start(void);
1285 void _thread_seterrno(pthread_t, int);
1286 pthread_addr_t _thread_gc(pthread_addr_t);
1287 void _thread_enter_cancellation_point(void);
1288 void _thread_leave_cancellation_point(void);
1289 void _thread_cancellation_point(void);
1291 /* #include <sys/acl.h> */
1293 int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *);
1294 int __sys___acl_delete_fd(int, acl_type_t);
1295 int __sys___acl_get_fd(int, acl_type_t, struct acl *);
1296 int __sys___acl_set_fd(int, acl_type_t, struct acl *);
/* #include <sys/aio.h> */
/* Raw aio_suspend(2); the library wraps it to avoid blocking the process. */
int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
/* #include <sys/event.h> */
/* Fixed: restored the missing #endif closing this conditional. */
#ifdef _SYS_EVENT_H_
int	__sys_kevent(int, const struct kevent *, int, struct kevent *,
	    int, const struct timespec *);
#endif
/* #include <sys/ioctl.h> */
/* Fixed: restored the missing #endif closing this conditional. */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif
1315 /* #include <sys/mman.h> */
1317 int __sys_msync(void *, size_t, int);
/* #include <sys/mount.h> */
/* Fixed: restored the missing #endif closing this conditional. */
#ifdef _SYS_MOUNT_H_
int	__sys_fstatfs(int, struct statfs *);
#endif
/* #include <sys/socket.h> */
/*
 * Raw socket system calls; the cancellable/blocking-aware wrappers
 * elsewhere in the library call these.
 * (Fixed: restored the missing #endif closing this conditional.)
 */
#ifdef _SYS_SOCKET_H_
int	__sys_accept(int, struct sockaddr *, socklen_t *);
int	__sys_bind(int, const struct sockaddr *, socklen_t);
int	__sys_connect(int, const struct sockaddr *, socklen_t);
int	__sys_getpeername(int, struct sockaddr *, socklen_t *);
int	__sys_getsockname(int, struct sockaddr *, socklen_t *);
int	__sys_getsockopt(int, int, int, void *, socklen_t *);
int	__sys_listen(int, int);
ssize_t	__sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
ssize_t	__sys_recvmsg(int, struct msghdr *, int);
int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
ssize_t	__sys_sendmsg(int, const struct msghdr *, int);
ssize_t	__sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
int	__sys_setsockopt(int, int, int, const void *, socklen_t);
int	__sys_shutdown(int, int);
int	__sys_socket(int, int, int);
int	__sys_socketpair(int, int, int, int *);
#endif
1345 /* #include <sys/stat.h> */
1347 int __sys_fchflags(int, u_long);
1348 int __sys_fchmod(int, mode_t);
1349 int __sys_fstat(int, struct stat *);
1352 /* #include <sys/uio.h> */
1354 ssize_t __sys_readv(int, const struct iovec *, int);
1355 ssize_t __sys_writev(int, const struct iovec *, int);
1358 /* #include <sys/wait.h> */
1360 pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
/* #include <dirent.h> */
int	__sys_getdirentries(int, char *, int, long *);
/* #include <fcntl.h> */
/* Fixed: restored the missing #endif closing this conditional. */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_flock(int, int);
int	__sys_open(const char *, int, ...);
#endif
/* #include <poll.h> */
/* Raw poll(2); the scheduler uses it to multiplex blocked threads. */
int	__sys_poll(struct pollfd *, unsigned, int);
1380 /* #include <signal.h> */
1382 int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1383 int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1384 int __sys_sigreturn(ucontext_t *);
1387 /* #include <unistd.h> */
1389 int __sys_close(int);
1391 int __sys_dup2(int, int);
1392 int __sys_execve(const char *, char * const *, char * const *);
1393 void __sys_exit(int) __dead2;
1394 int __sys_fchown(int, uid_t, gid_t);
1395 pid_t __sys_fork(void);
1396 long __sys_fpathconf(int, int);
1397 int __sys_fsync(int);
1398 int __sys_pipe(int *);
1399 ssize_t __sys_read(int, void *, size_t);
1400 ssize_t __sys_write(int, const void *, size_t);
1403 /* #include <setjmp.h> */
1405 extern void __siglongjmp(sigjmp_buf, int) __dead2;
1406 extern void __longjmp(jmp_buf, int) __dead2;
1407 extern void ___longjmp(jmp_buf, int) __dead2;
1411 #endif /* !_PTHREAD_PRIVATE_H */