/*
 * Copyright (c) 2003 Marcel Moolenaar
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29 #ifndef _PTHREAD_MD_H_
30 #define _PTHREAD_MD_H_
36 #define KSE_STACKSIZE 16384
37 #define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
39 #define THR_GETCONTEXT(ucp) _ia64_save_context(&(ucp)->uc_mcontext)
40 #define THR_SETCONTEXT(ucp) PANIC("THR_SETCONTEXT() now in use!\n")
struct tdv;				/* We don't know what this is yet? */

/*
 * tp points to one of these. We define the static TLS as an array
 * of long double to enforce 16-byte alignment of the TLS memory,
 * struct ia64_tp, struct tcb and also struct kcb. Both static and
 * dynamic allocation of any of these structures will result in a
 * valid, well-aligned thread pointer.
 */
struct ia64_tp {
	struct tdv		*tp_tdv;	/* dynamic TLS */
	/*
	 * NOTE(review): reconstructed padding member — the DTV pointer is
	 * followed by a reserved quadword in the ia64 TLS ABI layout;
	 * confirm against the original file.
	 */
	uint64_t		_reserved_;
	long double		tp_tls[0];	/* static TLS */
};
64 struct kse_thr_mailbox tcb_tmbx;
65 struct pthread *tcb_thread;
66 struct kcb *tcb_curkcb;
68 struct ia64_tp tcb_tp;
72 struct kse_mailbox kcb_kmbx;
73 struct tcb kcb_faketcb;
74 struct tcb *kcb_curtcb;
/* ia64 keeps the thread pointer in r13; it addresses the embedded ia64_tp. */
register struct ia64_tp *_tp __asm("%r13");

/* Recover the enclosing tcb from the thread pointer. */
#define	_tcb	((struct tcb *)((char *)(_tp) - offsetof(struct tcb, tcb_tp)))
/*
 * The kcb and tcb constructors.
 */
struct tcb	*_tcb_ctor(struct pthread *, int);
void		_tcb_dtor(struct tcb *);
struct kcb	*_kcb_ctor(struct kse *kse);
void		_kcb_dtor(struct kcb *);
90 /* Called from the KSE to set its private data. */
92 _kcb_set(struct kcb *kcb)
94 /* There is no thread yet; use the fake tcb. */
95 _tp = &kcb->kcb_faketcb.tcb_tp;
99 * Get the current kcb.
101 * This can only be called while in a critical region; don't
102 * worry about having the kcb changed out from under us.
104 static __inline struct kcb *
107 return (_tcb->tcb_curkcb);
111 * Enter a critical region.
113 * Read and clear km_curthread in the kse mailbox.
115 static __inline struct kse_thr_mailbox *
116 _kcb_critical_enter(void)
118 struct kse_thr_mailbox *crit;
121 if (_tcb->tcb_isfake != 0) {
123 * We already are in a critical region since
124 * there is no current thread.
128 flags = _tcb->tcb_tmbx.tm_flags;
129 _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
130 crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
131 _tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
132 _tcb->tcb_tmbx.tm_flags = flags;
138 _kcb_critical_leave(struct kse_thr_mailbox *crit)
140 /* No need to do anything if this is a fake tcb. */
141 if (_tcb->tcb_isfake == 0)
142 _tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
146 _kcb_in_critical(void)
151 if (_tcb->tcb_isfake != 0) {
153 * We are in a critical region since there is no
158 flags = _tcb->tcb_tmbx.tm_flags;
159 _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
160 ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
161 _tcb->tcb_tmbx.tm_flags = flags;
167 _tcb_set(struct kcb *kcb, struct tcb *tcb)
170 tcb = &kcb->kcb_faketcb;
171 kcb->kcb_curtcb = tcb;
172 tcb->tcb_curkcb = kcb;
176 static __inline struct tcb *
182 static __inline struct pthread *
185 return (_tcb->tcb_thread);
189 * Get the current kse.
191 * Like _kcb_get(), this can only be called while in a critical region.
193 static __inline struct kse *
196 return (_tcb->tcb_curkcb->kcb_kse);
199 void _ia64_break_setcontext(mcontext_t *mc);
200 void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
202 int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
203 int _ia64_save_context(mcontext_t *mc);
206 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
208 if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
209 /* Make the fake tcb the current thread. */
210 kcb->kcb_curtcb = &kcb->kcb_faketcb;
211 _tp = &kcb->kcb_faketcb.tcb_tp;
212 _ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
213 kcb->kcb_kmbx.km_stack.ss_sp,
214 kcb->kcb_kmbx.km_stack.ss_size);
215 /* We should not reach here. */
222 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
227 mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
228 if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
230 mc->mc_flags |= _MC_FLAGS_KSE_SET_MBOX;
232 (intptr_t)&kcb->kcb_kmbx.km_curthread;
233 mc->mc_special.isr = (intptr_t)&tcb->tcb_tmbx;
235 _ia64_break_setcontext(mc);
236 } else if (mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) {
238 kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
240 kse_switchin(&tcb->tcb_tmbx, 0);
243 _ia64_restore_context(mc, (intptr_t)&tcb->tcb_tmbx,
244 (intptr_t *)&kcb->kcb_kmbx.km_curthread);
246 _ia64_restore_context(mc, 0, NULL);
248 /* We should not reach here. */
#endif /* _PTHREAD_MD_H_ */