2 * Copyright (c) 2003-2006 Marcel Moolenaar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #ifndef _PTHREAD_MD_H_
30 #define _PTHREAD_MD_H_
36 #define KSE_STACKSIZE 16384
37 #define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_dtv)
39 #define THR_GETCONTEXT(ucp) _ia64_save_context(&(ucp)->uc_mcontext)
40 #define THR_SETCONTEXT(ucp) PANIC("THR_SETCONTEXT() now in use!\n")
50 * tp points to one of these. We define the TLS structure as a union
51 * containing a long double to enforce 16-byte alignment. This makes
52 * sure that there will not be any padding in struct tcb after the
61 struct kse_thr_mailbox tcb_tmbx;
62 struct pthread *tcb_thread;
63 struct kcb *tcb_curkcb;
69 struct kse_mailbox kcb_kmbx;
71 struct tcb *kcb_curtcb;
72 struct tcb kcb_faketcb;
75 static __inline struct tcb *
78 register char *tp __asm("%r13");
80 return ((struct tcb *)(tp - offsetof(struct tcb, tcb_tp)));
84 ia64_set_tcb(struct tcb *tcb)
86 register char *tp __asm("%r13");
88 __asm __volatile("mov %0 = %1;;" : "=r"(tp) : "r"(&tcb->tcb_tp));
/*
 * The kcb and tcb constructors and destructors.
 */
struct tcb	*_tcb_ctor(struct pthread *, int);
void		_tcb_dtor(struct tcb *);
struct kcb	*_kcb_ctor(struct kse *kse);
void		_kcb_dtor(struct kcb *);
99 /* Called from the KSE to set its private data. */
101 _kcb_set(struct kcb *kcb)
103 /* There is no thread yet; use the fake tcb. */
104 ia64_set_tcb(&kcb->kcb_faketcb);
108 * Get the current kcb.
110 * This can only be called while in a critical region; don't
111 * worry about having the kcb changed out from under us.
113 static __inline struct kcb *
116 return (ia64_get_tcb()->tcb_curkcb);
120 * Enter a critical region.
122 * Read and clear km_curthread in the kse mailbox.
124 static __inline struct kse_thr_mailbox *
125 _kcb_critical_enter(void)
128 struct kse_thr_mailbox *crit;
131 tcb = ia64_get_tcb();
132 if (tcb->tcb_isfake != 0) {
134 * We already are in a critical region since
135 * there is no current thread.
139 flags = tcb->tcb_tmbx.tm_flags;
140 tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
141 crit = tcb->tcb_curkcb->kcb_kmbx.km_curthread;
142 tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
143 tcb->tcb_tmbx.tm_flags = flags;
149 _kcb_critical_leave(struct kse_thr_mailbox *crit)
153 tcb = ia64_get_tcb();
154 /* No need to do anything if this is a fake tcb. */
155 if (tcb->tcb_isfake == 0)
156 tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
160 _kcb_in_critical(void)
166 tcb = ia64_get_tcb();
167 if (tcb->tcb_isfake != 0) {
169 * We are in a critical region since there is no
174 flags = tcb->tcb_tmbx.tm_flags;
175 tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
176 ret = (tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
177 tcb->tcb_tmbx.tm_flags = flags;
183 _tcb_set(struct kcb *kcb, struct tcb *tcb)
186 tcb = &kcb->kcb_faketcb;
187 kcb->kcb_curtcb = tcb;
188 tcb->tcb_curkcb = kcb;
/* Get the current tcb. */
static __inline struct tcb *
_tcb_get(void)
{
	return (ia64_get_tcb());
}
198 static __inline struct pthread *
201 return (ia64_get_tcb()->tcb_thread);
205 * Get the current kse.
207 * Like _kcb_get(), this can only be called while in a critical region.
209 static __inline struct kse *
212 return (ia64_get_tcb()->tcb_curkcb->kcb_kse);
215 void _ia64_break_setcontext(mcontext_t *mc);
216 void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack,
218 int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
219 int _ia64_save_context(mcontext_t *mc);
222 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
224 if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
225 /* Make the fake tcb the current thread. */
226 kcb->kcb_curtcb = &kcb->kcb_faketcb;
227 ia64_set_tcb(&kcb->kcb_faketcb);
228 _ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx,
229 kcb->kcb_kmbx.km_stack.ss_sp,
230 kcb->kcb_kmbx.km_stack.ss_size);
231 /* We should not reach here. */
238 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
243 mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
244 if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) {
246 mc->mc_flags |= _MC_FLAGS_KSE_SET_MBOX;
248 (intptr_t)&kcb->kcb_kmbx.km_curthread;
249 mc->mc_special.isr = (intptr_t)&tcb->tcb_tmbx;
251 _ia64_break_setcontext(mc);
252 } else if (mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) {
254 kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
256 kse_switchin(&tcb->tcb_tmbx, 0);
259 _ia64_restore_context(mc, (intptr_t)&tcb->tcb_tmbx,
260 (intptr_t *)&kcb->kcb_kmbx.km_curthread);
262 _ia64_restore_context(mc, 0, NULL);
264 /* We should not reach here. */
268 #endif /* _PTHREAD_MD_H_ */