2 * Copyright 2004 by Peter Grehan. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
20 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
22 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Machine-dependent thread prototypes/definitions for the thread kernel.
33 #ifndef _PTHREAD_MD_H_
34 #define _PTHREAD_MD_H_
40 extern void _ppc32_enter_uts(struct kse_mailbox *, kse_func_t, void *, size_t);
41 extern int _ppc32_setcontext(mcontext_t *, intptr_t, intptr_t *);
42 extern int _ppc32_getcontext(mcontext_t *);
44 #define KSE_STACKSIZE 16384
45 #define DTV_OFFSET offsetof(struct tcb, tcb_tp.tp_tdv)
47 #define THR_GETCONTEXT(ucp) _ppc32_getcontext(&(ucp)->uc_mcontext)
48 #define THR_SETCONTEXT(ucp) _ppc32_setcontext(&(ucp)->uc_mcontext, 0, NULL)
/*
 * %r2 points to a struct kcb.
 */
struct ppc32_tp {
	struct tdv	*tp_tdv;	/* dynamic TLS */
	/*
	 * NOTE(review): one member line was lost here in extraction.
	 * The TP_OFFSET comment ("8-byte tcb") implies 4 bytes of
	 * padding after the 4-byte tp_tdv pointer -- confirm against
	 * the original source.
	 */
	uint32_t	_reserved_;
	/* [0]-sized trailing array: static TLS data follows in memory. */
	long double	tp_tls[0];	/* static TLS */
};
68 struct kse_thr_mailbox tcb_tmbx;
69 struct pthread *tcb_thread;
70 struct kcb *tcb_curkcb;
72 struct ppc32_tp tcb_tp;
76 struct kse_mailbox kcb_kmbx;
77 struct tcb kcb_faketcb;
78 struct tcb *kcb_curtcb;
83 * From the PowerPC32 TLS spec:
85 * "r2 is the thread pointer, and points 0x7000 past the end of the
86 * thread control block." Or, 0x7008 past the start of the 8-byte tcb
88 #define TP_OFFSET 0x7008
89 register uint8_t *_tpr __asm("%r2");
91 #define _tcb ((struct tcb *)(_tpr - TP_OFFSET - offsetof(struct tcb, tcb_tp)))
/*
 * The kcb and tcb constructors/destructors.
 */
struct tcb	*_tcb_ctor(struct pthread *, int);
void		_tcb_dtor(struct tcb *);
struct kcb	*_kcb_ctor(struct kse *kse);
void		_kcb_dtor(struct kcb *);
101 /* Called from the KSE to set its private data. */
103 _kcb_set(struct kcb *kcb)
105 /* There is no thread yet; use the fake tcb. */
106 _tpr = (uint8_t *)&kcb->kcb_faketcb.tcb_tp + TP_OFFSET;
110 * Get the current kcb.
112 * This can only be called while in a critical region; don't
113 * worry about having the kcb changed out from under us.
115 static __inline struct kcb *
118 return (_tcb->tcb_curkcb);
122 * Enter a critical region.
124 * Read and clear km_curthread in the kse mailbox.
126 static __inline struct kse_thr_mailbox *
127 _kcb_critical_enter(void)
129 struct kse_thr_mailbox *crit;
132 if (_tcb->tcb_isfake != 0) {
134 * We already are in a critical region since
135 * there is no current thread.
139 flags = _tcb->tcb_tmbx.tm_flags;
140 _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
141 crit = _tcb->tcb_curkcb->kcb_kmbx.km_curthread;
142 _tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL;
143 _tcb->tcb_tmbx.tm_flags = flags;
149 _kcb_critical_leave(struct kse_thr_mailbox *crit)
151 /* No need to do anything if this is a fake tcb. */
152 if (_tcb->tcb_isfake == 0)
153 _tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit;
157 _kcb_in_critical(void)
162 if (_tcb->tcb_isfake != 0) {
164 * We are in a critical region since there is no
169 flags = _tcb->tcb_tmbx.tm_flags;
170 _tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL;
171 ret = (_tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL);
172 _tcb->tcb_tmbx.tm_flags = flags;
178 _tcb_set(struct kcb *kcb, struct tcb *tcb)
181 tcb = &kcb->kcb_faketcb;
182 kcb->kcb_curtcb = tcb;
183 tcb->tcb_curkcb = kcb;
184 _tpr = (uint8_t *)&tcb->tcb_tp + TP_OFFSET;
187 static __inline struct tcb *
193 static __inline struct pthread *
196 return (_tcb->tcb_thread);
200 * Get the current kse.
202 * Like _kcb_get(), this can only be called while in a critical region.
204 static __inline struct kse *
207 return (_tcb->tcb_curkcb->kcb_kse);
211 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
213 if (_ppc32_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) {
214 /* Make the fake tcb the current thread. */
215 kcb->kcb_curtcb = &kcb->kcb_faketcb;
216 _tpr = (uint8_t *)&kcb->kcb_faketcb.tcb_tp + TP_OFFSET;
217 _ppc32_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
218 kcb->kcb_kmbx.km_stack.ss_sp,
219 kcb->kcb_kmbx.km_stack.ss_size - 32);
220 /* We should not reach here. */
227 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
230 extern int _libkse_debug;
233 mc = &tcb->tcb_tmbx.tm_context.uc_mcontext;
236 * A full context needs a system call to restore, so use
237 * kse_switchin. Otherwise, the partial context can be
238 * restored with _ppc32_setcontext
240 if (mc->mc_vers != _MC_VERSION_KSE && _libkse_debug != 0) {
242 kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
244 kse_switchin(&tcb->tcb_tmbx, 0);
246 tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
248 _ppc32_setcontext(mc, (intptr_t)&tcb->tcb_tmbx,
249 (intptr_t *)&kcb->kcb_kmbx.km_curthread);
251 _ppc32_setcontext(mc, 0, NULL);
254 /* We should not reach here. */
258 #endif /* _PTHREAD_MD_H_ */