/*-
 * Copyright (C) 2003 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the author nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Machine-dependent thread prototypes/definitions for the thread kernel.
 */
#ifndef _PTHREAD_MD_H_
#define	_PTHREAD_MD_H_

#include <stddef.h>
#include <sys/types.h>
#include <sys/kse.h>
#include <machine/sysarch.h>
#include <ucontext.h>
/* Size of the stack each KSE runs its UTS (upcall) code on. */
#define	KSE_STACKSIZE		16384

/* Byte offset of the DTV pointer inside struct tcb (used by rtld TLS). */
#define	DTV_OFFSET		offsetof(struct tcb, tcb_dtv)
/*
 * Save / restore the machine context of the calling thread using the
 * amd64 helpers declared later in this file.  THR_SETCONTEXT restores
 * with val 0 and no location to store through.
 */
#define	THR_GETCONTEXT(ucp)	\
	(void)_amd64_save_context(&(ucp)->uc_mcontext)
#define	THR_SETCONTEXT(ucp)	\
	(void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL)
57 * %fs points to a struct kcb.
60 struct tcb *kcb_curtcb;
61 struct kcb *kcb_self; /* self reference */
63 struct kse_mailbox kcb_kmbx;
67 struct tcb *tcb_self; /* required by rtld */
68 void *tcb_dtv; /* required by rtld */
69 struct pthread *tcb_thread;
70 void *tcb_spare[1]; /* align tcb_tmbx to 16 bytes */
71 struct kse_thr_mailbox tcb_tmbx;
/*
 * Evaluates to the byte offset of the per-kse variable name.
 */
#define	__kcb_offset(name)	__offsetof(struct kcb, name)
/*
 * Evaluates to the type of the per-kse variable name.
 */
#define	__kcb_type(name)	__typeof(((struct kcb *)0)->name)
/*
 * Evaluates to the value of the per-kse variable name, read as a
 * 64-bit quantity through the %fs segment (the kcb base).
 * NOTE(review): the __i declaration, the "=r" output constraint and
 * the closing of the statement expression were dropped by extraction
 * and are restored here from the upstream form — confirm.
 */
#define	KCB_GET64(name) ({					\
	__kcb_type(name) __result;				\
								\
	u_long __i;						\
	__asm __volatile("movq %%fs:%1, %0"			\
	    : "=r" (__i)					\
	    : "m" (*(u_long *)(__kcb_offset(name))));		\
	__result = (__kcb_type(name))__i;			\
								\
	__result;						\
})
/*
 * Sets the value of the per-kse variable name to value val, written
 * as a 64-bit quantity through the %fs segment.
 * NOTE(review): the __i declaration, the "r" input constraint and the
 * closing of the statement expression were dropped by extraction and
 * are restored here from the upstream form — confirm.
 */
#define	KCB_SET64(name, val) ({					\
	__kcb_type(name) __val = (val);				\
								\
	u_long __i;						\
	__i = (u_long)__val;					\
	__asm __volatile("movq %1,%%fs:%0"			\
	    : "=m" (*(u_long *)(__kcb_offset(name)))		\
	    : "r" (__i));					\
})
112 static __inline u_long
113 __kcb_readandclear64(volatile u_long *addr)
119 " xchgq %%fs:%1, %0;"
120 "# __kcb_readandclear64"
/*
 * Atomically reads and clears the per-kse variable name, returning the
 * value it held.  name is interpreted as an offset into the kcb.
 */
#define	KCB_READANDCLEAR64(name) ({				\
	__kcb_type(name) __result;				\
								\
	__result = (__kcb_type(name))				\
	    __kcb_readandclear64((u_long *)__kcb_offset(name));	\
	__result;						\
})
/* Convenience accessors for the current KSE's control block (via %fs). */
#define	_kcb_curkcb()		KCB_GET64(kcb_self)
#define	_kcb_curtcb()		KCB_GET64(kcb_curtcb)
#define	_kcb_curkse()		((struct kse *)KCB_GET64(kcb_kmbx.km_udata))
#define	_kcb_get_tmbx()		KCB_GET64(kcb_kmbx.km_curthread)
#define	_kcb_set_tmbx(value)	KCB_SET64(kcb_kmbx.km_curthread, (void *)value)
#define	_kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread)
/* Constructors/destructors for per-thread and per-KSE control blocks. */
struct tcb	*_tcb_ctor(struct pthread *, int);
void		_tcb_dtor(struct tcb *tcb);
struct kcb	*_kcb_ctor(struct kse *);
void		_kcb_dtor(struct kcb *);
/* Called from the KSE to set its private data: point %fs at the kcb. */
static __inline void
_kcb_set(struct kcb *kcb)
{
	amd64_set_fsbase(kcb);
}
/* Get the current kcb (reads kcb_self through %fs). */
static __inline struct kcb *
_kcb_get(void)
{
	return (_kcb_curkcb());
}
/*
 * Enter a critical region: atomically take (and clear) the KSE's
 * current-thread mailbox pointer.  The returned value is handed back
 * to _kcb_critical_leave() to exit the region.
 */
static __inline struct kse_thr_mailbox *
_kcb_critical_enter(void)
{
	struct kse_thr_mailbox *crit;

	crit = _kcb_readandclear_tmbx();
	return (crit);
}
/*
 * Leave a critical region by restoring the mailbox pointer taken by
 * _kcb_critical_enter().
 * NOTE(review): only the signature line survived extraction; the body
 * is restored from the upstream form — confirm.
 */
static __inline void
_kcb_critical_leave(struct kse_thr_mailbox *crit)
{
	_kcb_set_tmbx(crit);
}
180 _kcb_in_critical(void)
182 return (_kcb_get_tmbx() == NULL);
186 _tcb_set(struct kcb *kcb, struct tcb *tcb)
188 kcb->kcb_curtcb = tcb;
/* Get the current thread's tcb (reads kcb_curtcb through %fs). */
static __inline struct tcb *
_tcb_get(void)
{
	return (_kcb_curtcb());
}
197 static __inline struct pthread *
204 return (tcb->tcb_thread);
/* Get the current kse (stored in the mailbox km_udata slot). */
static __inline struct kse *
_get_curkse(void)
{
	return ((struct kse *)_kcb_curkse());
}
215 void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack,
217 int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc);
218 int _amd64_save_context(mcontext_t *mc);
221 _thread_enter_uts(struct tcb *tcb, struct kcb *kcb)
225 ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext);
227 _amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func,
228 kcb->kcb_kmbx.km_stack.ss_sp,
229 kcb->kcb_kmbx.km_stack.ss_size);
230 /* We should not reach here. */
/*
 * NOTE(review): extraction artifact — original line numbers are fused
 * into the text and several lines (braces, if/else framing, returns)
 * were dropped.  The tail of _thread_switch() is not visible in this
 * chunk, so the code below is left byte-identical; only comments added.
 *
 * Switch the KSE `kcb` to running thread `tcb`.  When the debugger is
 * not attached the switch is done in userland via a context restore;
 * otherwise it is delegated to the kernel via kse_switchin(2).
 * `setmbox` selects whether the mailbox current-thread pointer is set
 * as part of the switch.
 */
239 _thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox)
241 extern int _libkse_debug;
/* Reject a switch with no kse or no thread. */
243 if ((kcb == NULL) || (tcb == NULL))
/* Make tcb the thread running on this kse. */
245 kcb->kcb_curtcb = tcb;
/* Not being debugged: restore the thread's context in userland. */
247 if (_libkse_debug == 0) {
248 tcb->tcb_tmbx.tm_lwp = kcb->kcb_kmbx.km_lwp;
/* Presumably the setmbox != 0 arm: restore and atomically store the
 * thread mailbox pointer through `loc` — the if line was dropped,
 * TODO confirm. */
250 _amd64_restore_context(
251 &tcb->tcb_tmbx.tm_context.uc_mcontext,
252 (intptr_t)&tcb->tcb_tmbx,
253 (intptr_t *)(void *)&kcb->kcb_kmbx.km_curthread);
/* Presumably the setmbox == 0 arm: plain restore, no mailbox store —
 * the else line and trailing arguments were dropped, TODO confirm. */
255 _amd64_restore_context(
256 &tcb->tcb_tmbx.tm_context.uc_mcontext,
258 /* We should not reach here. */
/* Debugger attached: have the kernel perform the switch-in, with or
 * without setting the mailbox pointer. */
261 kse_switchin(&tcb->tcb_tmbx, KSE_SWITCHIN_SETTMBX);
263 kse_switchin(&tcb->tcb_tmbx, 0);