2 * Copyright (c) 2004 David Xu <davidxu@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
35 #include <sys/types.h>
37 #include <sys/ptrace.h>
38 #include <proc_service.h>
39 #include <thread_db.h>
41 #include "libpthread_db.h"
/*
 * P2T: presumably translates a ps_* (proc_service) status code into the
 * corresponding td_* (thread_db) code via ps2td() — ps2td() is not visible
 * in this chunk, so confirm against libpthread_db.h.
 */
43 #define P2T(c) ps2td(c)
/* Forward declarations for file-local helpers defined later in this file. */
45 static void pt_unmap_lwp(const td_thragent_t *ta, lwpid_t lwp);
46 static int pt_validate(const td_thrhandle_t *th);
63 return TD_NOLIBTHREAD;
/*
 * Map a thread/LWP address to a slot in the agent's map[] table, returning
 * the slot index as the debugger-visible thread id.  Scans for an existing
 * entry (reusing a PT_NONE hole or matching an already-mapped address),
 * growing the table by doubling when full.
 * NOTE(review): the extraction has dropped lines here (returns, error
 * checks, the realloc-failure path) — do not treat this fragment as the
 * complete function.
 */
72 pt_map_thread(const td_thragent_t *const_ta, psaddr_t pt, int type)
/* The agent is logically mutable here; strip const to update the map. */
74 	td_thragent_t *ta = __DECONST(td_thragent_t *, const_ta);
/* Slot 0 is skipped — scan starts at index 1. */
79 	for (i = 1; i < ta->map_len; ++i) {
80 		if (ta->map[i].type == PT_NONE) {
83 		} else if (ta->map[i].type == type && ta->map[i].thr == pt) {
/* First allocation: start with a fixed-size table of 20 entries. */
89 	if (ta->map_len == 0) {
90 		ta->map = calloc(20, sizeof(struct pt_map));
/* Table full: double its size, zeroing the newly added half. */
96 		new = realloc(ta->map,
97 			sizeof(struct pt_map) * ta->map_len * 2);
100 		memset(new + ta->map_len, '\0', sizeof(struct pt_map) *
/* Record the mapping in the chosen slot. */
108 	ta->map[first].type = type;
109 	ta->map[first].thr = pt;
/*
 * Create a new thread agent for the target process: allocate the agent,
 * resolve every libpthread debug symbol/offset the library needs, and
 * enable debugging in the target by writing to _libkse_debug.
 * Returns TD_NOLIBTHREAD (via the macros) when a symbol cannot be found
 * or read, i.e. the target is not linked against this threading library.
 * NOTE(review): lines are missing from this fragment (macro error-return
 * statements, allocation failure handling, the final *pta assignment).
 */
121 pt_ta_new(struct ps_prochandle *ph, td_thragent_t **pta)
/* Resolve a global symbol's address into *(addr); fail as TD_NOLIBTHREAD. */
123 #define LOOKUP_SYM(proc, sym, addr) \
124 	ret = ps_pglobal_lookup(proc, NULL, sym, addr); \
126 		TDBG("can not find symbol: %s\n", sym); \
127 		ret = TD_NOLIBTHREAD; \
/* Resolve a global int symbol and read its VALUE into *(val). */
131 #define LOOKUP_VAL(proc, sym, val) \
132 	ret = ps_pglobal_lookup(proc, NULL, sym, &vaddr);\
134 		TDBG("can not find symbol: %s\n", sym); \
135 		ret = TD_NOLIBTHREAD; \
138 	ret = ps_pread(proc, vaddr, val, sizeof(int)); \
140 		TDBG("can not read value of %s\n", sym);\
141 		ret = TD_NOLIBTHREAD; \
152 	ta = malloc(sizeof(td_thragent_t));
157 	ta->thread_activated = 0;
/* Addresses of libpthread's debugger-interface globals. */
161 	LOOKUP_SYM(ph, "_libkse_debug",	&ta->libkse_debug_addr);
162 	LOOKUP_SYM(ph, "_thread_list",	&ta->thread_list_addr);
163 	LOOKUP_SYM(ph, "_thread_activated", &ta->thread_activated_addr);
164 	LOOKUP_SYM(ph, "_thread_active_threads",&ta->thread_active_threads_addr);
165 	LOOKUP_SYM(ph, "_thread_keytable", &ta->thread_keytable_addr);
/* Structure-offset and sizing constants exported by libpthread. */
166 	LOOKUP_VAL(ph, "_thread_off_dtv", &ta->thread_off_dtv);
167 	LOOKUP_VAL(ph, "_thread_off_kse_locklevel", &ta->thread_off_kse_locklevel);
168 	LOOKUP_VAL(ph, "_thread_off_kse", &ta->thread_off_kse);
169 	LOOKUP_VAL(ph, "_thread_off_tlsindex", &ta->thread_off_tlsindex);
170 	LOOKUP_VAL(ph, "_thread_off_attr_flags", &ta->thread_off_attr_flags);
171 	LOOKUP_VAL(ph, "_thread_size_key", &ta->thread_size_key);
172 	LOOKUP_VAL(ph, "_thread_off_tcb", &ta->thread_off_tcb);
173 	LOOKUP_VAL(ph, "_thread_off_linkmap", &ta->thread_off_linkmap);
174 	LOOKUP_VAL(ph, "_thread_off_tmbx", &ta->thread_off_tmbx);
175 	LOOKUP_VAL(ph, "_thread_off_thr_locklevel", &ta->thread_off_thr_locklevel);
176 	LOOKUP_VAL(ph, "_thread_off_next", &ta->thread_off_next);
177 	LOOKUP_VAL(ph, "_thread_off_state", &ta->thread_off_state);
178 	LOOKUP_VAL(ph, "_thread_max_keys", &ta->thread_max_keys);
179 	LOOKUP_VAL(ph, "_thread_off_key_allocated", &ta->thread_off_key_allocated);
180 	LOOKUP_VAL(ph, "_thread_off_key_destructor", &ta->thread_off_key_destructor);
181 	LOOKUP_VAL(ph, "_thread_state_running", &ta->thread_state_running);
/* "zoombie" spelling matches the symbol exported by libpthread itself. */
182 	LOOKUP_VAL(ph, "_thread_state_zoombie", &ta->thread_state_zoombie);
/* Best-effort write: enable debug mode in the target. */
185 	 * If this fails it probably means we're debugging a core file and
188 	ps_pwrite(ph, ta->libkse_debug_addr, &dbg, sizeof(int));
/*
 * Tear down a thread agent: best-effort clear of the target's
 * _libkse_debug flag, then (in lines not visible here) presumably free
 * the map and the agent itself.
 */
198 pt_ta_delete(td_thragent_t *ta)
206 	 * Error returns from this write are not really a problem;
207 	 * the process doesn't exist any more.
209 	ps_pwrite(ta->ph, ta->libkse_debug_addr, &dbg, sizeof(int));
/*
 * Look up the thread handle for a previously-issued thread id by
 * re-validating the corresponding map[] entry against the live target:
 * a PT_LWP entry is invalidated if its LWP has since been bound to a
 * user thread (or no longer exists per PT_GETREGS), and a user-thread
 * entry is invalidated if its pthread is no longer on _thread_list.
 * NOTE(review): error-check and return lines are missing from this
 * fragment.
 */
217 pt_ta_map_id2thr(const td_thragent_t *ta, thread_t id, td_thrhandle_t *th)
220 	TAILQ_HEAD(, pthread) thread_list;
221 	psaddr_t pt, tcb_addr;
/* Reject ids outside the map or pointing at a freed slot. */
227 	if (id < 0 || id >= ta->map_len || ta->map[id].type == PT_NONE)
/* Read the head of libpthread's global thread list from the target. */
229 	ret = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
230 	    sizeof(thread_list));
233 	pt = (psaddr_t)thread_list.tqh_first;
234 	if (ta->map[id].type == PT_LWP) {
236 		 * if we are referencing a lwp, make sure it was not already
237 		 * mapped to user thread.
/* Walk the list: for each pthread, read tcb, then the LWP id from tmbx. */
240 			ret = ps_pread(ta->ph,
241 			        pt + ta->thread_off_tcb,
242 			        &tcb_addr, sizeof(tcb_addr));
245 			ret = ps_pread(ta->ph,
246 			        tcb_addr + ta->thread_off_tmbx +
247 			        offsetof(struct kse_thr_mailbox, tm_lwp),
252 			 * If the lwp was already mapped to userland thread,
253 			 * we shouldn't reference it directly in future.
255 			if (lwp == ta->map[id].lwp) {
256 				ta->map[id].type = PT_NONE;
259 			/* get next thread */
260 			ret = ps_pread(ta->ph,
261 			        pt + ta->thread_off_next,
/* Liveness probe: PT_GETREGS fails if the LWP is gone. */
267 		ret = ptrace(PT_GETREGS, ta->map[id].lwp, (caddr_t)&gregs, 0);
269 			/* no longer exists */
270 			ta->map[id].type = PT_NONE;
/* User-thread entry: verify the pthread is still on the list. */
274 		while (pt != 0 && ta->map[id].thr != pt) {
275 			ret = ps_pread(ta->ph,
276 			        pt + ta->thread_off_tcb,
277 			        &tcb_addr, sizeof(tcb_addr));
280 			/* get next thread */
281 			ret = ps_pread(ta->ph,
282 			        pt + ta->thread_off_next,
289 			/* no longer exists */
290 			ta->map[id].type = PT_NONE;
/*
 * Map a kernel LWP id to a thread handle: walk _thread_list comparing
 * each pthread's mailbox tm_lwp to the requested id; on a match, map
 * the pthread as a PT_USER entry and drop any stale PT_LWP mapping for
 * the same LWP.  NOTE(review): error-check/return lines missing here.
 */
301 pt_ta_map_lwp2thr(const td_thragent_t *ta, lwpid_t lwp, td_thrhandle_t *th)
303 	TAILQ_HEAD(, pthread) thread_list;
310 	ret = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
311 	   sizeof(thread_list));
314 	pt = (psaddr_t)thread_list.tqh_first;
316 		ret = ps_pread(ta->ph, pt + ta->thread_off_tcb,
/* ptr = address of tm_lwp inside this thread's KSE mailbox. */
320 		ptr += ta->thread_off_tmbx +
321 		    offsetof(struct kse_thr_mailbox, tm_lwp);
322 		ret = ps_pread(ta->ph, ptr, &tmp_lwp, sizeof(lwpid_t));
325 		if (tmp_lwp == lwp) {
327 			th->th_tid = pt_map_thread(ta, pt, PT_USER);
328 			if (th->th_tid == -1)
/* The LWP now belongs to a user thread; retire its direct mapping. */
330 			pt_unmap_lwp(ta, lwp);
335 		/* get next thread */
336 		ret = ps_pread(ta->ph,
337 		        pt + ta->thread_off_next,
/*
 * Iterate over all user threads in the target, invoking `callback` with
 * a handle for each; iteration stops early when the callback returns
 * non-zero.  The state/priority/sigmask/flags filter parameters are
 * accepted but no filtering is visible in this fragment.
 * NOTE(review): error handling and the _thread_activated gate's use are
 * in lines dropped by the extraction.
 */
347 pt_ta_thr_iter(const td_thragent_t *ta,
348                td_thr_iter_f *callback, void *cbdata_p,
349                td_thr_state_e state, int ti_pri,
350                sigset_t *ti_sigmask_p,
351                unsigned int ti_user_flags)
353 	TAILQ_HEAD(, pthread) thread_list;
/* Check whether the thread library has been activated in the target. */
361 	pserr = ps_pread(ta->ph, ta->thread_activated_addr, &activated,
368 	pserr = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
369 	    sizeof(thread_list));
372 	pt = (psaddr_t)thread_list.tqh_first;
375 		th.th_tid = pt_map_thread(ta, pt, PT_USER);
377 		/* should we unmap lwp here ? */
/* Non-zero callback return terminates the iteration. */
380 		if ((*callback)(&th, cbdata_p))
382 		/* get next thread */
383 		pserr = ps_pread(ta->ph,
384 		    pt + ta->thread_off_next, &pt,
/*
 * Iterate over the target's thread-specific-data key table: read the
 * whole _thread_keytable into a local buffer, then for each of the
 * thread_max_keys slots extract the `allocated` flag and destructor
 * pointer (via the offsets resolved in pt_ta_new) and invoke `ki`.
 * NOTE(review): the allocation-failure return, the read-error path, and
 * the free() of keytable are in lines missing from this fragment.
 */
393 pt_ta_tsd_iter(const td_thragent_t *ta, td_key_iter_f *ki, void *arg)
397 	int i, ret, allocated;
401 	keytable = malloc(ta->thread_max_keys * ta->thread_size_key);
402 	if (keytable == NULL)
404 	ret = ps_pread(ta->ph, (psaddr_t)ta->thread_keytable_addr, keytable,
405 	    ta->thread_max_keys * ta->thread_size_key);
410 	for (i = 0; i < ta->thread_max_keys; i++) {
/* Each key entry is thread_size_key bytes; pick fields out by offset. */
411 		allocated = *(int *)(keytable + i * ta->thread_size_key +
412 			ta->thread_off_key_allocated);
413 		destructor = *(void **)(keytable + i * ta->thread_size_key +
414 			ta->thread_off_key_destructor);
416 			ret = (ki)(i, destructor, arg);
/*
 * Event-reporting entry points.  Only the signatures are visible in this
 * fragment; the bodies (presumably stubs returning a "not supported"
 * code — confirm against the full source) were dropped by the extraction.
 */
428 pt_ta_event_addr(const td_thragent_t *ta, td_event_e event, td_notify_t *ptr)
435 pt_ta_set_event(const td_thragent_t *ta, td_thr_events_t *events)
442 pt_ta_clear_event(const td_thragent_t *ta, td_thr_events_t *events)
449 pt_ta_event_getmsg(const td_thragent_t *ta, td_event_msg_t *msg)
/*
 * Common worker for pt_thr_dbresume/pt_thr_dbsuspend: suspend (suspend=1)
 * or resume (suspend=0) the thread `th`.
 *  - PT_LWP entries are stopped/continued directly via ps_lstop/ps_lcontinue.
 *  - User threads: read attr flags and the mailbox LWP; skip the signal
 *    thread; for system-scope threads holding low-level KSE or thread
 *    locks, defer to the TMDF_SUSPEND mailbox flag path instead of
 *    stopping the LWP outright.
 *  - Finally, set or clear TMDF_SUSPEND in the mailbox tm_dflags so the
 *    userland scheduler honors the request.
 * NOTE(review): many error-check/return lines are missing from this
 * fragment; the 0x200 attr-flag test presumably identifies the library's
 * internal signal thread — confirm against pthread_private.h.
 */
456 pt_dbsuspend(const td_thrhandle_t *th, int suspend)
458 	td_thragent_t *ta = (td_thragent_t *)th->th_ta;
459 	psaddr_t tcb_addr, tmbx_addr, ptr;
462 	int attrflags, locklevel, ret;
466 	ret = pt_validate(th);
/* Direct LWP mapping: stop/continue the kernel thread itself. */
470 	if (ta->map[th->th_tid].type == PT_LWP) {
472 			ret = ps_lstop(ta->ph, ta->map[th->th_tid].lwp);
474 			ret = ps_lcontinue(ta->ph, ta->map[th->th_tid].lwp);
/* User thread: fetch attr flags, tcb, and the LWP currently running it. */
478 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
479 		ta->thread_off_attr_flags,
480 		&attrflags, sizeof(attrflags));
483 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
485 		&tcb_addr, sizeof(tcb_addr));
488 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
489 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
490 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
495 	/* don't suspend signal thread */
496 	if (attrflags & 0x200)
498 	if (attrflags & PTHREAD_SCOPE_SYSTEM) {
500 		 * don't suspend system scope thread if it is holding
501 		 * some low level locks
/* Follow thread -> kse pointer, then read the KSE's lock level. */
503 		ptr = ta->map[th->th_tid].thr + ta->thread_off_kse;
504 		ret = ps_pread(ta->ph, ptr, &ptr, sizeof(ptr));
507 		ret = ps_pread(ta->ph, ptr + ta->thread_off_kse_locklevel,
508 		    &locklevel, sizeof(int));
511 		if (locklevel <= 0) {
512 			ptr = ta->map[th->th_tid].thr +
513 				ta->thread_off_thr_locklevel;
514 			ret = ps_pread(ta->ph, ptr, &locklevel,
/* Lock-free system-scope thread: safe to stop/continue its LWP. */
521 			ret = ps_lstop(ta->ph, lwp);
523 			ret = ps_lcontinue(ta->ph, lwp);
/* Process-scope thread currently on an LWP: only act if not bound. */
529 		struct ptrace_lwpinfo pl;
531 		if (ptrace(PT_LWPINFO, lwp, (caddr_t) &pl, sizeof(pl)))
534 		if (!(pl.pl_flags & PL_FLAG_BOUND))
535 			ret = ps_lstop(ta->ph, lwp);
537 			ret = ps_lcontinue(ta->ph, lwp);
/* Record the request in the mailbox for the userland scheduler. */
545 	ret = ps_pread(ta->ph,
546 		tmbx_addr + offsetof(struct kse_thr_mailbox, tm_dflags),
547 		&dflags, sizeof(dflags));
551 		dflags |= TMDF_SUSPEND;
553 		dflags &= ~TMDF_SUSPEND;
554 	ret = ps_pwrite(ta->ph,
555 	       tmbx_addr + offsetof(struct kse_thr_mailbox, tm_dflags),
556 	       &dflags, sizeof(dflags));
/* Resume a thread: thin wrapper over pt_dbsuspend(th, 0). */
561 pt_thr_dbresume(const td_thrhandle_t *th)
565 	return pt_dbsuspend(th, 0);
/* Suspend a thread: thin wrapper over pt_dbsuspend(th, 1). */
569 pt_thr_dbsuspend(const td_thrhandle_t *th)
573 	return pt_dbsuspend(th, 1);
/*
 * Validate a thread handle by re-resolving its id through
 * pt_ta_map_id2thr (which prunes stale map entries as a side effect).
 */
577 pt_thr_validate(const td_thrhandle_t *th)
584 	ret = pt_ta_map_id2thr(th->th_ta, th->th_tid,
/*
 * Fill in a td_thrinfo_t for the thread.  PT_LWP entries are reported as
 * TD_THR_SYSTEM and always running; user threads get their LWP id,
 * suspend flag, and state (RUN/ZOMBIE/SLEEP) read from the target via
 * the offsets resolved in pt_ta_new.
 * NOTE(review): error-check lines between the ps_pread calls are missing
 * from this fragment.
 */
590 pt_thr_get_info(const td_thrhandle_t *th, td_thrinfo_t *info)
592 	const td_thragent_t *ta = th->th_ta;
600 	ret = pt_validate(th);
604 	memset(info, 0, sizeof(*info));
/* System (LWP-only) thread: synthesize the info locally. */
605 	if (ta->map[th->th_tid].type == PT_LWP) {
606 		info->ti_type = TD_THR_SYSTEM;
607 		info->ti_lid = ta->map[th->th_tid].lwp;
608 		info->ti_tid = th->th_tid;
609 		info->ti_state = TD_THR_RUN;
610 		info->ti_type = TD_THR_SYSTEM;
/* User thread: pull state, LWP id and debug flags from the target. */
613 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
614 		&tcb_addr, sizeof(tcb_addr));
617 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_state,
618 		&state, sizeof(state));
619 	ret = ps_pread(ta->ph,
620 	        tcb_addr + ta->thread_off_tmbx +
621 	        offsetof(struct kse_thr_mailbox, tm_lwp),
622 	        &info->ti_lid, sizeof(lwpid_t));
625 	ret = ps_pread(ta->ph,
626 		tcb_addr + ta->thread_off_tmbx +
627 		offsetof(struct kse_thr_mailbox, tm_dflags),
628 		&dflags, sizeof(dflags));
631 	info->ti_ta_p = th->th_ta;
632 	info->ti_tid = th->th_tid;
/* Map the library's state constants onto thread_db states. */
633 	if (state == ta->thread_state_running)
634 		info->ti_state = TD_THR_RUN;
635 	else if (state == ta->thread_state_zoombie)
636 		info->ti_state = TD_THR_ZOMBIE;
638 		info->ti_state = TD_THR_SLEEP;
639 	info->ti_db_suspended = ((dflags & TMDF_SUSPEND) != 0);
640 	info->ti_type = TD_THR_USER;
/*
 * Fetch XMM/FPU-extended registers for a thread.  If the thread is an
 * LWP (or a user thread currently running on an LWP), read them live
 * via ps_lgetxmmregs; otherwise extract them from the saved ucontext in
 * the thread's KSE mailbox.  NOTE(review): error checks between calls
 * are in lines missing from this fragment.
 */
646 pt_thr_getxmmregs(const td_thrhandle_t *th, char *fxsave)
648 	const td_thragent_t *ta = th->th_ta;
649 	struct kse_thr_mailbox tmbx;
650 	psaddr_t tcb_addr, tmbx_addr, ptr;
658 	ret = pt_validate(th);
662 	if (ta->map[th->th_tid].type == PT_LWP) {
663 		ret = ps_lgetxmmregs(ta->ph, ta->map[th->th_tid].lwp, fxsave);
667 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
668 		&tcb_addr, sizeof(tcb_addr));
671 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
672 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
673 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: get the live register set. */
677 		ret = ps_lgetxmmregs(ta->ph, lwp, fxsave);
/* Not running: registers live in the saved mailbox context. */
681 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
684 	pt_ucontext_to_fxsave(&tmbx.tm_context, fxsave);
/*
 * Fetch floating-point registers for a thread — same live-LWP versus
 * saved-mailbox-context strategy as pt_thr_getxmmregs, using
 * ps_lgetfpregs / pt_ucontext_to_fpreg.  NOTE(review): error-check
 * lines are missing from this fragment.
 */
690 pt_thr_getfpregs(const td_thrhandle_t *th, prfpregset_t *fpregs)
692 	const td_thragent_t *ta = th->th_ta;
693 	struct kse_thr_mailbox tmbx;
694 	psaddr_t tcb_addr, tmbx_addr, ptr;
700 	ret = pt_validate(th);
704 	if (ta->map[th->th_tid].type == PT_LWP) {
705 		ret = ps_lgetfpregs(ta->ph, ta->map[th->th_tid].lwp, fpregs);
709 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
710 		&tcb_addr, sizeof(tcb_addr));
713 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
714 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
715 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: read live; otherwise decode the saved context. */
719 		ret = ps_lgetfpregs(ta->ph, lwp, fpregs);
723 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
726 	pt_ucontext_to_fpreg(&tmbx.tm_context, fpregs);
/*
 * Fetch general-purpose registers for a thread — live via ps_lgetregs
 * when the thread is (or runs on) an LWP, otherwise decoded from the
 * saved mailbox ucontext with pt_ucontext_to_reg.  NOTE(review):
 * error-check lines are missing from this fragment.
 */
731 pt_thr_getgregs(const td_thrhandle_t *th, prgregset_t gregs)
733 	const td_thragent_t *ta = th->th_ta;
734 	struct kse_thr_mailbox tmbx;
735 	psaddr_t tcb_addr, tmbx_addr, ptr;
741 	ret = pt_validate(th);
745 	if (ta->map[th->th_tid].type == PT_LWP) {
746 		ret = ps_lgetregs(ta->ph,
747 		                  ta->map[th->th_tid].lwp, gregs);
751 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
752 			&tcb_addr, sizeof(tcb_addr));
755 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
756 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
757 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: read live; otherwise decode the saved context. */
761 		ret = ps_lgetregs(ta->ph, lwp, gregs);
764 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
767 	pt_ucontext_to_reg(&tmbx.tm_context, gregs);
/*
 * Store XMM/FPU-extended registers into a thread.  Live LWPs are
 * written via ps_lsetxmmregs; otherwise the saved mailbox ucontext is
 * read-modified-written so fields outside the fxsave area survive.
 * NOTE(review): error-check lines are missing from this fragment.
 */
773 pt_thr_setxmmregs(const td_thrhandle_t *th, const char *fxsave)
775 	const td_thragent_t *ta = th->th_ta;
776 	struct kse_thr_mailbox tmbx;
777 	psaddr_t tcb_addr, tmbx_addr, ptr;
785 	ret = pt_validate(th);
789 	if (ta->map[th->th_tid].type == PT_LWP) {
790 		ret = ps_lsetxmmregs(ta->ph, ta->map[th->th_tid].lwp, fxsave);
794 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
796 		&tcb_addr, sizeof(tcb_addr));
799 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
800 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
801 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: write the live register set. */
805 		ret = ps_lsetxmmregs(ta->ph, lwp, fxsave);
809 	 * Read a copy of context, this makes sure that registers
810 	 * not covered by structure reg won't be clobbered
812 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
816 	pt_fxsave_to_ucontext(fxsave, &tmbx.tm_context);
817 	ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Store floating-point registers into a thread — mirror of
 * pt_thr_setxmmregs using ps_lsetfpregs / pt_fpreg_to_ucontext, with
 * the same read-modify-write of the saved mailbox context.
 * NOTE(review): error-check lines are missing from this fragment.
 */
823 pt_thr_setfpregs(const td_thrhandle_t *th, const prfpregset_t *fpregs)
825 	const td_thragent_t *ta = th->th_ta;
826 	struct kse_thr_mailbox tmbx;
827 	psaddr_t tcb_addr, tmbx_addr, ptr;
833 	ret = pt_validate(th);
837 	if (ta->map[th->th_tid].type == PT_LWP) {
838 		ret = ps_lsetfpregs(ta->ph, ta->map[th->th_tid].lwp, fpregs);
842 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
844 		&tcb_addr, sizeof(tcb_addr));
847 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
848 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
849 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: write the live register set. */
853 		ret = ps_lsetfpregs(ta->ph, lwp, fpregs);
857 	 * Read a copy of context, this makes sure that registers
858 	 * not covered by structure reg won't be clobbered
860 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
864 	pt_fpreg_to_ucontext(fpregs, &tmbx.tm_context);
865 	ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Store general-purpose registers into a thread — mirror of the other
 * set-register routines using ps_lsetregs / pt_reg_to_ucontext, with a
 * read-modify-write of the saved mailbox context to avoid clobbering
 * fields reg doesn't cover.  NOTE(review): error-check lines are
 * missing from this fragment.
 */
870 pt_thr_setgregs(const td_thrhandle_t *th, const prgregset_t gregs)
872 	const td_thragent_t *ta = th->th_ta;
873 	struct kse_thr_mailbox tmbx;
874 	psaddr_t tcb_addr, tmbx_addr, ptr;
880 	ret = pt_validate(th);
884 	if (ta->map[th->th_tid].type == PT_LWP) {
885 		ret = ps_lsetregs(ta->ph, ta->map[th->th_tid].lwp, gregs);
889 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
891 		&tcb_addr, sizeof(tcb_addr));
894 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
895 	ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
896 	ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Running on an LWP: write the live register set. */
900 		ret = ps_lsetregs(ta->ph, lwp, gregs);
905 	 * Read a copy of context, make sure that registers
906 	 * not covered by structure reg won't be clobbered
908 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
911 	pt_reg_to_ucontext(gregs, &tmbx.tm_context);
912 	ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Per-thread event entry points.  Only the signatures are visible in
 * this fragment; bodies (presumably "not supported" stubs — confirm
 * against the full source) were dropped by the extraction.
 */
917 pt_thr_event_enable(const td_thrhandle_t *th, int en)
924 pt_thr_set_event(const td_thrhandle_t *th, td_thr_events_t *setp)
931 pt_thr_clear_event(const td_thrhandle_t *th, td_thr_events_t *setp)
938 pt_thr_event_getmsg(const td_thrhandle_t *th, td_event_msg_t *msg)
/*
 * Enable (step=1) or disable (step=0) single-stepping for a user thread:
 * set/clear TMDF_SSTEP in the mailbox tm_dflags, and when the thread is
 * not on an LWP also patch the single-step bit inside the saved
 * ucontext's register set (some architectures keep it in registers).
 * PT_LWP entries are rejected up front.
 * NOTE(review): error-check/return lines are missing from this fragment,
 * and the "®s" tokens on the pt_ucontext_to_reg/pt_reg_sstep/
 * pt_reg_to_ucontext lines are mis-encoded "&regs" (an '&' + 'reg'
 * collapsed into U+00AE) — restore the original encoding when merging.
 */
945 pt_thr_sstep(const td_thrhandle_t *th, int step)
947 	const td_thragent_t *ta = th->th_ta;
948 	struct kse_thr_mailbox tmbx;
950 	psaddr_t tcb_addr, tmbx_addr;
957 	ret = pt_validate(th);
/* Single-stepping raw LWP mappings is not handled here. */
961 	if (ta->map[th->th_tid].type == PT_LWP)
964 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
966 		&tcb_addr, sizeof(tcb_addr));
970 	/* Clear or set single step flag in thread mailbox */
971 	ret = ps_pread(ta->ph,
972 		tcb_addr + ta->thread_off_tmbx +
973 		offsetof(struct kse_thr_mailbox, tm_dflags),
974 		&dflags, sizeof(uint32_t));
978 		dflags |= TMDF_SSTEP;
980 		dflags &= ~TMDF_SSTEP;
981 	ret = ps_pwrite(ta->ph,
982 		tcb_addr + ta->thread_off_tmbx +
983 		offsetof(struct kse_thr_mailbox, tm_dflags),
984 		&dflags, sizeof(uint32_t));
/* Is the thread currently running on an LWP? */
988 	ret = ps_pread(ta->ph,
989 		tcb_addr + ta->thread_off_tmbx +
990 		offsetof(struct kse_thr_mailbox, tm_lwp),
991 		&lwp, sizeof(lwpid_t));
997 	tmbx_addr = tcb_addr + ta->thread_off_tmbx;
999 	 * context is in userland, some architectures store
1000 	 * single step status in registers, we should change
1003 	ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
1005 	pt_ucontext_to_reg(&tmbx.tm_context, ®s);
1006 	/* only write out if it is really changed. */
1007 	if (pt_reg_sstep(®s, step) != 0) {
1008 		pt_reg_to_ucontext(®s, &tmbx.tm_context);
1009 		ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx,
/*
 * Retire any PT_LWP map entry for the given LWP id by marking its slot
 * PT_NONE — used once the LWP has been re-mapped to a user thread.
 */
1017 pt_unmap_lwp(const td_thragent_t *ta, lwpid_t lwp)
1021 	for (i = 0; i < ta->map_len; ++i) {
1022 		if (ta->map[i].type == PT_LWP && ta->map[i].lwp == lwp) {
1023 			ta->map[i].type = PT_NONE;
/*
 * Cheap handle sanity check: the tid must index a live (non-PT_NONE)
 * slot in the agent's map table.
 */
1030 pt_validate(const td_thrhandle_t *th)
1033 	if (th->th_tid < 0 || th->th_tid >= th->th_ta->map_len ||
1034 	    th->th_ta->map[th->th_tid].type == PT_NONE)
/*
 * Resolve the address of a TLS variable for this thread: starting from
 * the rtld link map entry, recover the containing Obj_Entry, read its
 * tlsindex, then follow the thread's tcb -> dtv array to the module's
 * TLS block and add `offset`.  The dtv is indexed at tlsindex+1 because
 * (presumably, per rtld's dtv layout) slot 0 holds the generation
 * count — confirm against rtld.
 * NOTE(review): error-check lines and the final offset addition are in
 * lines missing from this fragment.
 */
1040 pt_thr_tls_get_addr(const td_thrhandle_t *th, void *_linkmap, size_t offset,
1044 	const td_thragent_t *ta = th->th_ta;
1045 	psaddr_t tcb_addr, *dtv_addr;
1048 	/* linkmap is a member of Obj_Entry */
1049 	obj_entry = (char *)_linkmap - ta->thread_off_linkmap;
1051 	/* get tlsindex of the object file */
1052 	ret = ps_pread(ta->ph,
1053 		obj_entry + ta->thread_off_tlsindex,
1054 		&tls_index, sizeof(tls_index));
1058 	/* get thread tcb */
1059 	ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
1061 		&tcb_addr, sizeof(tcb_addr));
1065 	/* get dtv array address */
1066 	ret = ps_pread(ta->ph, tcb_addr + ta->thread_off_dtv,
1067 		&dtv_addr, sizeof(dtv_addr));
1070 	/* now get the object's tls block base address */
1071 	ret = ps_pread(ta->ph, &dtv_addr[tls_index+1], address,
1080 struct ta_ops libpthread_db_ops = {
1082 .to_ta_clear_event = pt_ta_clear_event,
1083 .to_ta_delete = pt_ta_delete,
1084 .to_ta_event_addr = pt_ta_event_addr,
1085 .to_ta_event_getmsg = pt_ta_event_getmsg,
1086 .to_ta_map_id2thr = pt_ta_map_id2thr,
1087 .to_ta_map_lwp2thr = pt_ta_map_lwp2thr,
1088 .to_ta_new = pt_ta_new,
1089 .to_ta_set_event = pt_ta_set_event,
1090 .to_ta_thr_iter = pt_ta_thr_iter,
1091 .to_ta_tsd_iter = pt_ta_tsd_iter,
1092 .to_thr_clear_event = pt_thr_clear_event,
1093 .to_thr_dbresume = pt_thr_dbresume,
1094 .to_thr_dbsuspend = pt_thr_dbsuspend,
1095 .to_thr_event_enable = pt_thr_event_enable,
1096 .to_thr_event_getmsg = pt_thr_event_getmsg,
1097 .to_thr_get_info = pt_thr_get_info,
1098 .to_thr_getfpregs = pt_thr_getfpregs,
1099 .to_thr_getgregs = pt_thr_getgregs,
1100 .to_thr_set_event = pt_thr_set_event,
1101 .to_thr_setfpregs = pt_thr_setfpregs,
1102 .to_thr_setgregs = pt_thr_setgregs,
1103 .to_thr_validate = pt_thr_validate,
1104 .to_thr_tls_get_addr = pt_thr_tls_get_addr,
1106 /* FreeBSD specific extensions. */
1107 .to_thr_sstep = pt_thr_sstep,
1109 .to_thr_getxmmregs = pt_thr_getxmmregs,
1110 .to_thr_setxmmregs = pt_thr_setxmmregs,