2 * Copyright (c) 2004 David Xu <davidxu@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
35 #include <sys/types.h>
37 #include <sys/ptrace.h>
38 #include <proc_service.h>
39 #include <thread_db.h>
41 #include "libpthread_db.h"
/*
 * Convert a proc-services error (ps_err_e) to a thread_db error (td_err_e).
 * NOTE(review): this chunk is a damaged extraction -- original file line
 * numbers are fused into each line and many lines are missing; comments
 * describe only what is visible.
 */
43 #define P2T(c) ps2td(c)
45 static void pt_unmap_lwp(const td_thragent_t *ta, lwpid_t lwp);
46 static int pt_validate(const td_thrhandle_t *th);
/*
 * Orphaned fragment: the enclosing function's header was dropped by the
 * extraction; presumably an early bail-out when libkse is not present.
 */
63 return TD_NOLIBTHREAD;
/*
 * Map a debuggee thread (PT_USER) or LWP (PT_LWP) address to a small-integer
 * TID slot in ta->map[], reusing a free slot or an existing identical entry.
 * The map array is lazily allocated (20 entries) and doubled on overflow.
 * NOTE(review): body is incomplete in this extraction (missing declarations,
 * returns and closing braces); do not edit logic here without the full file.
 */
72 pt_map_thread(const td_thragent_t *const_ta, psaddr_t pt, int type)
/* __DECONST: the map cache is mutable even through the const agent pointer. */
74 td_thragent_t *ta = __DECONST(td_thragent_t *, const_ta);
/* Slot 0 is reserved; scan from 1 for a free or matching entry. */
79 for (i = 1; i < ta->map_len; ++i) {
80 if (ta->map[i].type == PT_NONE) {
83 } else if (ta->map[i].type == type && ta->map[i].thr == pt) {
89 if (ta->map_len == 0) {
/* calloc zero-fills, so new slots start as PT_NONE. */
90 ta->map = calloc(20, sizeof(struct pt_map));
/* Grow via a temporary so the old array survives realloc failure. */
96 new = realloc(ta->map,
97 sizeof(struct pt_map) * ta->map_len * 2);
/* Zero the newly added half so the slots read as PT_NONE. */
100 memset(new + ta->map_len, '\0', sizeof(struct pt_map) *
108 ta->map[first].type = type;
109 ta->map[first].thr = pt;
/*
 * Create a thread agent for process handle 'ph': resolve every libkse
 * debug symbol/offset in the debuggee and enable its debug hook.
 * Returns TD_NOLIBTHREAD if libkse's symbols are not present.
 * NOTE(review): incomplete extraction -- macro bodies and error paths are
 * partially missing below.
 */
121 pt_ta_new(struct ps_prochandle *ph, td_thragent_t **pta)
/* Resolve a debuggee symbol address; on failure set ret = TD_NOLIBTHREAD. */
123 #define LOOKUP_SYM(proc, sym, addr) \
124 ret = ps_pglobal_lookup(proc, NULL, sym, addr); \
126 TDBG("can not find symbol: %s\n", sym); \
127 ret = TD_NOLIBTHREAD; \
/* Resolve a symbol, then read its int value out of debuggee memory. */
131 #define LOOKUP_VAL(proc, sym, val) \
132 ret = ps_pglobal_lookup(proc, NULL, sym, &vaddr);\
134 TDBG("can not find symbol: %s\n", sym); \
135 ret = TD_NOLIBTHREAD; \
138 ret = ps_pread(proc, vaddr, val, sizeof(int)); \
140 TDBG("can not read value of %s\n", sym);\
141 ret = TD_NOLIBTHREAD; \
152 ta = malloc(sizeof(td_thragent_t));
157 ta->thread_activated = 0;
161 LOOKUP_SYM(ph, "_libkse_debug", &ta->libkse_debug_addr);
162 LOOKUP_SYM(ph, "_thread_list", &ta->thread_list_addr);
163 LOOKUP_SYM(ph, "_thread_activated", &ta->thread_activated_addr);
164 LOOKUP_SYM(ph, "_thread_active_threads",&ta->thread_active_threads_addr);
165 LOOKUP_SYM(ph, "_thread_keytable", &ta->thread_keytable_addr);
166 LOOKUP_VAL(ph, "_thread_off_dtv", &ta->thread_off_dtv);
167 LOOKUP_VAL(ph, "_thread_off_kse_locklevel", &ta->thread_off_kse_locklevel);
168 LOOKUP_VAL(ph, "_thread_off_kse", &ta->thread_off_kse);
169 LOOKUP_VAL(ph, "_thread_off_tlsindex", &ta->thread_off_tlsindex);
170 LOOKUP_VAL(ph, "_thread_off_attr_flags", &ta->thread_off_attr_flags);
171 LOOKUP_VAL(ph, "_thread_size_key", &ta->thread_size_key);
172 LOOKUP_VAL(ph, "_thread_off_tcb", &ta->thread_off_tcb);
173 LOOKUP_VAL(ph, "_thread_off_linkmap", &ta->thread_off_linkmap);
174 LOOKUP_VAL(ph, "_thread_off_tmbx", &ta->thread_off_tmbx);
175 LOOKUP_VAL(ph, "_thread_off_thr_locklevel", &ta->thread_off_thr_locklevel);
176 LOOKUP_VAL(ph, "_thread_off_next", &ta->thread_off_next);
177 LOOKUP_VAL(ph, "_thread_off_state", &ta->thread_off_state);
178 LOOKUP_VAL(ph, "_thread_max_keys", &ta->thread_max_keys);
179 LOOKUP_VAL(ph, "_thread_off_key_allocated", &ta->thread_off_key_allocated);
180 LOOKUP_VAL(ph, "_thread_off_key_destructor", &ta->thread_off_key_destructor);
181 LOOKUP_VAL(ph, "_thread_state_running", &ta->thread_state_running);
/* "zoombie" is the symbol's actual (misspelled) name in libkse; keep it. */
182 LOOKUP_VAL(ph, "_thread_state_zoombie", &ta->thread_state_zoombie);
185 * If this fails it probably means we're debugging a core file and
/* Enable libkse's debugger cooperation flag; failure tolerated (core file). */
188 ps_pwrite(ph, ta->libkse_debug_addr, &dbg, sizeof(int));
/*
 * Tear down a thread agent: clear libkse's debug flag in the (possibly
 * already dead) debuggee and release agent resources.
 */
198 pt_ta_delete(td_thragent_t *ta)
206 * Error returns from this write are not really a problem;
207 * the process doesn't exist any more.
209 ps_pwrite(ta->ph, ta->libkse_debug_addr, &dbg, sizeof(int));
/*
 * Resolve a TID (index into ta->map[]) back to a thread handle, checking
 * that the mapped LWP or user thread still exists in the debuggee.
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
217 pt_ta_map_id2thr(const td_thragent_t *ta, thread_t id, td_thrhandle_t *th)
220 TAILQ_HEAD(, pthread) thread_list;
221 psaddr_t pt, tcb_addr;
/* Reject out-of-range or unmapped TIDs up front. */
227 if (id < 0 || id >= ta->map_len || ta->map[id].type == PT_NONE)
/* Read the head of libkse's global thread list from debuggee memory. */
229 ret = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
230 sizeof(thread_list));
233 pt = (psaddr_t)thread_list.tqh_first;
234 if (ta->map[id].type == PT_LWP) {
236 * if we are referencing a lwp, make sure it was not already
237 * mapped to user thread.
240 ret = ps_pread(ta->ph,
241 pt + ta->thread_off_tcb,
242 &tcb_addr, sizeof(tcb_addr));
/* tm_lwp lives inside the kse mailbox embedded in the thread's TCB. */
245 ret = ps_pread(ta->ph,
246 tcb_addr + ta->thread_off_tmbx +
247 offsetof(struct kse_thr_mailbox, tm_lwp),
252 * If the lwp was already mapped to userland thread,
253 * we shouldn't reference it directly in future.
255 if (lwp == ta->map[id].lwp) {
256 ta->map[id].type = PT_NONE;
259 /* get next thread */
260 ret = ps_pread(ta->ph,
261 pt + ta->thread_off_next,
/* Probe the LWP with getregs; failure means it has exited. */
267 ret = ps_lgetregs(ta->ph, ta->map[id].lwp, gregs);
269 /* no longer exists */
270 ta->map[id].type = PT_NONE;
/* PT_USER case: walk the list until the cached thread address is found. */
274 while (pt != 0 && ta->map[id].thr != pt) {
275 ret = ps_pread(ta->ph,
276 pt + ta->thread_off_tcb,
277 &tcb_addr, sizeof(tcb_addr));
280 /* get next thread */
281 ret = ps_pread(ta->ph,
282 pt + ta->thread_off_next,
289 /* no longer exists */
290 ta->map[id].type = PT_NONE;
/*
 * Resolve a kernel LWP id to a thread handle by scanning the debuggee's
 * thread list for a thread whose mailbox tm_lwp matches; on a hit, map it
 * as a PT_USER entry and drop any stale PT_LWP mapping for the same LWP.
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
301 pt_ta_map_lwp2thr(const td_thragent_t *ta, lwpid_t lwp, td_thrhandle_t *th)
303 TAILQ_HEAD(, pthread) thread_list;
310 ret = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
311 sizeof(thread_list));
314 pt = (psaddr_t)thread_list.tqh_first;
316 ret = ps_pread(ta->ph, pt + ta->thread_off_tcb,
/* Address of tm_lwp inside the thread's kse mailbox. */
320 ptr += ta->thread_off_tmbx +
321 offsetof(struct kse_thr_mailbox, tm_lwp);
322 ret = ps_pread(ta->ph, ptr, &tmp_lwp, sizeof(lwpid_t));
325 if (tmp_lwp == lwp) {
327 th->th_tid = pt_map_thread(ta, pt, PT_USER);
328 if (th->th_tid == -1)
/* The LWP is now represented by a user thread; retire the direct mapping. */
330 pt_unmap_lwp(ta, lwp);
335 /* get next thread */
336 ret = ps_pread(ta->ph,
337 pt + ta->thread_off_next,
/*
 * Iterate over all debuggee threads, invoking 'callback' with a handle for
 * each; a non-zero callback return stops the walk. The state/priority/
 * sigmask/flags filter parameters appear unused in the visible code.
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
347 pt_ta_thr_iter(const td_thragent_t *ta,
348 td_thr_iter_f *callback, void *cbdata_p,
349 td_thr_state_e state, int ti_pri,
350 sigset_t *ti_sigmask_p,
351 unsigned int ti_user_flags)
353 TAILQ_HEAD(, pthread) thread_list;
/* Check whether libkse threading has been activated in the debuggee. */
361 pserr = ps_pread(ta->ph, ta->thread_activated_addr, &activated,
368 pserr = ps_pread(ta->ph, ta->thread_list_addr, &thread_list,
369 sizeof(thread_list));
372 pt = (psaddr_t)thread_list.tqh_first;
375 th.th_tid = pt_map_thread(ta, pt, PT_USER);
377 /* should we unmap lwp here ? */
380 if ((*callback)(&th, cbdata_p))
382 /* get next thread */
383 pserr = ps_pread(ta->ph,
384 pt + ta->thread_off_next, &pt,
/*
 * Iterate over the debuggee's TSD key table, invoking 'ki' for each
 * allocated key with its destructor. The whole table is copied out of the
 * debuggee in one read and decoded with the key size/offsets discovered by
 * pt_ta_new().
 * NOTE(review): incomplete extraction -- the free(keytable) / error paths
 * are not visible; verify no leak against the full source.
 */
393 pt_ta_tsd_iter(const td_thragent_t *ta, td_key_iter_f *ki, void *arg)
397 int i, ret, allocated;
401 keytable = malloc(ta->thread_max_keys * ta->thread_size_key);
402 if (keytable == NULL)
404 ret = ps_pread(ta->ph, (psaddr_t)ta->thread_keytable_addr, keytable,
405 ta->thread_max_keys * ta->thread_size_key);
410 for (i = 0; i < ta->thread_max_keys; i++) {
/* Decode the i-th key record using debuggee-provided layout offsets. */
411 allocated = *(int *)(keytable + i * ta->thread_size_key +
412 ta->thread_off_key_allocated);
413 destructor = *(void **)(keytable + i * ta->thread_size_key +
414 ta->thread_off_key_destructor);
416 ret = (ki)(i, destructor, arg);
/*
 * Agent-level event operations. Bodies were dropped by the extraction;
 * presumably unimplemented stubs -- TODO confirm against the full source.
 */
428 pt_ta_event_addr(const td_thragent_t *ta, td_event_e event, td_notify_t *ptr)
435 pt_ta_set_event(const td_thragent_t *ta, td_thr_events_t *events)
442 pt_ta_clear_event(const td_thragent_t *ta, td_thr_events_t *events)
449 pt_ta_event_getmsg(const td_thragent_t *ta, td_event_msg_t *msg)
/*
 * Suspend (suspend != 0) or resume a thread for the debugger. Bound/system
 * scope threads are stopped via their LWP; pure user threads are marked
 * via the TMDF_SUSPEND flag in their kse mailbox so the UTS keeps them off
 * a KSE. NOTE(review): incomplete extraction -- error checks/returns and
 * several branches are missing.
 */
456 pt_dbsuspend(const td_thrhandle_t *th, int suspend)
458 td_thragent_t *ta = (td_thragent_t *)th->th_ta;
459 psaddr_t tcb_addr, tmbx_addr, ptr;
462 int attrflags, locklevel, ret;
466 ret = pt_validate(th);
/* Raw LWP mapping: stop/continue the kernel thread directly. */
470 if (ta->map[th->th_tid].type == PT_LWP) {
472 ret = ps_lstop(ta->ph, ta->map[th->th_tid].lwp);
474 ret = ps_lcontinue(ta->ph, ta->map[th->th_tid].lwp);
478 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
479 ta->thread_off_attr_flags,
480 &attrflags, sizeof(attrflags));
483 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
485 &tcb_addr, sizeof(tcb_addr));
488 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
489 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
490 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
495 /* don't suspend signal thread */
/* NOTE(review): 0x200 is a magic attr flag (signal thread?) -- name it. */
496 if (attrflags & 0x200)
498 if (attrflags & PTHREAD_SCOPE_SYSTEM) {
500 * don't suspend system scope thread if it is holding
501 * some low level locks
503 ptr = ta->map[th->th_tid].thr + ta->thread_off_kse;
504 ret = ps_pread(ta->ph, ptr, &ptr, sizeof(ptr));
507 ret = ps_pread(ta->ph, ptr + ta->thread_off_kse_locklevel,
508 &locklevel, sizeof(int));
511 if (locklevel <= 0) {
512 ptr = ta->map[th->th_tid].thr +
513 ta->thread_off_thr_locklevel;
514 ret = ps_pread(ta->ph, ptr, &locklevel,
521 ret = ps_lstop(ta->ph, lwp);
523 ret = ps_lcontinue(ta->ph, lwp);
529 struct ptrace_lwpinfo pl;
531 if (ps_linfo(ta->ph, lwp, (caddr_t)&pl))
/* Only stop the LWP directly when the thread is not bound to it. */
534 if (!(pl.pl_flags & PL_FLAG_BOUND))
535 ret = ps_lstop(ta->ph, lwp);
537 ret = ps_lcontinue(ta->ph, lwp);
/* User thread: toggle TMDF_SUSPEND in the mailbox debug flags. */
545 ret = ps_pread(ta->ph,
546 tmbx_addr + offsetof(struct kse_thr_mailbox, tm_dflags),
547 &dflags, sizeof(dflags));
551 dflags |= TMDF_SUSPEND;
553 dflags &= ~TMDF_SUSPEND;
554 ret = ps_pwrite(ta->ph,
555 tmbx_addr + offsetof(struct kse_thr_mailbox, tm_dflags),
556 &dflags, sizeof(dflags));
/* Resume a thread: thin wrapper around pt_dbsuspend(th, 0). */
561 pt_thr_dbresume(const td_thrhandle_t *th)
565 return pt_dbsuspend(th, 0);
/* Suspend a thread: thin wrapper around pt_dbsuspend(th, 1). */
569 pt_thr_dbsuspend(const td_thrhandle_t *th)
573 return pt_dbsuspend(th, 1);
/*
 * Validate a thread handle by re-resolving its TID through
 * pt_ta_map_id2thr(); the temporary handle result is discarded.
 */
577 pt_thr_validate(const td_thrhandle_t *th)
584 ret = pt_ta_map_id2thr(th->th_ta, th->th_tid,
/*
 * Fill *info with the thread's type, lid, tid, run state and db-suspend
 * status, read from the debuggee's thread structure and kse mailbox.
 * NOTE(review): 'info' appears to be zeroed twice (bzero then memset) and
 * ti_type assigned twice in the PT_LWP branch -- redundant; confirm against
 * the full source before cleaning up.
 */
590 pt_thr_get_info(const td_thrhandle_t *th, td_thrinfo_t *info)
592 const td_thragent_t *ta = th->th_ta;
600 bzero(info, sizeof(*info));
601 ret = pt_validate(th);
605 memset(info, 0, sizeof(*info));
/* Raw LWP: synthesize a TD_THR_SYSTEM record without touching the TCB. */
606 if (ta->map[th->th_tid].type == PT_LWP) {
607 info->ti_type = TD_THR_SYSTEM;
608 info->ti_lid = ta->map[th->th_tid].lwp;
609 info->ti_tid = th->th_tid;
610 info->ti_state = TD_THR_RUN;
611 info->ti_type = TD_THR_SYSTEM;
614 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
615 &tcb_addr, sizeof(tcb_addr));
618 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_state,
619 &state, sizeof(state));
/* lid comes from the mailbox's tm_lwp (0 when not on an LWP). */
620 ret = ps_pread(ta->ph,
621 tcb_addr + ta->thread_off_tmbx +
622 offsetof(struct kse_thr_mailbox, tm_lwp),
623 &info->ti_lid, sizeof(lwpid_t));
626 ret = ps_pread(ta->ph,
627 tcb_addr + ta->thread_off_tmbx +
628 offsetof(struct kse_thr_mailbox, tm_dflags),
629 &dflags, sizeof(dflags));
632 info->ti_ta_p = th->th_ta;
633 info->ti_tid = th->th_tid;
/* Map libkse's numeric state onto thread_db's coarse run states. */
634 if (state == ta->thread_state_running)
635 info->ti_state = TD_THR_RUN;
636 else if (state == ta->thread_state_zoombie)
637 info->ti_state = TD_THR_ZOMBIE;
639 info->ti_state = TD_THR_SLEEP;
640 info->ti_db_suspended = ((dflags & TMDF_SUSPEND) != 0);
641 info->ti_type = TD_THR_USER;
/*
 * Read the thread's XMM (fxsave) register state. If the thread is backed
 * by an LWP, ask the kernel; otherwise decode the saved ucontext in the
 * thread's kse mailbox. NOTE(review): incomplete extraction.
 */
647 pt_thr_getxmmregs(const td_thrhandle_t *th, char *fxsave)
649 const td_thragent_t *ta = th->th_ta;
650 struct kse_thr_mailbox tmbx;
651 psaddr_t tcb_addr, tmbx_addr, ptr;
659 ret = pt_validate(th);
663 if (ta->map[th->th_tid].type == PT_LWP) {
664 ret = ps_lgetxmmregs(ta->ph, ta->map[th->th_tid].lwp, fxsave);
668 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
669 &tcb_addr, sizeof(tcb_addr));
672 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
673 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
674 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
/* Currently running on an LWP: the kernel has the live registers. */
678 ret = ps_lgetxmmregs(ta->ph, lwp, fxsave);
/* Otherwise the registers live in the mailbox's saved ucontext. */
682 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
685 pt_ucontext_to_fxsave(&tmbx.tm_context, fxsave);
/*
 * Read the thread's FP register set: from the kernel when the thread has
 * an LWP, else from the saved ucontext in its kse mailbox.
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
691 pt_thr_getfpregs(const td_thrhandle_t *th, prfpregset_t *fpregs)
693 const td_thragent_t *ta = th->th_ta;
694 struct kse_thr_mailbox tmbx;
695 psaddr_t tcb_addr, tmbx_addr, ptr;
701 ret = pt_validate(th);
705 if (ta->map[th->th_tid].type == PT_LWP) {
706 ret = ps_lgetfpregs(ta->ph, ta->map[th->th_tid].lwp, fpregs);
710 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
711 &tcb_addr, sizeof(tcb_addr));
714 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
715 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
716 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
720 ret = ps_lgetfpregs(ta->ph, lwp, fpregs);
724 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
727 pt_ucontext_to_fpreg(&tmbx.tm_context, fpregs);
/*
 * Read the thread's general registers: from the kernel when the thread has
 * an LWP, else decoded from the saved ucontext in its kse mailbox.
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
732 pt_thr_getgregs(const td_thrhandle_t *th, prgregset_t gregs)
734 const td_thragent_t *ta = th->th_ta;
735 struct kse_thr_mailbox tmbx;
736 psaddr_t tcb_addr, tmbx_addr, ptr;
742 ret = pt_validate(th);
746 if (ta->map[th->th_tid].type == PT_LWP) {
747 ret = ps_lgetregs(ta->ph,
748 ta->map[th->th_tid].lwp, gregs);
752 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr + ta->thread_off_tcb,
753 &tcb_addr, sizeof(tcb_addr));
756 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
757 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
758 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
762 ret = ps_lgetregs(ta->ph, lwp, gregs);
765 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
768 pt_ucontext_to_reg(&tmbx.tm_context, gregs);
/*
 * Write the thread's XMM (fxsave) state: via the kernel when on an LWP,
 * else read-modify-write the mailbox ucontext so unrelated context fields
 * are preserved. NOTE(review): incomplete extraction.
 */
774 pt_thr_setxmmregs(const td_thrhandle_t *th, const char *fxsave)
776 const td_thragent_t *ta = th->th_ta;
777 struct kse_thr_mailbox tmbx;
778 psaddr_t tcb_addr, tmbx_addr, ptr;
786 ret = pt_validate(th);
790 if (ta->map[th->th_tid].type == PT_LWP) {
791 ret = ps_lsetxmmregs(ta->ph, ta->map[th->th_tid].lwp, fxsave);
795 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
797 &tcb_addr, sizeof(tcb_addr));
800 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
801 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
802 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
806 ret = ps_lsetxmmregs(ta->ph, lwp, fxsave);
810 * Read a copy of context, this makes sure that registers
811 * not covered by structure reg won't be clobbered
813 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
817 pt_fxsave_to_ucontext(fxsave, &tmbx.tm_context);
818 ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Write the thread's FP registers: via the kernel when on an LWP, else
 * read-modify-write the mailbox ucontext to avoid clobbering fields the
 * prfpregset doesn't cover. NOTE(review): incomplete extraction.
 */
824 pt_thr_setfpregs(const td_thrhandle_t *th, const prfpregset_t *fpregs)
826 const td_thragent_t *ta = th->th_ta;
827 struct kse_thr_mailbox tmbx;
828 psaddr_t tcb_addr, tmbx_addr, ptr;
834 ret = pt_validate(th);
838 if (ta->map[th->th_tid].type == PT_LWP) {
839 ret = ps_lsetfpregs(ta->ph, ta->map[th->th_tid].lwp, fpregs);
843 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
845 &tcb_addr, sizeof(tcb_addr));
848 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
849 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
850 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
854 ret = ps_lsetfpregs(ta->ph, lwp, fpregs);
858 * Read a copy of context, this makes sure that registers
859 * not covered by structure reg won't be clobbered
861 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
865 pt_fpreg_to_ucontext(fpregs, &tmbx.tm_context);
866 ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Write the thread's general registers: via the kernel when on an LWP,
 * else read-modify-write the mailbox ucontext so fields not covered by
 * prgregset survive. NOTE(review): incomplete extraction.
 */
871 pt_thr_setgregs(const td_thrhandle_t *th, const prgregset_t gregs)
873 const td_thragent_t *ta = th->th_ta;
874 struct kse_thr_mailbox tmbx;
875 psaddr_t tcb_addr, tmbx_addr, ptr;
881 ret = pt_validate(th);
885 if (ta->map[th->th_tid].type == PT_LWP) {
886 ret = ps_lsetregs(ta->ph, ta->map[th->th_tid].lwp, gregs);
890 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
892 &tcb_addr, sizeof(tcb_addr));
895 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
896 ptr = tmbx_addr + offsetof(struct kse_thr_mailbox, tm_lwp);
897 ret = ps_pread(ta->ph, ptr, &lwp, sizeof(lwpid_t));
901 ret = ps_lsetregs(ta->ph, lwp, gregs);
906 * Read a copy of context, make sure that registers
907 * not covered by structure reg won't be clobbered
909 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
912 pt_reg_to_ucontext(gregs, &tmbx.tm_context);
913 ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
/*
 * Per-thread event operations. Bodies were dropped by the extraction;
 * presumably unimplemented stubs -- TODO confirm against the full source.
 */
918 pt_thr_event_enable(const td_thrhandle_t *th, int en)
925 pt_thr_set_event(const td_thrhandle_t *th, td_thr_events_t *setp)
932 pt_thr_clear_event(const td_thrhandle_t *th, td_thr_events_t *setp)
939 pt_thr_event_getmsg(const td_thrhandle_t *th, td_event_msg_t *msg)
/*
 * Enable (step != 0) or disable single-stepping for a user thread by
 * toggling TMDF_SSTEP in its kse mailbox debug flags and, when the thread
 * is not currently on an LWP, by patching the single-step bit in the saved
 * ucontext's register set (some architectures keep it there).
 * Fix: the extraction had mojibake -- "&regs" was collapsed to the
 * registered-trademark glyph ("(R)s", i.e. a mangled HTML "&reg;" entity)
 * in three places below; restored to "&regs".
 * NOTE(review): incomplete extraction -- error checks/returns missing.
 */
946 pt_thr_sstep(const td_thrhandle_t *th, int step)
948 const td_thragent_t *ta = th->th_ta;
949 struct kse_thr_mailbox tmbx;
951 psaddr_t tcb_addr, tmbx_addr;
958 ret = pt_validate(th);
/* Raw LWPs are stepped via ptrace elsewhere; nothing to do in the mailbox. */
962 if (ta->map[th->th_tid].type == PT_LWP)
965 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
967 &tcb_addr, sizeof(tcb_addr));
971 /* Clear or set single step flag in thread mailbox */
972 ret = ps_pread(ta->ph,
973 tcb_addr + ta->thread_off_tmbx +
974 offsetof(struct kse_thr_mailbox, tm_dflags),
975 &dflags, sizeof(uint32_t));
979 dflags |= TMDF_SSTEP;
981 dflags &= ~TMDF_SSTEP;
982 ret = ps_pwrite(ta->ph,
983 tcb_addr + ta->thread_off_tmbx +
984 offsetof(struct kse_thr_mailbox, tm_dflags),
985 &dflags, sizeof(uint32_t));
989 ret = ps_pread(ta->ph,
990 tcb_addr + ta->thread_off_tmbx +
991 offsetof(struct kse_thr_mailbox, tm_lwp),
992 &lwp, sizeof(lwpid_t));
998 tmbx_addr = tcb_addr + ta->thread_off_tmbx;
1000 * context is in userland, some architectures store
1001 * single step status in registers, we should change
1004 ret = ps_pread(ta->ph, tmbx_addr, &tmbx, sizeof(tmbx));
1006 pt_ucontext_to_reg(&tmbx.tm_context, &regs);
1007 /* only write out if it is really changed. */
1008 if (pt_reg_sstep(&regs, step) != 0) {
1009 pt_reg_to_ucontext(&regs, &tmbx.tm_context);
1010 ret = ps_pwrite(ta->ph, tmbx_addr, &tmbx,
/*
 * Retire any PT_LWP map entry for 'lwp' (called once the LWP is known to
 * be represented by a user thread). Scans the whole map; at most one
 * matching entry is expected.
 */
1018 pt_unmap_lwp(const td_thragent_t *ta, lwpid_t lwp)
1022 for (i = 0; i < ta->map_len; ++i) {
1023 if (ta->map[i].type == PT_LWP && ta->map[i].lwp == lwp) {
1024 ta->map[i].type = PT_NONE;
/*
 * Cheap local validity check for a thread handle: TID must index a live
 * (non-PT_NONE) slot of the agent's map.
 */
1031 pt_validate(const td_thrhandle_t *th)
1034 if (th->th_tid < 0 || th->th_tid >= th->th_ta->map_len ||
1035 th->th_ta->map[th->th_tid].type == PT_NONE)
/*
 * Resolve a TLS variable's address for a thread: recover the object's
 * tls index from its Obj_Entry (linkmap is embedded at thread_off_linkmap),
 * then index the thread's dtv array. dtv[0] is the generation count, so the
 * module's block pointer lives at dtv[tls_index + 1].
 * NOTE(review): incomplete extraction -- 'offset' addition and returns are
 * not visible.
 */
1041 pt_thr_tls_get_addr(const td_thrhandle_t *th, void *_linkmap, size_t offset,
1045 const td_thragent_t *ta = th->th_ta;
1046 psaddr_t tcb_addr, *dtv_addr;
1049 /* linkmap is a member of Obj_Entry */
1050 obj_entry = (char *)_linkmap - ta->thread_off_linkmap;
1052 /* get tlsindex of the object file */
1053 ret = ps_pread(ta->ph,
1054 obj_entry + ta->thread_off_tlsindex,
1055 &tls_index, sizeof(tls_index));
1059 /* get thread tcb */
1060 ret = ps_pread(ta->ph, ta->map[th->th_tid].thr +
1062 &tcb_addr, sizeof(tcb_addr));
1066 /* get dtv array address */
1067 ret = ps_pread(ta->ph, tcb_addr + ta->thread_off_dtv,
1068 &dtv_addr, sizeof(dtv_addr));
1071 /* now get the object's tls block base address */
1072 ret = ps_pread(ta->ph, &dtv_addr[tls_index+1], address,
/*
 * Dispatch table binding the generic thread_db agent interface to this
 * libkse-specific implementation; consumed by the libthread_db front end.
 * NOTE(review): the initializer's closing brace lies beyond this extraction.
 */
1081 struct ta_ops libpthread_db_ops = {
1083 .to_ta_clear_event = pt_ta_clear_event,
1084 .to_ta_delete = pt_ta_delete,
1085 .to_ta_event_addr = pt_ta_event_addr,
1086 .to_ta_event_getmsg = pt_ta_event_getmsg,
1087 .to_ta_map_id2thr = pt_ta_map_id2thr,
1088 .to_ta_map_lwp2thr = pt_ta_map_lwp2thr,
1089 .to_ta_new = pt_ta_new,
1090 .to_ta_set_event = pt_ta_set_event,
1091 .to_ta_thr_iter = pt_ta_thr_iter,
1092 .to_ta_tsd_iter = pt_ta_tsd_iter,
1093 .to_thr_clear_event = pt_thr_clear_event,
1094 .to_thr_dbresume = pt_thr_dbresume,
1095 .to_thr_dbsuspend = pt_thr_dbsuspend,
1096 .to_thr_event_enable = pt_thr_event_enable,
1097 .to_thr_event_getmsg = pt_thr_event_getmsg,
1098 .to_thr_get_info = pt_thr_get_info,
1099 .to_thr_getfpregs = pt_thr_getfpregs,
1100 .to_thr_getgregs = pt_thr_getgregs,
1101 .to_thr_set_event = pt_thr_set_event,
1102 .to_thr_setfpregs = pt_thr_setfpregs,
1103 .to_thr_setgregs = pt_thr_setgregs,
1104 .to_thr_validate = pt_thr_validate,
1105 .to_thr_tls_get_addr = pt_thr_tls_get_addr,
1107 /* FreeBSD specific extensions. */
1108 .to_thr_sstep = pt_thr_sstep,
1110 .to_thr_getxmmregs = pt_thr_getxmmregs,
1111 .to_thr_setxmmregs = pt_thr_setxmmregs,