2 * Copyright (c) 1994, Sean Eric Fagan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
38 #include <sys/mutex.h>
39 #include <sys/syscallsubr.h>
40 #include <sys/sysproto.h>
42 #include <sys/vnode.h>
43 #include <sys/ptrace.h>
45 #include <sys/malloc.h>
46 #include <sys/signalvar.h>
48 #include <machine/reg.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
59 * Functions implemented using PROC_ACTION():
61 * proc_read_regs(proc, regs)
62 * Get the current user-visible register set from the process
63 * and copy it into the regs structure (<machine/reg.h>).
64 * The process is stopped at the time read_regs is called.
66 * proc_write_regs(proc, regs)
67 * Update the current register set from the passed in regs
68 * structure. Take care to avoid clobbering special CPU
69 * registers or privileged bits in the PSL.
70 * Depending on the architecture this may have fix-up work to do,
71 * especially if the IAR or PCW are modified.
72 * The process is stopped at the time write_regs is called.
74 * proc_read_fpregs, proc_write_fpregs
75 * deal with the floating point register set, otherwise as above.
77 * proc_read_dbregs, proc_write_dbregs
78 * deal with the processor debug register set, otherwise as above.
81 * Arrange for the process to trap after executing a single instruction.
/*
 * PROC_ACTION(action): boilerplate wrapper for the MD register accessors
 * below.  Requires the target thread's process lock to be held, and bails
 * out when the process image is not resident (PS_INMEM clear).
 * NOTE(review): this view of the file is missing lines of the macro body
 * (e.g. the error return and do/while(0) tail) -- do not edit from here.
 */
84 #define PROC_ACTION(action) do { \
87 PROC_LOCK_ASSERT(td->td_proc, MA_OWNED); \
88 if ((td->td_proc->p_sflag & PS_INMEM) == 0) \
/*
 * Copy the stopped thread's user-visible register set into *regs by
 * dispatching the machine-dependent fill_regs() under PROC_ACTION
 * (process lock held, fails if the process is swapped out).
 */
96 proc_read_regs(struct thread *td, struct reg *regs)
99 PROC_ACTION(fill_regs(td, regs));
/*
 * Install a new user-visible register set for the stopped thread via the
 * machine-dependent set_regs() under PROC_ACTION.  The MD layer is
 * responsible for protecting privileged PSL bits (see block comment above).
 */
103 proc_write_regs(struct thread *td, struct reg *regs)
106 PROC_ACTION(set_regs(td, regs));
/*
 * Read the thread's debug-register set into *dbregs via the MD
 * fill_dbregs() helper, under the PROC_ACTION lock/residency checks.
 */
110 proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
113 PROC_ACTION(fill_dbregs(td, dbregs));
/*
 * Write the thread's debug-register set from *dbregs via the MD
 * set_dbregs() helper, under the PROC_ACTION lock/residency checks.
 */
117 proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
120 PROC_ACTION(set_dbregs(td, dbregs));
124 * Ptrace doesn't support fpregs at all, and there are no security holes
125 * or translations for fpregs, so we can just copy them.
/*
 * Read the thread's floating-point register set into *fpregs via the MD
 * fill_fpregs() helper (plain copy; no translation needed, per the
 * comment above).
 */
128 proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
131 PROC_ACTION(fill_fpregs(td, fpregs));
/*
 * Write the thread's floating-point register set from *fpregs via the MD
 * set_fpregs() helper, under the PROC_ACTION lock/residency checks.
 */
135 proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
138 PROC_ACTION(set_fpregs(td, fpregs));
/*
 * Arrange for the thread to trap after executing a single instruction,
 * by dispatching the MD ptrace_single_step() under PROC_ACTION.
 */
142 proc_sstep(struct thread *td)
145 PROC_ACTION(ptrace_single_step(td));
/*
 * proc_rwmem: transfer data between a uio and the address space of
 * process p, one page per iteration.  Direction (read vs. write) comes
 * from uio->uio_rw.  Each page is faulted in on behalf of the target,
 * looked up, held while uiomove_fromphys() does the copy, then released.
 * NOTE(review): this view is missing many lines (declarations of map/tmap/
 * m/uva/len, error branches, loop braces) -- treat as a partial listing.
 */
149 proc_rwmem(struct proc *p, struct uio *uio)
153 vm_object_t backing_object, object = NULL;
154 vm_offset_t pageno = 0; /* page number */
156 int error, refcnt, writing;
159 * if the vmspace is in the midst of being deallocated or the
160 * process is exiting, don't try to grab anything. The page table
161 * usage in that process can be messed up.
/* Refuse to touch an exiting process. */
164 if ((p->p_flag & P_WEXIT))
/*
 * Take a vmspace reference with a CAS loop so we never resurrect a
 * vmspace whose refcount already dropped to zero.
 */
167 if ((refcnt = vm->vm_refcnt) < 1)
169 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
176 writing = uio->uio_rw == UIO_WRITE;
/* Writes also request VM_PROT_OVERRIDE_WRITE so the debugger can poke
 * read-only (e.g. text) pages. */
177 reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
181 * Only map in one page at a time. We don't have to, but it
182 * makes things easier. This way is trivial - right?
187 int page_offset; /* offset into page */
188 vm_map_entry_t out_entry;
197 uva = (vm_offset_t)uio->uio_offset;
200 * Get the page number of this segment.
202 pageno = trunc_page(uva);
203 page_offset = uva - pageno;
206 * How many bytes to copy
/* Clamp the transfer to the remainder of the current page. */
208 len = min(PAGE_SIZE - page_offset, uio->uio_resid);
211 * Fault the page on behalf of the process
213 error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
220 * Now we need to get the page. out_entry, out_prot, wired,
221 * and single_use aren't used. One would think the vm code
222 * would be a *bit* nicer... We use tmap because
223 * vm_map_lookup() can change the map argument.
226 error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
227 &object, &pindex, &out_prot, &wired);
/*
 * Walk down the shadow chain until the page is found, moving the
 * object lock hand-over-hand and adjusting pindex by each backing
 * object's offset.
 */
232 VM_OBJECT_LOCK(object);
233 while ((m = vm_page_lookup(object, pindex)) == NULL &&
235 (backing_object = object->backing_object) != NULL) {
237 * Allow fallback to backing objects if we are reading.
239 VM_OBJECT_LOCK(backing_object);
240 pindex += OFF_TO_IDX(object->backing_object_offset);
241 VM_OBJECT_UNLOCK(object);
242 object = backing_object;
244 VM_OBJECT_UNLOCK(object);
246 vm_map_lookup_done(tmap, out_entry);
252 * Hold the page in memory.
254 vm_page_lock_queues();
256 vm_page_unlock_queues();
259 * We're done with tmap now.
261 vm_map_lookup_done(tmap, out_entry);
264 * Now do the i/o move.
/* Copy between the held physical page and the uio. */
266 error = uiomove_fromphys(&m, page_offset, len, uio);
/* Release the hold taken above. */
271 vm_page_lock_queues();
273 vm_page_unlock_queues();
275 } while (error == 0 && uio->uio_resid > 0);
282 * Process debugging system call.
284 #ifndef _SYS_SYSPROTO_H_
/*
 * ptrace() system call entry point.  Copies in the request-specific
 * argument structure from userspace (into the union `r`, presumably --
 * its declaration is not visible in this view), calls kern_ptrace() to do
 * the work, then copies results back out for the "read"-style requests.
 * NOTE(review): the switch statements and case labels selecting which
 * copyin/copyout runs are elided from this view.
 */
297 ptrace(struct thread *td, struct ptrace_args *uap)
300 * XXX this obfuscation is to reduce stack usage, but the register
301 * structs may be too large to put on the stack anyway.
304 struct ptrace_io_desc piod;
305 struct ptrace_lwpinfo pl;
/* Copy in the per-request argument block (one of these per request). */
321 error = copyin(uap->addr, &r.reg, sizeof r.reg);
324 error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
327 error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
330 error = copyin(uap->addr, &r.piod, sizeof r.piod);
/* All the real work happens in kern_ptrace(). */
339 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
/* Copy results back to userland; the piod copyout result is
 * deliberately ignored so the I/O error (if any) is preserved. */
345 (void)copyout(&r.piod, uap->addr, sizeof r.piod);
348 error = copyout(&r.reg, uap->addr, sizeof r.reg);
351 error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
354 error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
/* PT_LWPINFO: caller supplies the output size in uap->data. */
357 error = copyout(&r.pl, uap->addr, uap->data);
/*
 * kern_ptrace: in-kernel implementation of all ptrace requests.
 * `pid` may name either a process or (when > PID_MAX) an LWP id; in the
 * latter case the owning process is found by scanning allproc.
 * NOTE(review): this view is heavily elided -- the switch on `req`, its
 * case labels, error paths, braces and several declarations are missing.
 * The comments below annotate only the visible statements.
 */
365 kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
369 struct proc *curp, *p, *pp;
370 struct thread *td2 = NULL;
371 struct ptrace_io_desc *piod;
372 struct ptrace_lwpinfo *pl;
373 int error, write, tmp, num;
374 int proctree_locked = 0;
375 lwpid_t tid = 0, *buf;
/* Remember the raw id so we can tell pid-targeted from tid-targeted
 * requests later (saved_pid <= PID_MAX means a process id). */
376 pid_t saved_pid = pid;
380 /* Lock proctree before locking the process. */
390 sx_xlock(&proctree_lock);
398 if (req == PT_TRACE_ME) {
/*
 * Target lookup: a value <= PID_MAX is a process id; otherwise treat
 * it as an LWP id and search every thread of every process.
 */
402 if (pid <= PID_MAX) {
403 if ((p = pfind(pid)) == NULL) {
405 sx_xunlock(&proctree_lock);
409 /* this is slow, should be optimized */
410 sx_slock(&allproc_lock);
411 FOREACH_PROC_IN_SYSTEM(p) {
413 mtx_lock_spin(&sched_lock);
414 FOREACH_THREAD_IN_PROC(p, td2) {
415 if (td2->td_tid == pid)
418 mtx_unlock_spin(&sched_lock);
420 break; /* proc lock held */
423 sx_sunlock(&allproc_lock);
426 sx_xunlock(&proctree_lock);
/* Permission checks: visibility first, then debuggability. */
433 if ((error = p_cansee(td, p)) != 0)
436 if ((error = p_candebug(td, p)) != 0)
440 * System processes can't be debugged.
442 if ((p->p_flag & P_SYSTEM) != 0) {
448 td2 = FIRST_THREAD_IN_PROC(p);
/* A process may not attach to itself. */
462 if (p->p_pid == td->td_proc->p_pid) {
/* Already traced by someone. */
468 if (p->p_flag & P_TRACED) {
473 /* Can't trace an ancestor if you're being traced. */
474 if (curp->p_flag & P_TRACED) {
475 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
488 /* Allow thread to clear single step for itself */
489 if (td->td_tid == tid)
494 /* not being traced... */
495 if ((p->p_flag & P_TRACED) == 0) {
500 /* not being traced by YOU */
501 if (p->p_pptr != td->td_proc) {
506 /* not currently stopped */
507 if (!P_SHOULDSTOP(p) || p->p_suspcount != p->p_numthreads ||
508 (p->p_flag & P_WAITED) == 0) {
519 * Single step fixup ala procfs
521 FIX_SSTEP(td2); /* XXXKSE */
525 * Actually do the requests
528 td->td_retval[0] = 0;
/* PT_TRACE_ME: mark self traced; parent becomes the debugger. */
532 /* set my trace flag and "owner" so it can read/write me */
533 p->p_flag |= P_TRACED;
534 p->p_oppid = p->p_pptr->p_pid;
536 sx_xunlock(&proctree_lock);
/* PT_ATTACH: mark traced, record original parent, reparent to us. */
540 /* security check done above */
541 p->p_flag |= P_TRACED;
542 p->p_oppid = p->p_pptr->p_pid;
543 if (p->p_pptr != td->td_proc)
544 proc_reparent(p, td->td_proc);
546 goto sendsig; /* in PT_CONTINUE below */
/* Clear / set single-step state on the target thread. */
550 error = ptrace_clear_single_step(td2);
559 error = ptrace_single_step(td2);
/* PT_SUSPEND: park the thread at the debugger-suspend point. */
568 mtx_lock_spin(&sched_lock);
569 td2->td_flags |= TDF_DBSUSPEND;
570 mtx_unlock_spin(&sched_lock);
/* PT_RESUME: undo PT_SUSPEND. */
577 mtx_lock_spin(&sched_lock);
578 td2->td_flags &= ~TDF_DBSUSPEND;
579 mtx_unlock_spin(&sched_lock);
590 /* Zero means do not send any signal */
591 if (data < 0 || data > _SIG_MAXSIG) {
601 error = ptrace_single_step(td2);
/* Syscall-tracing stops: entry (SCE), exit (SCX), or both. */
609 p->p_stops |= S_PT_SCE;
612 p->p_stops |= S_PT_SCX;
615 p->p_stops |= S_PT_SCE | S_PT_SCX;
/* addr == (void *)1 means "continue from where it stopped";
 * anything else sets a new program counter. */
619 if (addr != (void *)1) {
621 error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
630 if (req == PT_DETACH) {
631 /* reset process parent */
632 if (p->p_oppid != p->p_pptr->p_pid) {
636 pp = pfind(p->p_oppid);
642 proc_reparent(p, pp);
644 p->p_sigparent = SIGCHLD;
646 p->p_flag &= ~(P_TRACED | P_WAITED);
649 /* should we send SIGCHLD? */
654 sx_xunlock(&proctree_lock);
655 /* deliver or queue signal */
656 if (P_SHOULDSTOP(p)) {
658 p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
659 mtx_lock_spin(&sched_lock);
/* Process-directed resume: hand the signal to the thread that
 * reported the stop (p_xthread); tid-directed uses td2. */
660 if (saved_pid <= PID_MAX) {
661 p->p_xthread->td_flags &= ~TDF_XSIG;
662 p->p_xthread->td_xsig = data;
664 td2->td_flags &= ~TDF_XSIG;
668 if (req == PT_DETACH) {
/* On detach, release every debugger-suspended thread. */
670 FOREACH_THREAD_IN_PROC(p, td3)
671 td3->td_flags &= ~TDF_DBSUSPEND;
674 * unsuspend all threads, to not let a thread run,
675 * you should use PT_SUSPEND to suspend it before
676 * continuing process.
680 mtx_unlock_spin(&sched_lock);
/* PT_READ_*/PT_WRITE_*: move one int via proc_rwmem(). */
696 /* write = 0 set above */
697 iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
698 iov.iov_len = sizeof(int);
701 uio.uio_offset = (off_t)(uintptr_t)addr;
702 uio.uio_resid = sizeof(int);
703 uio.uio_segflg = UIO_SYSSPACE; /* i.e.: the uap */
704 uio.uio_rw = write ? UIO_WRITE : UIO_READ;
706 error = proc_rwmem(p, &uio);
707 if (uio.uio_resid != 0) {
709 * XXX proc_rwmem() doesn't currently return ENOSPC,
710 * so I think write() can bogusly return 0.
711 * XXX what happens for short writes? We don't want
712 * to write partial data.
713 * XXX proc_rwmem() returns EPERM for other invalid
714 * addresses. Convert this to EINVAL. Does this
715 * clobber returns of EPERM for other reasons?
717 if (error == 0 || error == ENOSPC || error == EPERM)
718 error = EINVAL; /* EOF */
721 td->td_retval[0] = tmp;
/* PT_IO: bulk transfer described by a ptrace_io_desc. */
727 iov.iov_base = piod->piod_addr;
728 iov.iov_len = piod->piod_len;
731 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
732 uio.uio_resid = piod->piod_len;
733 uio.uio_segflg = UIO_USERSPACE;
735 switch (piod->piod_op) {
738 uio.uio_rw = UIO_READ;
742 uio.uio_rw = UIO_WRITE;
747 error = proc_rwmem(p, &uio);
/* Report the number of bytes actually moved back to the caller. */
748 piod->piod_len -= uio.uio_resid;
753 goto sendsig; /* in PT_CONTINUE above */
/* Register-set requests delegate to the proc_* helpers above. */
757 error = proc_write_regs(td2, addr);
764 error = proc_read_regs(td2, addr);
771 error = proc_write_fpregs(td2, addr);
778 error = proc_read_fpregs(td2, addr);
785 error = proc_write_dbregs(td2, addr);
792 error = proc_read_dbregs(td2, addr);
/* PT_LWPINFO: caller's buffer size (data) must be sane. */
798 if (data == 0 || data > sizeof(*pl))
802 if (saved_pid <= PID_MAX) {
803 pl->pl_lwpid = p->p_xthread->td_tid;
804 pl->pl_event = PL_EVENT_SIGNAL;
806 pl->pl_lwpid = td2->td_tid;
807 if (td2->td_flags & TDF_XSIG)
808 pl->pl_event = PL_EVENT_SIGNAL;
812 if (td2->td_pflags & TDP_SA) {
813 pl->pl_flags = PL_FLAG_SA;
814 if (td2->td_upcall && !TD_CAN_UNBIND(td2))
815 pl->pl_flags |= PL_FLAG_BOUND;
/* PT_GETNUMLWPS: return the thread count. */
824 td->td_retval[0] = p->p_numthreads;
/* PT_GETLWPLIST: copy out up to `data` thread ids. */
833 num = imin(p->p_numthreads, data);
835 buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
838 mtx_lock_spin(&sched_lock);
839 FOREACH_THREAD_IN_PROC(p, td2) {
842 buf[tmp++] = td2->td_tid;
844 mtx_unlock_spin(&sched_lock);
846 error = copyout(buf, addr, tmp * sizeof(lwpid_t));
849 td->td_retval[0] = num;
853 #ifdef __HAVE_PTRACE_MACHDEP
/* Machine-dependent requests are punted to cpu_ptrace(). */
854 if (req >= PT_FIRSTMACH) {
857 error = cpu_ptrace(td2, req, addr, data);
865 /* Unknown request. */
872 sx_xunlock(&proctree_lock);
877 * Stop a process because of a debugging event;
878 * stay stopped until p->p_step is cleared
879 * (cleared by PIOCCONT in procfs).
/*
 * Record the debugging event in p_stype, wake any waiters sleeping on
 * p_stype (PIOCWAIT), then sleep on p_step until the debugger clears it.
 * Caller must hold the process lock (asserted below); msleep drops and
 * reacquires p_mtx across the sleep.
 */
882 stopevent(struct proc *p, unsigned int event, unsigned int val)
885 PROC_LOCK_ASSERT(p, MA_OWNED);
890 p->p_stype = event; /* Which event caused the stop? */
891 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */
892 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);