/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>
/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, at
 * the next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
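/*
 * Illustrative sketch (not part of this file): a userland process drives
 * this facility through ktrace(2), naming a regular file to receive the
 * record stream.  The helper below is hypothetical; the KTROP_* and
 * KTRFAC_* constants come from <sys/ktrace.h> and are the same ops/facs
 * values decoded by the ktrace() handler later in this file.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *
 *	static int
 *	trace_pid(const char *tracefile, pid_t pid)
 *	{
 *		int facs = KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI;
 *
 *		return (ktrace(tracefile, KTROP_SET, facs, pid));
 *	}
 */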
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifndef	KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif
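/*
 * Example (assumption: KTRACE_REQUEST_POOL is wired up as a kernel
 * configuration option through opt_ktrace.h): the default pool size above
 * can be overridden at build time in the kernel configuration file:
 *
 *	options	KTRACE_REQUEST_POOL=200
 */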
struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0					/* KTR_USER */
};
static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct cv ktrace_cv;
static struct sx ktrace_sx;
static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	cv_init(&ktrace_cv, "ktrace");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
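/*
 * Example (not part of this file): with the handler above registered, the
 * pool can be inspected and resized at runtime from the shell:
 *
 *	sysctl kern.ktrace.request_pool
 *	sysctl kern.ktrace.request_pool=200
 *
 * A write that asks for more requests than could actually be provided
 * reports ENOSPC while leaving the pool at whatever size was reached.
 */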
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}
static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(p->p_comm, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}
/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}
/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}
/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}
static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}
void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}
/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}
void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}
void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_request *req;
	struct ktr_psig	*kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}
void
ktrcsw(out, user)
	int out, user;
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
/* Interface and common routines */
#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
int
ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, -1);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0, td);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		sx_slock(&allproc_lock);
		LIST_FOREACH(p, &allproc, p_list) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					PROC_UNLOCK(p);
					vfslocked = VFS_LOCK_GIANT(vp->v_mount);
					(void) vn_close(vp, FREAD|FWRITE,
					    cred, td);
					VFS_UNLOCK_GIANT(vfslocked);
					crfree(cred);
				} else {
					PROC_UNLOCK(p);
					error = EPERM;
				}
			} else
				PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		sx_sunlock(&proctree_lock);
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
		sx_sunlock(&proctree_lock);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
}
int
utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
}
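/*
 * Illustrative sketch (not part of this file): userland pairs with the
 * handler above through utrace(2), which records an arbitrary buffer of
 * up to KTR_USER_MAXLEN bytes as a KTR_USER event in the trace stream:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <string.h>
 *
 *	static int
 *	trace_marker(const char *msg)
 *	{
 *
 *		return (utrace(msg, strlen(msg)));
 *	}
 */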
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check_cred(td->td_ucred, PRIV_KTRACE,
		    SUSER_ALLOWJAIL) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}
static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_check_vnode_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 *       we really do this?  Other processes might have suitable
	 *       credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check_cred(td->td_ucred, PRIV_KTRACE, SUSER_ALLOWJAIL))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}