2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1982, 1986, 1989, 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
42 #include "opt_capsicum.h"
44 #include "opt_ktrace.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/capsicum.h>
51 #include <sys/fcntl.h>
53 #include <sys/filedesc.h>
54 #include <sys/filio.h>
56 #include <sys/kernel.h>
57 #include <sys/limits.h>
59 #include <sys/malloc.h>
60 #include <sys/mount.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/selinfo.h>
67 #include <sys/protosw.h>
68 #include <sys/racct.h>
69 #include <sys/resourcevar.h>
71 #include <sys/signalvar.h>
76 #include <sys/syscallsubr.h>
77 #include <sys/sysctl.h>
78 #include <sys/sysproto.h>
79 #include <sys/unistd.h>
81 #include <sys/vnode.h>
82 #include <sys/ktrace.h>
86 #include <security/audit/audit.h>
93 static MALLOC_DEFINE(M_FILEDESC, "filedesc", "Open file descriptor table");
94 static MALLOC_DEFINE(M_PWD, "pwd", "Descriptor table vnodes");
95 static MALLOC_DEFINE(M_PWDDESC, "pwddesc", "Pwd descriptors");
96 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "filedesc_to_leader",
97 "file desc to leader structures");
98 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
99 MALLOC_DEFINE(M_FILECAPS, "filecaps", "descriptor capabilities");
101 MALLOC_DECLARE(M_FADVISE);
103 static __read_mostly uma_zone_t file_zone;
104 static __read_mostly uma_zone_t filedesc0_zone;
105 __read_mostly uma_zone_t pwd_zone;
108 static int closefp(struct filedesc *fdp, int fd, struct file *fp,
109 struct thread *td, bool holdleaders, bool audit);
110 static void export_file_to_kinfo(struct file *fp, int fd,
111 cap_rights_t *rightsp, struct kinfo_file *kif,
112 struct filedesc *fdp, int flags);
113 static int fd_first_free(struct filedesc *fdp, int low, int size);
114 static void fdgrowtable(struct filedesc *fdp, int nfd);
115 static void fdgrowtable_exp(struct filedesc *fdp, int nfd);
116 static void fdunused(struct filedesc *fdp, int fd);
117 static void fdused(struct filedesc *fdp, int fd);
118 static int fget_unlocked_seq(struct filedesc *fdp, int fd,
119 cap_rights_t *needrightsp, struct file **fpp, seqc_t *seqp);
120 static int getmaxfd(struct thread *td);
121 static u_long *filecaps_copy_prep(const struct filecaps *src);
122 static void filecaps_copy_finish(const struct filecaps *src,
123 struct filecaps *dst, u_long *ioctls);
124 static u_long *filecaps_free_prep(struct filecaps *fcaps);
125 static void filecaps_free_finish(u_long *ioctls);
127 static struct pwd *pwd_alloc(void);
132 * - An array of open file descriptors (fd_ofiles)
133 * - An array of file flags (fd_ofileflags)
134 * - A bitmap recording which descriptors are in use (fd_map)
136 * A process starts out with NDFILE descriptors. The value of NDFILE has
137 * been selected based on the historical limit of 20 open files, and an
138 * assumption that the majority of processes, especially short-lived
139 * processes like shells, will never need more.
141 * If this initial allocation is exhausted, a larger descriptor table and
142 * map are allocated dynamically, and the pointers in the process's struct
143 * filedesc are updated to point to those. This is repeated every time
144 * the process runs out of file descriptors (provided it hasn't hit its
147 * Since threads may hold references to individual descriptor table
148 * entries, the tables are never freed. Instead, they are placed on a
149 * linked list and freed only when the struct filedesc is released.
152 #define NDSLOTSIZE sizeof(NDSLOTTYPE)
153 #define NDENTRIES (NDSLOTSIZE * __CHAR_BIT)
154 #define NDSLOT(x) ((x) / NDENTRIES)
155 #define NDBIT(x) ((NDSLOTTYPE)1 << ((x) % NDENTRIES))
156 #define NDSLOTS(x) (((x) + NDENTRIES - 1) / NDENTRIES)
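/*
 * Illustrative sketch (not part of this file; kept under "#if 0"): how the
 * macros above map a descriptor number onto the fd_map bitmap, assuming an
 * LP64 machine where NDSLOTTYPE (u_long) is 64 bits wide and NDENTRIES is 64.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int fd = 70;
	size_t entries = sizeof(unsigned long) * 8;	/* NDENTRIES on LP64 */

	/* fd 70 lives in slot 1 (NDSLOT: 70 / 64) as bit 6 (NDBIT: 70 % 64). */
	printf("fd %d -> slot %zu, bit mask %#lx\n",
	    fd, fd / entries, 1UL << (fd % entries));
	return (0);
}
#endif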
159 * SLIST entry used to keep track of ofiles which must be reclaimed when the process exits.
163 struct fdescenttbl *ft_table;
164 SLIST_ENTRY(freetable) ft_next;
168 * Initial allocation: a filedesc structure + the head of SLIST used to
169 * keep track of old ofiles + enough space for NDFILE descriptors.
172 struct fdescenttbl0 {
174 struct filedescent fdt_ofiles[NDFILE];
178 struct filedesc fd_fd;
179 SLIST_HEAD(, freetable) fd_free;
180 struct fdescenttbl0 fd_dfiles;
181 NDSLOTTYPE fd_dmap[NDSLOTS(NDFILE)];
185 * Descriptor management.
187 static int __exclusive_cache_line openfiles; /* actual number of open files */
188 struct mtx sigio_lock; /* mtx to protect pointers to sigio */
189 void __read_mostly (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
192 * If low >= size, just return low. Otherwise find the first zero bit in the
193 * given bitmap, starting at low and not exceeding size - 1. Return size if none is found.
197 fd_first_free(struct filedesc *fdp, int low, int size)
199 NDSLOTTYPE *map = fdp->fd_map;
207 if (low % NDENTRIES) {
208 mask = ~(~(NDSLOTTYPE)0 >> (NDENTRIES - (low % NDENTRIES)));
209 if ((mask &= ~map[off]) != 0UL)
210 return (off * NDENTRIES + ffsl(mask) - 1);
213 for (maxoff = NDSLOTS(size); off < maxoff; ++off)
214 if (map[off] != ~0UL)
215 return (off * NDENTRIES + ffsl(~map[off]) - 1);
220 * Find the last used fd.
222 * Call this variant if fdp can't be modified by anyone else (e.g., during exec).
223 * Otherwise use fdlastfile.
226 fdlastfile_single(struct filedesc *fdp)
228 NDSLOTTYPE *map = fdp->fd_map;
231 off = NDSLOT(fdp->fd_nfiles - 1);
232 for (minoff = NDSLOT(0); off >= minoff; --off)
234 return (off * NDENTRIES + flsl(map[off]) - 1);
239 fdlastfile(struct filedesc *fdp)
242 FILEDESC_LOCK_ASSERT(fdp);
243 return (fdlastfile_single(fdp));
247 fdisused(struct filedesc *fdp, int fd)
250 KASSERT(fd >= 0 && fd < fdp->fd_nfiles,
251 ("file descriptor %d out of range (0, %d)", fd, fdp->fd_nfiles));
253 return ((fdp->fd_map[NDSLOT(fd)] & NDBIT(fd)) != 0);
257 * Mark a file descriptor as used.
260 fdused_init(struct filedesc *fdp, int fd)
263 KASSERT(!fdisused(fdp, fd), ("fd=%d is already used", fd));
265 fdp->fd_map[NDSLOT(fd)] |= NDBIT(fd);
269 fdused(struct filedesc *fdp, int fd)
272 FILEDESC_XLOCK_ASSERT(fdp);
274 fdused_init(fdp, fd);
275 if (fd == fdp->fd_freefile)
280 * Mark a file descriptor as unused.
283 fdunused(struct filedesc *fdp, int fd)
286 FILEDESC_XLOCK_ASSERT(fdp);
288 KASSERT(fdisused(fdp, fd), ("fd=%d is already unused", fd));
289 KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
290 ("fd=%d is still in use", fd));
292 fdp->fd_map[NDSLOT(fd)] &= ~NDBIT(fd);
293 if (fd < fdp->fd_freefile)
294 fdp->fd_freefile = fd;
298 * Free a file descriptor.
300 * Avoid some work if fdp is about to be destroyed.
303 fdefree_last(struct filedescent *fde)
306 filecaps_free(&fde->fde_caps);
310 fdfree(struct filedesc *fdp, int fd)
312 struct filedescent *fde;
314 FILEDESC_XLOCK_ASSERT(fdp);
315 fde = &fdp->fd_ofiles[fd];
317 seqc_write_begin(&fde->fde_seqc);
319 fde->fde_file = NULL;
321 seqc_write_end(&fde->fde_seqc);
328 * System calls on descriptors.
330 #ifndef _SYS_SYSPROTO_H_
331 struct getdtablesize_args {
337 sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
343 td->td_retval[0] = getmaxfd(td);
345 PROC_LOCK(td->td_proc);
346 lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
347 PROC_UNLOCK(td->td_proc);
348 if (lim < td->td_retval[0])
349 td->td_retval[0] = lim;
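/*
 * Illustrative userland sketch (not part of this file): the value computed
 * above is what getdtablesize(2) returns, i.e. the effective per-process
 * descriptor limit derived from RLIMIT_NOFILE (and further clamped by
 * maxfilesperproc and, with racct enabled, the RACCT_NOFILE limit).
 */
#if 0
#include <sys/resource.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == -1)
		return (1);
	printf("getdtablesize() = %d, RLIMIT_NOFILE (soft) = %ju\n",
	    getdtablesize(), (uintmax_t)rl.rlim_cur);
	return (0);
}
#endif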
355 * Duplicate a file descriptor to a particular value.
357 * Note: keep in mind that a potential race condition exists when closing
358 * descriptors from a shared descriptor table (via rfork).
360 #ifndef _SYS_SYSPROTO_H_
368 sys_dup2(struct thread *td, struct dup2_args *uap)
371 return (kern_dup(td, FDDUP_FIXED, 0, (int)uap->from, (int)uap->to));
375 * Duplicate a file descriptor.
377 #ifndef _SYS_SYSPROTO_H_
384 sys_dup(struct thread *td, struct dup_args *uap)
387 return (kern_dup(td, FDDUP_NORMAL, 0, (int)uap->fd, 0));
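/*
 * Illustrative userland sketch (not part of this file): dup2(2), serviced by
 * the kern_dup(FDDUP_FIXED, ...) path above, is commonly used to redirect a
 * well-known descriptor such as stdout.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/out.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd == -1)
		return (1);
	/* Make descriptor 1 (stdout) refer to the same open file. */
	if (dup2(fd, STDOUT_FILENO) == -1)
		return (1);
	close(fd);
	printf("this line ends up in /tmp/out.log\n");
	return (0);
}
#endif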
391 * The file control system call.
393 #ifndef _SYS_SYSPROTO_H_
402 sys_fcntl(struct thread *td, struct fcntl_args *uap)
405 return (kern_fcntl_freebsd(td, uap->fd, uap->cmd, uap->arg));
409 kern_fcntl_freebsd(struct thread *td, int fd, int cmd, long arg)
423 * Convert old flock structure to new.
425 error = copyin((void *)(intptr_t)arg, &ofl, sizeof(ofl));
426 fl.l_start = ofl.l_start;
427 fl.l_len = ofl.l_len;
428 fl.l_pid = ofl.l_pid;
429 fl.l_type = ofl.l_type;
430 fl.l_whence = ofl.l_whence;
444 arg1 = (intptr_t)&fl;
450 error = copyin((void *)(intptr_t)arg, &fl, sizeof(fl));
451 arg1 = (intptr_t)&fl;
459 error = kern_fcntl(td, fd, newcmd, arg1);
462 if (cmd == F_OGETLK) {
463 ofl.l_start = fl.l_start;
464 ofl.l_len = fl.l_len;
465 ofl.l_pid = fl.l_pid;
466 ofl.l_type = fl.l_type;
467 ofl.l_whence = fl.l_whence;
468 error = copyout(&ofl, (void *)(intptr_t)arg, sizeof(ofl));
469 } else if (cmd == F_GETLK) {
470 error = copyout(&fl, (void *)(intptr_t)arg, sizeof(fl));
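/*
 * Illustrative userland sketch (not part of this file): the struct flock
 * converted above is the same structure userland hands to fcntl(2) for POSIX
 * advisory locking; a minimal whole-file write-lock request looks like this.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
lock_whole_file(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_start = 0;		/* from the beginning of the file ... */
	fl.l_len = 0;		/* ... to EOF (a length of 0 means "to the end") */
	fl.l_whence = SEEK_SET;
	fl.l_type = F_WRLCK;	/* exclusive (write) lock */

	/* F_SETLK fails with EAGAIN instead of blocking like F_SETLKW. */
	return (fcntl(fd, F_SETLK, &fl));
}
#endif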
476 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg)
478 struct filedesc *fdp;
480 struct file *fp, *fp2;
481 struct filedescent *fde;
485 struct kinfo_file *kif;
486 int error, flg, kif_sz, seals, tmp;
500 error = kern_dup(td, FDDUP_FCNTL, 0, fd, tmp);
503 case F_DUPFD_CLOEXEC:
505 error = kern_dup(td, FDDUP_FCNTL, FDDUP_FLAG_CLOEXEC, fd, tmp);
510 error = kern_dup(td, FDDUP_FIXED, 0, fd, tmp);
513 case F_DUP2FD_CLOEXEC:
515 error = kern_dup(td, FDDUP_FIXED, FDDUP_FLAG_CLOEXEC, fd, tmp);
521 fde = fdeget_locked(fdp, fd);
524 (fde->fde_flags & UF_EXCLOSE) ? FD_CLOEXEC : 0;
527 FILEDESC_SUNLOCK(fdp);
533 fde = fdeget_locked(fdp, fd);
535 fde->fde_flags = (fde->fde_flags & ~UF_EXCLOSE) |
536 (arg & FD_CLOEXEC ? UF_EXCLOSE : 0);
539 FILEDESC_XUNLOCK(fdp);
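/*
 * Illustrative userland sketch (not part of this file): the F_GETFD/F_SETFD
 * cases above manipulate the per-descriptor close-on-exec flag (FD_CLOEXEC,
 * stored as UF_EXCLOSE in the descriptor table).
 */
#if 0
#include <fcntl.h>

static int
set_cloexec(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFD);
	if (flags == -1)
		return (-1);
	/* Ask the kernel to close this descriptor across execve(2). */
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}
#endif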
543 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETFL, &fp);
546 td->td_retval[0] = OFLAGS(fp->f_flag);
551 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETFL, &fp);
554 if (fp->f_ops == &path_fileops) {
560 tmp = flg = fp->f_flag;
562 tmp |= FFLAGS(arg & ~O_ACCMODE) & FCNTLFLAGS;
563 } while (atomic_cmpset_int(&fp->f_flag, flg, tmp) == 0);
564 tmp = fp->f_flag & FNONBLOCK;
565 error = fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
570 tmp = fp->f_flag & FASYNC;
571 error = fo_ioctl(fp, FIOASYNC, &tmp, td->td_ucred, td);
576 atomic_clear_int(&fp->f_flag, FNONBLOCK);
578 (void)fo_ioctl(fp, FIONBIO, &tmp, td->td_ucred, td);
583 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_GETOWN, &fp);
586 error = fo_ioctl(fp, FIOGETOWN, &tmp, td->td_ucred, td);
588 td->td_retval[0] = tmp;
593 error = fget_fcntl(td, fd, &cap_fcntl_rights, F_SETOWN, &fp);
597 error = fo_ioctl(fp, FIOSETOWN, &tmp, td->td_ucred, td);
602 error = priv_check(td, PRIV_NFS_LOCKD);
610 /* FALLTHROUGH F_SETLK */
614 flp = (struct flock *)arg;
615 if ((flg & F_REMOTE) != 0 && flp->l_sysid == 0) {
620 error = fget_unlocked(fdp, fd, &cap_flock_rights, &fp);
623 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
629 if (flp->l_whence == SEEK_CUR) {
630 foffset = foffset_get(fp);
633 foffset > OFF_MAX - flp->l_start)) {
638 flp->l_start += foffset;
642 switch (flp->l_type) {
644 if ((fp->f_flag & FREAD) == 0) {
648 if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
649 PROC_LOCK(p->p_leader);
650 p->p_leader->p_flag |= P_ADVLOCK;
651 PROC_UNLOCK(p->p_leader);
653 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
657 if ((fp->f_flag & FWRITE) == 0) {
661 if ((p->p_leader->p_flag & P_ADVLOCK) == 0) {
662 PROC_LOCK(p->p_leader);
663 p->p_leader->p_flag |= P_ADVLOCK;
664 PROC_UNLOCK(p->p_leader);
666 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
670 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
674 if (flg != F_REMOTE) {
678 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
679 F_UNLCKSYS, flp, flg);
685 if (error != 0 || flp->l_type == F_UNLCK ||
686 flp->l_type == F_UNLCKSYS) {
692 * Check for a race with close.
694 * The vnode is now advisory locked (or unlocked, but this case
695 * is not really important) as the caller requested.
696 * We had to drop the filedesc lock, so we need to recheck if
697 * the descriptor is still valid, because if it was closed
698 * in the meantime we need to remove the advisory lock from the
699 * vnode - a close on any descriptor leading to an advisory-locked
700 * vnode removes that lock.
701 * We will return 0 on purpose in that case, as the result of
702 * successful advisory lock might have been externally visible
703 * already. This is fine - effectively we pretend to the caller
704 * that the closing thread was a bit slower and that the
705 * advisory lock succeeded before the close.
707 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp2);
713 flp->l_whence = SEEK_SET;
716 flp->l_type = F_UNLCK;
717 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
718 F_UNLCK, flp, F_POSIX);
725 error = fget_unlocked(fdp, fd, &cap_flock_rights, &fp);
728 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
733 flp = (struct flock *)arg;
734 if (flp->l_type != F_RDLCK && flp->l_type != F_WRLCK &&
735 flp->l_type != F_UNLCK) {
740 if (flp->l_whence == SEEK_CUR) {
741 foffset = foffset_get(fp);
742 if ((flp->l_start > 0 &&
743 foffset > OFF_MAX - flp->l_start) ||
745 foffset < OFF_MIN - flp->l_start)) {
750 flp->l_start += foffset;
753 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK, flp,
759 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
762 error = fo_add_seals(fp, arg);
767 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
770 if (fo_get_seals(fp, &seals) == 0)
771 td->td_retval[0] = seals;
778 arg = arg ? 128 * 1024 : 0;
781 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
784 if (fp->f_type != DTYPE_VNODE || fp->f_ops == &path_fileops) {
790 if (vp->v_type != VREG) {
797 * Exclusive lock synchronizes against f_seqcount reads and
798 * writes in sequential_heuristic().
800 error = vn_lock(vp, LK_EXCLUSIVE);
806 bsize = fp->f_vnode->v_mount->mnt_stat.f_iosize;
807 arg = MIN(arg, INT_MAX - bsize + 1);
808 fp->f_seqcount[UIO_READ] = MIN(IO_SEQMAX,
809 (arg + bsize - 1) / bsize);
810 atomic_set_int(&fp->f_flag, FRDAHEAD);
812 atomic_clear_int(&fp->f_flag, FRDAHEAD);
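/*
 * Illustrative userland sketch (not part of this file): F_READAHEAD and
 * F_RDAHEAD, handled above, tune sequential read-ahead on a regular file;
 * F_RDAHEAD with a non-zero argument uses the 128 kB default seen above.
 */
#if 0
#include <fcntl.h>

static int
tune_readahead(int fd)
{
	/* Request roughly 1 MB of read-ahead for this descriptor. */
	if (fcntl(fd, F_READAHEAD, 1024 * 1024) == -1)
		return (-1);
	/* Alternatively, F_RDAHEAD merely toggles the default amount. */
	return (fcntl(fd, F_RDAHEAD, 1));
}
#endif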
820 * Check if the vnode is part of a union stack (either the
821 * "union" flag from mount(2) or unionfs).
823 * Prior to introduction of this op libc's readdir would call
824 * fstatfs(2), in effect unnecessarily copying kilobytes of
825 * data just to check fs name and a mount flag.
827 * Fixing the code to handle everything in the kernel instead
828 * is a non-trivial endeavor and has low priority, thus this
829 * horrible kludge facilitates the current behavior in a much
830 * cheaper manner until someone(tm) sorts this out.
832 error = fget_unlocked(fdp, fd, &cap_no_rights, &fp);
835 if (fp->f_type != DTYPE_VNODE) {
842 * Since we don't prevent dooming the vnode, even a non-NULL mp
843 * found here can become stale immediately. This is tolerable since
844 * mount points are type-stable (providing safe memory access)
845 * and any vfs op on this vnode going forward will return an
846 * error (meaning return value in this case is meaningless).
848 mp = atomic_load_ptr(&vp->v_mount);
849 if (__predict_false(mp == NULL)) {
854 td->td_retval[0] = 0;
855 if (mp->mnt_kern_flag & MNTK_UNIONFS ||
856 mp->mnt_flag & MNT_UNION)
857 td->td_retval[0] = 1;
862 #ifdef CAPABILITY_MODE
863 if (IN_CAPABILITY_MODE(td)) {
868 error = copyin((void *)arg, &kif_sz, sizeof(kif_sz));
871 if (kif_sz != sizeof(*kif)) {
875 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK | M_ZERO);
877 error = fget_cap_locked(fdp, fd, &cap_fcntl_rights, &fp, NULL);
878 if (error == 0 && fhold(fp)) {
879 export_file_to_kinfo(fp, fd, NULL, kif, fdp, 0);
880 FILEDESC_SUNLOCK(fdp);
882 if ((kif->kf_status & KF_ATTR_VALID) != 0) {
883 kif->kf_structsize = sizeof(*kif);
884 error = copyout(kif, (void *)arg, sizeof(*kif));
889 FILEDESC_SUNLOCK(fdp);
904 getmaxfd(struct thread *td)
907 return (min((int)lim_cur(td, RLIMIT_NOFILE), maxfilesperproc));
911 * Common code for dup, dup2, fcntl(F_DUPFD) and fcntl(F_DUP2FD).
914 kern_dup(struct thread *td, u_int mode, int flags, int old, int new)
916 struct filedesc *fdp;
917 struct filedescent *oldfde, *newfde;
919 struct file *delfp, *oldfp;
920 u_long *oioctls, *nioctls;
927 MPASS((flags & ~(FDDUP_FLAG_CLOEXEC)) == 0);
928 MPASS(mode < FDDUP_LASTMODE);
931 /* XXXRW: if (flags & FDDUP_FIXED) AUDIT_ARG_FD2(new); */
934 * Verify we have a valid descriptor to dup from and possibly to
935 * dup to. Unlike dup() and dup2(), fcntl()'s F_DUPFD should
936 * return EINVAL when the new descriptor is out of bounds.
941 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
942 maxfd = getmaxfd(td);
944 return (mode == FDDUP_FCNTL ? EINVAL : EBADF);
948 if (fget_locked(fdp, old) == NULL)
950 if ((mode == FDDUP_FIXED || mode == FDDUP_MUSTREPLACE) && old == new) {
951 td->td_retval[0] = new;
952 if (flags & FDDUP_FLAG_CLOEXEC)
953 fdp->fd_ofiles[new].fde_flags |= UF_EXCLOSE;
958 oldfde = &fdp->fd_ofiles[old];
959 oldfp = oldfde->fde_file;
964 * If the caller specified a file descriptor, make sure the file
965 * table is large enough to hold it, and grab it. Otherwise, just
966 * allocate a new descriptor the usual way.
971 if ((error = fdalloc(td, new, &new)) != 0) {
976 case FDDUP_MUSTREPLACE:
977 /* Target file descriptor must exist. */
978 if (fget_locked(fdp, new) == NULL) {
984 if (new >= fdp->fd_nfiles) {
986 * The resource limits are here instead of e.g.
987 * fdalloc(), because the file descriptor table may be
988 * shared between processes, so we can't really use
989 * racct_add()/racct_sub(). Instead of counting the
990 * number of actually allocated descriptors, just put
991 * the limit on the size of the file descriptor table.
994 if (RACCT_ENABLED()) {
995 error = racct_set_unlocked(p, RACCT_NOFILE, new + 1);
1003 fdgrowtable_exp(fdp, new + 1);
1005 if (!fdisused(fdp, new))
1009 KASSERT(0, ("%s unsupported mode %d", __func__, mode));
1012 KASSERT(old != new, ("new fd is same as old"));
1014 /* Refetch oldfde because the table may have grown and old one freed. */
1015 oldfde = &fdp->fd_ofiles[old];
1016 KASSERT(oldfp == oldfde->fde_file,
1017 ("fdt_ofiles shift from growth observed at fd %d",
1020 newfde = &fdp->fd_ofiles[new];
1021 delfp = newfde->fde_file;
1023 nioctls = filecaps_copy_prep(&oldfde->fde_caps);
1026 * Duplicate the source descriptor.
1029 seqc_write_begin(&newfde->fde_seqc);
1031 oioctls = filecaps_free_prep(&newfde->fde_caps);
1032 memcpy(newfde, oldfde, fde_change_size);
1033 filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
1035 if ((flags & FDDUP_FLAG_CLOEXEC) != 0)
1036 newfde->fde_flags = oldfde->fde_flags | UF_EXCLOSE;
1038 newfde->fde_flags = oldfde->fde_flags & ~UF_EXCLOSE;
1040 seqc_write_end(&newfde->fde_seqc);
1042 td->td_retval[0] = new;
1046 if (delfp != NULL) {
1047 (void) closefp(fdp, new, delfp, td, true, false);
1048 FILEDESC_UNLOCK_ASSERT(fdp);
1051 FILEDESC_XUNLOCK(fdp);
1054 filecaps_free_finish(oioctls);
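/*
 * Illustrative userland sketch (not part of this file): besides dup(2) and
 * dup2(2), kern_dup() above also backs fcntl(2)'s F_DUPFD and
 * F_DUPFD_CLOEXEC, which allocate the lowest free descriptor greater than or
 * equal to the supplied argument.
 */
#if 0
#include <fcntl.h>

static int
dup_at_or_above(int fd, int minfd)
{
	/* The new descriptor is >= minfd and already has close-on-exec set. */
	return (fcntl(fd, F_DUPFD_CLOEXEC, minfd));
}
#endif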
1059 sigiofree(struct sigio *sigio)
1061 crfree(sigio->sio_ucred);
1062 free(sigio, M_SIGIO);
1065 static struct sigio *
1066 funsetown_locked(struct sigio *sigio)
1071 SIGIO_ASSERT_LOCKED();
1075 *sigio->sio_myref = NULL;
1076 if (sigio->sio_pgid < 0) {
1077 pg = sigio->sio_pgrp;
1079 SLIST_REMOVE(&pg->pg_sigiolst, sigio, sigio, sio_pgsigio);
1082 p = sigio->sio_proc;
1084 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
1091 * If sigio is on the list associated with a process or process group,
1092 * disable signalling from the device, remove sigio from the list and
1096 funsetown(struct sigio **sigiop)
1098 struct sigio *sigio;
1100 /* Racy check, consumers must provide synchronization. */
1101 if (*sigiop == NULL)
1105 sigio = funsetown_locked(*sigiop);
1112 * Free a list of sigio structures. The caller must ensure that new sigio
1113 * structures cannot be added after this point. For process groups this is
1114 * guaranteed using the proctree lock; for processes, the P_WEXIT flag serves a similar purpose.
1118 funsetownlst(struct sigiolst *sigiolst)
1122 struct sigio *sigio, *tmp;
1125 sigio = SLIST_FIRST(sigiolst);
1133 sigio = SLIST_FIRST(sigiolst);
1134 if (sigio == NULL) {
1140 * Every entry of the list should belong to a single proc or pgrp.
1142 if (sigio->sio_pgid < 0) {
1143 pg = sigio->sio_pgrp;
1144 sx_assert(&proctree_lock, SX_XLOCKED);
1146 } else /* if (sigio->sio_pgid > 0) */ {
1147 p = sigio->sio_proc;
1149 KASSERT((p->p_flag & P_WEXIT) != 0,
1150 ("%s: process %p is not exiting", __func__, p));
1153 SLIST_FOREACH(sigio, sigiolst, sio_pgsigio) {
1154 *sigio->sio_myref = NULL;
1156 KASSERT(sigio->sio_pgid < 0,
1157 ("Proc sigio in pgrp sigio list"));
1158 KASSERT(sigio->sio_pgrp == pg,
1159 ("Bogus pgrp in sigio list"));
1160 } else /* if (p != NULL) */ {
1161 KASSERT(sigio->sio_pgid > 0,
1162 ("Pgrp sigio in proc sigio list"));
1163 KASSERT(sigio->sio_proc == p,
1164 ("Bogus proc in sigio list"));
1174 SLIST_FOREACH_SAFE(sigio, sigiolst, sio_pgsigio, tmp)
1179 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1181 * After permission checking, add a sigio structure to the sigio list for
1182 * the process or process group.
1185 fsetown(pid_t pgid, struct sigio **sigiop)
1189 struct sigio *osigio, *sigio;
1197 sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
1198 sigio->sio_pgid = pgid;
1199 sigio->sio_ucred = crhold(curthread->td_ucred);
1200 sigio->sio_myref = sigiop;
1204 ret = pget(pgid, PGET_NOTWEXIT | PGET_NOTID | PGET_HOLD, &proc);
1206 osigio = funsetown_locked(*sigiop);
1210 if ((proc->p_flag & P_WEXIT) != 0) {
1212 } else if (proc->p_session !=
1213 curthread->td_proc->p_session) {
1215 * Policy - Don't allow a process to FSETOWN a
1216 * process in another session.
1218 * Remove this test to allow maximum flexibility
1219 * or restrict FSETOWN to the current process or
1220 * process group for maximum safety.
1224 sigio->sio_proc = proc;
1225 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio,
1230 } else /* if (pgid < 0) */ {
1231 sx_slock(&proctree_lock);
1233 osigio = funsetown_locked(*sigiop);
1234 pgrp = pgfind(-pgid);
1238 if (pgrp->pg_session != curthread->td_proc->p_session) {
1240 * Policy - Don't allow a process to FSETOWN a
1241 * process in another session.
1243 * Remove this test to allow maximum flexibility
1244 * or restrict FSETOWN to the current process or
1245 * process group for maximum safety.
1249 sigio->sio_pgrp = pgrp;
1250 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio,
1255 sx_sunlock(&proctree_lock);
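/*
 * Illustrative userland sketch (not part of this file): the F_SETOWN/FIOSETOWN
 * path above records the SIGIO recipient; a process typically pairs it with
 * O_ASYNC so the descriptor actually generates the signal when it becomes
 * ready.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
arrange_sigio(int fd)
{
	int flags;

	/* Deliver SIGIO for this descriptor to the calling process. */
	if (fcntl(fd, F_SETOWN, getpid()) == -1)
		return (-1);
	flags = fcntl(fd, F_GETFL);
	if (flags == -1)
		return (-1);
	/* O_ASYNC (FASYNC) enables signal-driven I/O on the descriptor. */
	return (fcntl(fd, F_SETFL, flags | O_ASYNC));
}
#endif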
1266 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1269 fgetown(struct sigio **sigiop)
1274 pgid = (*sigiop != NULL) ? (*sigiop)->sio_pgid : 0;
1280 closefp_impl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1285 FILEDESC_XLOCK_ASSERT(fdp);
1288 * We now hold the fp reference that used to be owned by the
1289 * descriptor array. We have to unlock the FILEDESC *AFTER*
1290 * knote_fdclose to prevent a race of the fd getting opened, a knote
1291 * added, and deleting a knote for the new fd.
1293 if (__predict_false(!TAILQ_EMPTY(&fdp->fd_kqlist)))
1294 knote_fdclose(td, fd);
1297 * We need to notify mqueue if the object is of type mqueue.
1299 if (__predict_false(fp->f_type == DTYPE_MQUEUE))
1300 mq_fdclose(td, fd, fp);
1301 FILEDESC_XUNLOCK(fdp);
1304 if (AUDITING_TD(td) && audit)
1305 audit_sysclose(td, fd, fp);
1307 error = closef(fp, td);
1310 * All paths leading up to closefp() will have already removed or
1311 * replaced the fd in the filedesc table, so a restart would not
1312 * operate on the same file.
1314 if (error == ERESTART)
1321 closefp_hl(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1322 bool holdleaders, bool audit)
1326 FILEDESC_XLOCK_ASSERT(fdp);
1329 if (td->td_proc->p_fdtol != NULL) {
1331 * Ask fdfree() to sleep to ensure that all relevant
1332 * process leaders can be traversed in closef().
1334 fdp->fd_holdleaderscount++;
1336 holdleaders = false;
1340 error = closefp_impl(fdp, fd, fp, td, audit);
1342 FILEDESC_XLOCK(fdp);
1343 fdp->fd_holdleaderscount--;
1344 if (fdp->fd_holdleaderscount == 0 &&
1345 fdp->fd_holdleaderswakeup != 0) {
1346 fdp->fd_holdleaderswakeup = 0;
1347 wakeup(&fdp->fd_holdleaderscount);
1349 FILEDESC_XUNLOCK(fdp);
1355 closefp(struct filedesc *fdp, int fd, struct file *fp, struct thread *td,
1356 bool holdleaders, bool audit)
1359 FILEDESC_XLOCK_ASSERT(fdp);
1361 if (__predict_false(td->td_proc->p_fdtol != NULL)) {
1362 return (closefp_hl(fdp, fd, fp, td, holdleaders, audit));
1364 return (closefp_impl(fdp, fd, fp, td, audit));
1369 * Close a file descriptor.
1371 #ifndef _SYS_SYSPROTO_H_
1378 sys_close(struct thread *td, struct close_args *uap)
1381 return (kern_close(td, uap->fd));
1385 kern_close(struct thread *td, int fd)
1387 struct filedesc *fdp;
1390 fdp = td->td_proc->p_fd;
1392 FILEDESC_XLOCK(fdp);
1393 if ((fp = fget_locked(fdp, fd)) == NULL) {
1394 FILEDESC_XUNLOCK(fdp);
1399 /* closefp() drops the FILEDESC lock for us. */
1400 return (closefp(fdp, fd, fp, td, true, true));
1404 close_range_cloexec(struct thread *td, u_int lowfd, u_int highfd)
1406 struct filedesc *fdp;
1407 struct fdescenttbl *fdt;
1408 struct filedescent *fde;
1411 fdp = td->td_proc->p_fd;
1412 FILEDESC_XLOCK(fdp);
1413 fdt = atomic_load_ptr(&fdp->fd_files);
1414 highfd = MIN(highfd, fdt->fdt_nfiles - 1);
1416 if (__predict_false(fd > highfd)) {
1419 for (; fd <= highfd; fd++) {
1420 fde = &fdt->fdt_ofiles[fd];
1421 if (fde->fde_file != NULL)
1422 fde->fde_flags |= UF_EXCLOSE;
1425 FILEDESC_XUNLOCK(fdp);
1430 close_range_impl(struct thread *td, u_int lowfd, u_int highfd)
1432 struct filedesc *fdp;
1433 const struct fdescenttbl *fdt;
1437 fdp = td->td_proc->p_fd;
1438 FILEDESC_XLOCK(fdp);
1439 fdt = atomic_load_ptr(&fdp->fd_files);
1440 highfd = MIN(highfd, fdt->fdt_nfiles - 1);
1442 if (__predict_false(fd > highfd)) {
1446 fp = fdt->fdt_ofiles[fd].fde_file;
1452 (void) closefp(fdp, fd, fp, td, true, true);
1455 FILEDESC_XLOCK(fdp);
1456 fdt = atomic_load_ptr(&fdp->fd_files);
1461 FILEDESC_XUNLOCK(fdp);
1467 kern_close_range(struct thread *td, int flags, u_int lowfd, u_int highfd)
1471 * Check this prior to clamping; closefrom(3) with only fd 0, 1, and 2
1472 * open should not be a usage error. From a close_range() perspective,
1473 * close_range(3, ~0U, 0) in the same scenario should also likely not
1474 * be a usage error, as all fds from 3 up are in fact already closed.
1476 if (highfd < lowfd) {
1480 if ((flags & CLOSE_RANGE_CLOEXEC) != 0)
1481 return (close_range_cloexec(td, lowfd, highfd));
1483 return (close_range_impl(td, lowfd, highfd));
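/*
 * Illustrative userland sketch (not part of this file): closing every
 * descriptor at or above a given number, as discussed above, maps directly
 * onto close_range(2) (and is what the closefrom(2) compatibility path below
 * does as well).
 */
#if 0
#include <unistd.h>

static int
close_from(int lowfd)
{
	/*
	 * Close all descriptors in [lowfd, UINT_MAX]; a flags value of 0
	 * really closes them, while CLOSE_RANGE_CLOEXEC would only mark them
	 * close-on-exec.
	 */
	return (close_range(lowfd, ~0U, 0));
}
#endif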
1486 #ifndef _SYS_SYSPROTO_H_
1487 struct close_range_args {
1494 sys_close_range(struct thread *td, struct close_range_args *uap)
1497 AUDIT_ARG_FD(uap->lowfd);
1498 AUDIT_ARG_CMD(uap->highfd);
1499 AUDIT_ARG_FFLAGS(uap->flags);
1501 if ((uap->flags & ~(CLOSE_RANGE_CLOEXEC)) != 0)
1503 return (kern_close_range(td, uap->flags, uap->lowfd, uap->highfd));
1506 #ifdef COMPAT_FREEBSD12
1508 * Close open file descriptors.
1510 #ifndef _SYS_SYSPROTO_H_
1511 struct freebsd12_closefrom_args {
1517 freebsd12_closefrom(struct thread *td, struct freebsd12_closefrom_args *uap)
1521 AUDIT_ARG_FD(uap->lowfd);
1524 * Treat negative starting file descriptor values identically to
1525 * closefrom(0) which closes all files.
1527 lowfd = MAX(0, uap->lowfd);
1528 return (kern_close_range(td, 0, lowfd, ~0U));
1530 #endif /* COMPAT_FREEBSD12 */
1532 #if defined(COMPAT_43)
1534 * Return status information about a file descriptor.
1536 #ifndef _SYS_SYSPROTO_H_
1537 struct ofstat_args {
1544 ofstat(struct thread *td, struct ofstat_args *uap)
1550 error = kern_fstat(td, uap->fd, &ub);
1553 error = copyout(&oub, uap->sb, sizeof(oub));
1557 #endif /* COMPAT_43 */
1559 #if defined(COMPAT_FREEBSD11)
1561 freebsd11_fstat(struct thread *td, struct freebsd11_fstat_args *uap)
1564 struct freebsd11_stat osb;
1567 error = kern_fstat(td, uap->fd, &sb);
1570 error = freebsd11_cvtstat(&sb, &osb);
1572 error = copyout(&osb, uap->sb, sizeof(osb));
1575 #endif /* COMPAT_FREEBSD11 */
1578 * Return status information about a file descriptor.
1580 #ifndef _SYS_SYSPROTO_H_
1588 sys_fstat(struct thread *td, struct fstat_args *uap)
1593 error = kern_fstat(td, uap->fd, &ub);
1595 error = copyout(&ub, uap->sb, sizeof(ub));
1600 kern_fstat(struct thread *td, int fd, struct stat *sbp)
1607 error = fget(td, fd, &cap_fstat_rights, &fp);
1608 if (__predict_false(error != 0))
1611 AUDIT_ARG_FILE(td->td_proc, fp);
1613 error = fo_stat(fp, sbp, td->td_ucred, td);
1615 #ifdef __STAT_TIME_T_EXT
1616 sbp->st_atim_ext = 0;
1617 sbp->st_mtim_ext = 0;
1618 sbp->st_ctim_ext = 0;
1619 sbp->st_btim_ext = 0;
1622 if (KTRPOINT(td, KTR_STRUCT))
1623 ktrstat_error(sbp, error);
1628 #if defined(COMPAT_FREEBSD11)
1630 * Return status information about a file descriptor.
1632 #ifndef _SYS_SYSPROTO_H_
1633 struct freebsd11_nfstat_args {
1640 freebsd11_nfstat(struct thread *td, struct freebsd11_nfstat_args *uap)
1646 error = kern_fstat(td, uap->fd, &ub);
1648 freebsd11_cvtnstat(&ub, &nub);
1649 error = copyout(&nub, uap->sb, sizeof(nub));
1653 #endif /* COMPAT_FREEBSD11 */
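/*
 * Illustrative userland sketch (not part of this file): kern_fstat() above
 * backs fstat(2), which fills in a struct stat for an already-open
 * descriptor.
 */
#if 0
#include <sys/stat.h>
#include <stdint.h>
#include <stdio.h>

static int
print_size(int fd)
{
	struct stat sb;

	if (fstat(fd, &sb) == -1)
		return (-1);
	printf("size: %jd bytes, mode: %04o\n",
	    (intmax_t)sb.st_size, (unsigned int)(sb.st_mode & 07777));
	return (0);
}
#endif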
1656 * Return pathconf information about a file descriptor.
1658 #ifndef _SYS_SYSPROTO_H_
1659 struct fpathconf_args {
1666 sys_fpathconf(struct thread *td, struct fpathconf_args *uap)
1671 error = kern_fpathconf(td, uap->fd, uap->name, &value);
1673 td->td_retval[0] = value;
1678 kern_fpathconf(struct thread *td, int fd, int name, long *valuep)
1684 error = fget(td, fd, &cap_fpathconf_rights, &fp);
1688 if (name == _PC_ASYNC_IO) {
1689 *valuep = _POSIX_ASYNCHRONOUS_IO;
1694 vn_lock(vp, LK_SHARED | LK_RETRY);
1695 error = VOP_PATHCONF(vp, name, valuep);
1697 } else if (fp->f_type == DTYPE_PIPE || fp->f_type == DTYPE_SOCKET) {
1698 if (name != _PC_PIPE_BUF) {
1713 * Copy a filecaps structure, allocating memory for the ioctls array if needed.
1715 * The last parameter indicates whether the fdtable is locked. If it is not and
1716 * ioctls are encountered, copying fails and the caller must lock the table.
1718 * Note that if the table was not locked, the caller has to check the relevant
1719 * sequence counter to determine whether the operation was successful.
1722 filecaps_copy(const struct filecaps *src, struct filecaps *dst, bool locked)
1726 if (src->fc_ioctls != NULL && !locked)
1728 memcpy(dst, src, sizeof(*src));
1729 if (src->fc_ioctls == NULL)
1732 KASSERT(src->fc_nioctls > 0,
1733 ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1735 size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1736 dst->fc_ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1737 memcpy(dst->fc_ioctls, src->fc_ioctls, size);
1742 filecaps_copy_prep(const struct filecaps *src)
1747 if (__predict_true(src->fc_ioctls == NULL))
1750 KASSERT(src->fc_nioctls > 0,
1751 ("fc_ioctls != NULL, but fc_nioctls=%hd", src->fc_nioctls));
1753 size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1754 ioctls = malloc(size, M_FILECAPS, M_WAITOK);
1759 filecaps_copy_finish(const struct filecaps *src, struct filecaps *dst,
1765 if (__predict_true(src->fc_ioctls == NULL)) {
1766 MPASS(ioctls == NULL);
1770 size = sizeof(src->fc_ioctls[0]) * src->fc_nioctls;
1771 dst->fc_ioctls = ioctls;
1772 bcopy(src->fc_ioctls, dst->fc_ioctls, size);
1776 * Move filecaps structure to the new place and clear the old place.
1779 filecaps_move(struct filecaps *src, struct filecaps *dst)
1783 bzero(src, sizeof(*src));
1787 * Fill the given filecaps structure with full rights.
1790 filecaps_fill(struct filecaps *fcaps)
1793 CAP_ALL(&fcaps->fc_rights);
1794 fcaps->fc_ioctls = NULL;
1795 fcaps->fc_nioctls = -1;
1796 fcaps->fc_fcntls = CAP_FCNTL_ALL;
1800 * Free memory allocated within filecaps structure.
1803 filecaps_free(struct filecaps *fcaps)
1806 free(fcaps->fc_ioctls, M_FILECAPS);
1807 bzero(fcaps, sizeof(*fcaps));
1811 filecaps_free_prep(struct filecaps *fcaps)
1815 ioctls = fcaps->fc_ioctls;
1816 bzero(fcaps, sizeof(*fcaps));
1821 filecaps_free_finish(u_long *ioctls)
1824 free(ioctls, M_FILECAPS);
1828 * Validate the given filecaps structure.
1831 filecaps_validate(const struct filecaps *fcaps, const char *func)
1834 KASSERT(cap_rights_is_valid(&fcaps->fc_rights),
1835 ("%s: invalid rights", func));
1836 KASSERT((fcaps->fc_fcntls & ~CAP_FCNTL_ALL) == 0,
1837 ("%s: invalid fcntls", func));
1838 KASSERT(fcaps->fc_fcntls == 0 ||
1839 cap_rights_is_set(&fcaps->fc_rights, CAP_FCNTL),
1840 ("%s: fcntls without CAP_FCNTL", func));
1841 KASSERT(fcaps->fc_ioctls != NULL ? fcaps->fc_nioctls > 0 :
1842 (fcaps->fc_nioctls == -1 || fcaps->fc_nioctls == 0),
1843 ("%s: invalid ioctls", func));
1844 KASSERT(fcaps->fc_nioctls == 0 ||
1845 cap_rights_is_set(&fcaps->fc_rights, CAP_IOCTL),
1846 ("%s: ioctls without CAP_IOCTL", func));
1850 fdgrowtable_exp(struct filedesc *fdp, int nfd)
1854 FILEDESC_XLOCK_ASSERT(fdp);
1856 nfd1 = fdp->fd_nfiles * 2;
1859 fdgrowtable(fdp, nfd1);
1863 * Grow the file table to accommodate (at least) nfd descriptors.
1866 fdgrowtable(struct filedesc *fdp, int nfd)
1868 struct filedesc0 *fdp0;
1869 struct freetable *ft;
1870 struct fdescenttbl *ntable;
1871 struct fdescenttbl *otable;
1872 int nnfiles, onfiles;
1873 NDSLOTTYPE *nmap, *omap;
1875 KASSERT(fdp->fd_nfiles > 0, ("zero-length file table"));
1877 /* save old values */
1878 onfiles = fdp->fd_nfiles;
1879 otable = fdp->fd_files;
1882 /* compute the size of the new table */
1883 nnfiles = NDSLOTS(nfd) * NDENTRIES; /* round up */
1884 if (nnfiles <= onfiles)
1885 /* the table is already large enough */
1889 * Allocate a new table. We need enough space for the number of
1890 * entries, file entries themselves and the struct freetable we will use
1891 * when we decommission the table and place it on the freelist.
1892 * We place the struct freetable in the middle so we don't have
1893 * to worry about padding.
1895 ntable = malloc(offsetof(struct fdescenttbl, fdt_ofiles) +
1896 nnfiles * sizeof(ntable->fdt_ofiles[0]) +
1897 sizeof(struct freetable),
1898 M_FILEDESC, M_ZERO | M_WAITOK);
1899 /* copy the old data */
1900 ntable->fdt_nfiles = nnfiles;
1901 memcpy(ntable->fdt_ofiles, otable->fdt_ofiles,
1902 onfiles * sizeof(ntable->fdt_ofiles[0]));
1905 * Allocate a new map only if the old is not large enough. It will
1906 * grow at a slower rate than the table as it can map more
1907 * entries than the table can hold.
1909 if (NDSLOTS(nnfiles) > NDSLOTS(onfiles)) {
1910 nmap = malloc(NDSLOTS(nnfiles) * NDSLOTSIZE, M_FILEDESC,
1912 /* copy over the old data and update the pointer */
1913 memcpy(nmap, omap, NDSLOTS(onfiles) * sizeof(*omap));
1918 * Make sure that ntable is correctly initialized before we replace
1919 * fd_files pointer. Otherwise fget_unlocked() may see inconsistent data.
1922 atomic_store_rel_ptr((volatile void *)&fdp->fd_files, (uintptr_t)ntable);
1925 * Free the old file table when not shared by other threads or processes.
1926 * The old file table is considered to be shared when either of the following is true:
1927 * - The process has more than one thread.
1928 * - The file descriptor table has been shared via fdshare().
1930 * When shared, the old file table will be placed on a freelist
1931 * which will be processed when the struct filedesc is released.
1933 * Note that if onfiles == NDFILE, we're dealing with the original
1934 * static allocation contained within (struct filedesc0 *)fdp,
1935 * which must not be freed.
1937 if (onfiles > NDFILE) {
1939 * Note we may be called here from fdinit while allocating a
1940 * table for a new process, in which case ->p_fd points elsewhere.
1943 if (curproc->p_fd != fdp || FILEDESC_IS_ONLY_USER(fdp)) {
1944 free(otable, M_FILEDESC);
1946 ft = (struct freetable *)&otable->fdt_ofiles[onfiles];
1947 fdp0 = (struct filedesc0 *)fdp;
1948 ft->ft_table = otable;
1949 SLIST_INSERT_HEAD(&fdp0->fd_free, ft, ft_next);
1953 * The map does not have the same possibility of threads still
1954 * holding references to it. So always free it as long as it
1955 * does not reference the original static allocation.
1957 if (NDSLOTS(onfiles) > NDSLOTS(NDFILE))
1958 free(omap, M_FILEDESC);
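/*
 * Worked example (assuming NDFILE is 20 and a 64-bit NDSLOTTYPE, so NDENTRIES
 * is 64): when a process opens its 21st file, fdalloc() asks for room for
 * min(2 * 20, maxfd) = 40 descriptors; fdgrowtable() rounds that up to
 * NDSLOTS(40) * NDENTRIES = 64 table entries. The single-slot static map
 * already covers 64 descriptors, so only the entry table is reallocated.
 */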
1962 * Allocate a file descriptor for the process.
1965 fdalloc(struct thread *td, int minfd, int *result)
1967 struct proc *p = td->td_proc;
1968 struct filedesc *fdp = p->p_fd;
1969 int fd, maxfd, allocfd;
1974 FILEDESC_XLOCK_ASSERT(fdp);
1976 if (fdp->fd_freefile > minfd)
1977 minfd = fdp->fd_freefile;
1979 maxfd = getmaxfd(td);
1982 * Search the bitmap for a free descriptor starting at minfd.
1983 * If none is found, grow the file table.
1985 fd = fd_first_free(fdp, minfd, fdp->fd_nfiles);
1986 if (__predict_false(fd >= maxfd))
1988 if (__predict_false(fd >= fdp->fd_nfiles)) {
1989 allocfd = min(fd * 2, maxfd);
1991 if (RACCT_ENABLED()) {
1992 error = racct_set_unlocked(p, RACCT_NOFILE, allocfd);
1998 * fd is already equal to first free descriptor >= minfd, so
1999 * we only need to grow the table and we are done.
2001 fdgrowtable_exp(fdp, allocfd);
2005 * Perform some sanity checks, then mark the file descriptor as
2006 * used and return it to the caller.
2008 KASSERT(fd >= 0 && fd < min(maxfd, fdp->fd_nfiles),
2009 ("invalid descriptor %d", fd));
2010 KASSERT(!fdisused(fdp, fd),
2011 ("fd_first_free() returned non-free descriptor"));
2012 KASSERT(fdp->fd_ofiles[fd].fde_file == NULL,
2013 ("file descriptor isn't free"));
2020 * Allocate n file descriptors for the process.
2023 fdallocn(struct thread *td, int minfd, int *fds, int n)
2025 struct proc *p = td->td_proc;
2026 struct filedesc *fdp = p->p_fd;
2029 FILEDESC_XLOCK_ASSERT(fdp);
2031 for (i = 0; i < n; i++)
2032 if (fdalloc(td, 0, &fds[i]) != 0)
2036 for (i--; i >= 0; i--)
2037 fdunused(fdp, fds[i]);
2045 * Create a new open file structure and allocate a file descriptor for the
2046 * process that refers to it. We add one reference to the file for the
2047 * descriptor table and one reference for resultfp. This is to prevent us
2048 * from being preempted and having the entry in the descriptor table closed
2049 * after we release the FILEDESC lock.
2052 falloc_caps(struct thread *td, struct file **resultfp, int *resultfd, int flags,
2053 struct filecaps *fcaps)
2058 MPASS(resultfp != NULL);
2059 MPASS(resultfd != NULL);
2061 error = _falloc_noinstall(td, &fp, 2);
2062 if (__predict_false(error != 0)) {
2066 error = finstall_refed(td, fp, &fd, flags, fcaps);
2067 if (__predict_false(error != 0)) {
2068 falloc_abort(td, fp);
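/*
 * Minimal sketch (hypothetical consumer; "my_create_fd", "my_data" and
 * "my_fileops" are stand-ins, not part of this file): the usual pattern for
 * using falloc_caps() is to create the file and descriptor, initialize the
 * file with finit(), drop the extra reference taken on behalf of the caller,
 * and return the descriptor number to userland.
 */
#if 0
static int
my_create_fd(struct thread *td, void *my_data, struct fileops *my_fileops)
{
	struct file *fp;
	int error, fd;

	error = falloc_caps(td, &fp, &fd, 0, NULL);
	if (error != 0)
		return (error);
	/*
	 * Publish flags, type (DTYPE_NONE stands in for a real type) and
	 * per-file data; f_ops is set last with release semantics, see finit().
	 */
	finit(fp, FREAD | FWRITE, DTYPE_NONE, my_data, my_fileops);
	/* Drop the reference falloc_caps() added for this caller. */
	fdrop(fp, td);
	td->td_retval[0] = fd;
	return (0);
}
#endif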
2079 * Create a new open file structure without allocating a file descriptor.
2082 _falloc_noinstall(struct thread *td, struct file **resultfp, u_int n)
2085 int maxuserfiles = maxfiles - (maxfiles / 20);
2087 static struct timeval lastfail;
2090 KASSERT(resultfp != NULL, ("%s: resultfp == NULL", __func__));
2093 openfiles_new = atomic_fetchadd_int(&openfiles, 1) + 1;
2094 if ((openfiles_new >= maxuserfiles &&
2095 priv_check(td, PRIV_MAXFILES) != 0) ||
2096 openfiles_new >= maxfiles) {
2097 atomic_subtract_int(&openfiles, 1);
2098 if (ppsratecheck(&lastfail, &curfail, 1)) {
2099 printf("kern.maxfiles limit exceeded by uid %i, (%s) "
2100 "please see tuning(7).\n", td->td_ucred->cr_ruid, td->td_proc->p_comm);
2104 fp = uma_zalloc(file_zone, M_WAITOK);
2105 bzero(fp, sizeof(*fp));
2106 refcount_init(&fp->f_count, n);
2107 fp->f_cred = crhold(td->td_ucred);
2108 fp->f_ops = &badfileops;
2114 falloc_abort(struct thread *td, struct file *fp)
2118 * For assertion purposes.
2120 refcount_init(&fp->f_count, 0);
2125 * Install a file in a file descriptor table.
2128 _finstall(struct filedesc *fdp, struct file *fp, int fd, int flags,
2129 struct filecaps *fcaps)
2131 struct filedescent *fde;
2135 filecaps_validate(fcaps, __func__);
2136 FILEDESC_XLOCK_ASSERT(fdp);
2138 fde = &fdp->fd_ofiles[fd];
2140 seqc_write_begin(&fde->fde_seqc);
2143 fde->fde_flags = (flags & O_CLOEXEC) != 0 ? UF_EXCLOSE : 0;
2145 filecaps_move(fcaps, &fde->fde_caps);
2147 filecaps_fill(&fde->fde_caps);
2149 seqc_write_end(&fde->fde_seqc);
2154 finstall_refed(struct thread *td, struct file *fp, int *fd, int flags,
2155 struct filecaps *fcaps)
2157 struct filedesc *fdp = td->td_proc->p_fd;
2162 FILEDESC_XLOCK(fdp);
2163 error = fdalloc(td, 0, fd);
2164 if (__predict_true(error == 0)) {
2165 _finstall(fdp, fp, *fd, flags, fcaps);
2167 FILEDESC_XUNLOCK(fdp);
2172 finstall(struct thread *td, struct file *fp, int *fd, int flags,
2173 struct filecaps *fcaps)
2181 error = finstall_refed(td, fp, fd, flags, fcaps);
2182 if (__predict_false(error != 0)) {
2189 * Build a new filedesc structure from another.
2191 * If fdp is not NULL, return with it shared locked.
2194 fdinit(struct filedesc *fdp, bool prepfiles, int *lastfile)
2196 struct filedesc0 *newfdp0;
2197 struct filedesc *newfdp;
2200 MPASS(lastfile != NULL);
2202 MPASS(lastfile == NULL);
2204 newfdp0 = uma_zalloc(filedesc0_zone, M_WAITOK | M_ZERO);
2205 newfdp = &newfdp0->fd_fd;
2207 /* Create the file descriptor table. */
2208 FILEDESC_LOCK_INIT(newfdp);
2209 refcount_init(&newfdp->fd_refcnt, 1);
2210 refcount_init(&newfdp->fd_holdcnt, 1);
2211 newfdp->fd_map = newfdp0->fd_dmap;
2212 newfdp->fd_files = (struct fdescenttbl *)&newfdp0->fd_dfiles;
2213 newfdp->fd_files->fdt_nfiles = NDFILE;
2218 FILEDESC_SLOCK(fdp);
2220 FILEDESC_SUNLOCK(fdp);
2225 *lastfile = fdlastfile(fdp);
2226 if (*lastfile < newfdp->fd_nfiles)
2228 FILEDESC_SUNLOCK(fdp);
2229 fdgrowtable(newfdp, *lastfile + 1);
2230 FILEDESC_SLOCK(fdp);
2237 * Build a pwddesc structure from another.
2238 * Copy the current, root, and jail root vnode references.
2240 * If pdp is not NULL, return with it shared locked.
2243 pdinit(struct pwddesc *pdp, bool keeplock)
2245 struct pwddesc *newpdp;
2248 newpdp = malloc(sizeof(*newpdp), M_PWDDESC, M_WAITOK | M_ZERO);
2250 PWDDESC_LOCK_INIT(newpdp);
2251 refcount_init(&newpdp->pd_refcount, 1);
2252 newpdp->pd_cmask = CMASK;
2255 newpwd = pwd_alloc();
2256 smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
2261 newpwd = pwd_hold_pwddesc(pdp);
2262 smr_serialized_store(&newpdp->pd_pwd, newpwd, true);
2264 PWDDESC_XUNLOCK(pdp);
2269 * Hold either filedesc or pwddesc of the passed process.
2271 * The process lock is used to synchronize against the target exiting and freeing the data structures.
2274 * Clearing can be illustrated in 3 steps:
2275 * 1. set the pointer to NULL. Either routine can race against it, hence the pointer is read with atomic_load_ptr.
2277 * 2. observe the process lock as not taken. Until then fdhold/pdhold can
2278 * race to either still see the pointer or find NULL. It is still safe to
2279 * grab a reference as clearing is stalled.
2280 * 3. after the lock is observed as not taken, any fdhold/pdhold calls are
2281 * guaranteed to see NULL, making it safe to finish clearing
2283 static struct filedesc *
2284 fdhold(struct proc *p)
2286 struct filedesc *fdp;
2288 PROC_LOCK_ASSERT(p, MA_OWNED);
2289 fdp = atomic_load_ptr(&p->p_fd);
2291 refcount_acquire(&fdp->fd_holdcnt);
2295 static struct pwddesc *
2296 pdhold(struct proc *p)
2298 struct pwddesc *pdp;
2300 PROC_LOCK_ASSERT(p, MA_OWNED);
2301 pdp = atomic_load_ptr(&p->p_pd);
2303 refcount_acquire(&pdp->pd_refcount);
2308 fddrop(struct filedesc *fdp)
2311 if (refcount_load(&fdp->fd_holdcnt) > 1) {
2312 if (refcount_release(&fdp->fd_holdcnt) == 0)
2316 FILEDESC_LOCK_DESTROY(fdp);
2317 uma_zfree(filedesc0_zone, fdp);
2321 pddrop(struct pwddesc *pdp)
2325 if (refcount_release_if_not_last(&pdp->pd_refcount))
2329 if (refcount_release(&pdp->pd_refcount) == 0) {
2330 PWDDESC_XUNLOCK(pdp);
2333 pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
2335 PWDDESC_XUNLOCK(pdp);
2338 PWDDESC_LOCK_DESTROY(pdp);
2339 free(pdp, M_PWDDESC);
2343 * Share a filedesc structure.
2346 fdshare(struct filedesc *fdp)
2349 refcount_acquire(&fdp->fd_refcnt);
2354 * Share a pwddesc structure.
2357 pdshare(struct pwddesc *pdp)
2359 refcount_acquire(&pdp->pd_refcount);
2364 * Unshare a filedesc structure, if necessary by making a copy
2367 fdunshare(struct thread *td)
2369 struct filedesc *tmp;
2370 struct proc *p = td->td_proc;
2372 if (refcount_load(&p->p_fd->fd_refcnt) == 1)
2375 tmp = fdcopy(p->p_fd);
2381 * Unshare a pwddesc structure.
2384 pdunshare(struct thread *td)
2386 struct pwddesc *pdp;
2391 if (p->p_pd->pd_refcount == 1)
2394 pdp = pdcopy(p->p_pd);
2400 fdinstall_remapped(struct thread *td, struct filedesc *fdp)
2404 td->td_proc->p_fd = fdp;
2408 * Copy a filedesc structure. A NULL pointer in returns a NULL reference;
2409 * this is to ease callers, not to catch errors.
2412 fdcopy(struct filedesc *fdp)
2414 struct filedesc *newfdp;
2415 struct filedescent *nfde, *ofde;
2420 newfdp = fdinit(fdp, true, &lastfile);
2421 /* copy all passable descriptors (i.e. not kqueue) */
2422 newfdp->fd_freefile = -1;
2423 for (i = 0; i <= lastfile; ++i) {
2424 ofde = &fdp->fd_ofiles[i];
2425 if (ofde->fde_file == NULL ||
2426 (ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) == 0 ||
2427 !fhold(ofde->fde_file)) {
2428 if (newfdp->fd_freefile == -1)
2429 newfdp->fd_freefile = i;
2432 nfde = &newfdp->fd_ofiles[i];
2434 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
2435 fdused_init(newfdp, i);
2437 if (newfdp->fd_freefile == -1)
2438 newfdp->fd_freefile = i;
2439 FILEDESC_SUNLOCK(fdp);
2444 * Copy a pwddesc structure.
2447 pdcopy(struct pwddesc *pdp)
2449 struct pwddesc *newpdp;
2453 newpdp = pdinit(pdp, true);
2454 newpdp->pd_cmask = pdp->pd_cmask;
2455 PWDDESC_XUNLOCK(pdp);
2460 * Copies a filedesc structure, while remapping all file descriptors
2461 * stored inside using a translation table.
2463 * File descriptors are copied over to the new file descriptor table,
2464 * regardless of whether the close-on-exec flag is set.
2467 fdcopy_remapped(struct filedesc *fdp, const int *fds, size_t nfds,
2468 struct filedesc **ret)
2470 struct filedesc *newfdp;
2471 struct filedescent *nfde, *ofde;
2472 int error, i, lastfile;
2476 newfdp = fdinit(fdp, true, &lastfile);
2477 if (nfds > lastfile + 1) {
2478 /* New table cannot be larger than the old one. */
2482 /* Copy all passable descriptors (i.e. not kqueue). */
2483 newfdp->fd_freefile = nfds;
2484 for (i = 0; i < nfds; ++i) {
2485 if (fds[i] < 0 || fds[i] > lastfile) {
2486 /* File descriptor out of bounds. */
2490 ofde = &fdp->fd_ofiles[fds[i]];
2491 if (ofde->fde_file == NULL) {
2492 /* Unused file descriptor. */
2496 if ((ofde->fde_file->f_ops->fo_flags & DFLAG_PASSABLE) == 0) {
2497 /* File descriptor cannot be passed. */
2501 if (!fhold(ofde->fde_file)) {
2505 nfde = &newfdp->fd_ofiles[i];
2507 filecaps_copy(&ofde->fde_caps, &nfde->fde_caps, true);
2508 fdused_init(newfdp, i);
2510 FILEDESC_SUNLOCK(fdp);
2514 FILEDESC_SUNLOCK(fdp);
2515 fdescfree_remapped(newfdp);
2520 * Clear POSIX-style locks. This is only used when fdp loses a reference (i.e.
2521 * one of processes using it exits) and the table used to be shared.
2524 fdclearlocks(struct thread *td)
2526 struct filedesc *fdp;
2527 struct filedesc_to_leader *fdtol;
2537 MPASS(fdtol != NULL);
2539 FILEDESC_XLOCK(fdp);
2540 KASSERT(fdtol->fdl_refcount > 0,
2541 ("filedesc_to_refcount botch: fdl_refcount=%d",
2542 fdtol->fdl_refcount));
2543 if (fdtol->fdl_refcount == 1 &&
2544 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
2545 lastfile = fdlastfile(fdp);
2546 for (i = 0; i <= lastfile; i++) {
2547 fp = fdp->fd_ofiles[i].fde_file;
2548 if (fp == NULL || fp->f_type != DTYPE_VNODE ||
2551 FILEDESC_XUNLOCK(fdp);
2552 lf.l_whence = SEEK_SET;
2555 lf.l_type = F_UNLCK;
2557 (void) VOP_ADVLOCK(vp,
2558 (caddr_t)p->p_leader, F_UNLCK,
2560 FILEDESC_XLOCK(fdp);
2565 if (fdtol->fdl_refcount == 1) {
2566 if (fdp->fd_holdleaderscount > 0 &&
2567 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
2569 * close() or kern_dup() has cleared a reference
2570 * in a shared file descriptor table.
2572 fdp->fd_holdleaderswakeup = 1;
2573 sx_sleep(&fdp->fd_holdleaderscount,
2574 FILEDESC_LOCK(fdp), PLOCK, "fdlhold", 0);
2577 if (fdtol->fdl_holdcount > 0) {
2579 * Ensure that fdtol->fdl_leader remains
2580 * valid in closef().
2582 fdtol->fdl_wakeup = 1;
2583 sx_sleep(fdtol, FILEDESC_LOCK(fdp), PLOCK,
2588 fdtol->fdl_refcount--;
2589 if (fdtol->fdl_refcount == 0 &&
2590 fdtol->fdl_holdcount == 0) {
2591 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2592 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2596 FILEDESC_XUNLOCK(fdp);
2598 free(fdtol, M_FILEDESC_TO_LEADER);
2602 * Release a filedesc structure.
2605 fdescfree_fds(struct thread *td, struct filedesc *fdp, bool needclose)
2607 struct filedesc0 *fdp0;
2608 struct freetable *ft, *tft;
2609 struct filedescent *fde;
2613 KASSERT(refcount_load(&fdp->fd_refcnt) == 0,
2614 ("%s: fd table %p carries references", __func__, fdp));
2617 * Serialize with threads iterating over the table, if any.
2619 if (refcount_load(&fdp->fd_holdcnt) > 1) {
2620 FILEDESC_XLOCK(fdp);
2621 FILEDESC_XUNLOCK(fdp);
2624 lastfile = fdlastfile_single(fdp);
2625 for (i = 0; i <= lastfile; i++) {
2626 fde = &fdp->fd_ofiles[i];
2631 (void) closef(fp, td);
2637 if (NDSLOTS(fdp->fd_nfiles) > NDSLOTS(NDFILE))
2638 free(fdp->fd_map, M_FILEDESC);
2639 if (fdp->fd_nfiles > NDFILE)
2640 free(fdp->fd_files, M_FILEDESC);
2642 fdp0 = (struct filedesc0 *)fdp;
2643 SLIST_FOREACH_SAFE(ft, &fdp0->fd_free, ft_next, tft)
2644 free(ft->ft_table, M_FILEDESC);
2650 fdescfree(struct thread *td)
2653 struct filedesc *fdp;
2660 if (RACCT_ENABLED())
2661 racct_set_unlocked(p, RACCT_NOFILE, 0);
2664 if (p->p_fdtol != NULL)
2668 * Check fdhold for an explanation.
2670 atomic_store_ptr(&p->p_fd, NULL);
2671 atomic_thread_fence_seq_cst();
2672 PROC_WAIT_UNLOCKED(p);
2674 if (refcount_release(&fdp->fd_refcnt) == 0)
2677 fdescfree_fds(td, fdp, 1);
2681 pdescfree(struct thread *td)
2684 struct pwddesc *pdp;
2691 * Check pdhold for an explanation.
2693 atomic_store_ptr(&p->p_pd, NULL);
2694 atomic_thread_fence_seq_cst();
2695 PROC_WAIT_UNLOCKED(p);
2701 fdescfree_remapped(struct filedesc *fdp)
2704 /* fdescfree_fds() asserts that fd_refcnt == 0. */
2705 if (!refcount_release(&fdp->fd_refcnt))
2706 panic("%s: fd table %p has extra references", __func__, fdp);
2708 fdescfree_fds(curthread, fdp, 0);
2712 * For setugid programs, we don't want people to use that setugidness
2713 * to generate error messages which write to a file that would
2714 * otherwise be off-limits to the process. We check for filesystems where
2715 * the vnode can change out from under us after execve (like [lin]procfs).
2717 * Since fdsetugidsafety calls this only for fd 0, 1 and 2, this check is
2718 * sufficient. We also don't check for setugidness since we know we are.
2721 is_unsafe(struct file *fp)
2725 if (fp->f_type != DTYPE_VNODE)
2729 return ((vp->v_vflag & VV_PROCDEP) != 0);
2733 * Make this setugid thing safe, if at all possible.
2736 fdsetugidsafety(struct thread *td)
2738 struct filedesc *fdp;
2742 fdp = td->td_proc->p_fd;
2743 KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2744 ("the fdtable should not be shared"));
2745 MPASS(fdp->fd_nfiles >= 3);
2746 for (i = 0; i <= 2; i++) {
2747 fp = fdp->fd_ofiles[i].fde_file;
2748 if (fp != NULL && is_unsafe(fp)) {
2749 FILEDESC_XLOCK(fdp);
2750 knote_fdclose(td, i);
2752 * NULL-out descriptor prior to close to avoid
2753 * a race while close blocks.
2756 FILEDESC_XUNLOCK(fdp);
2757 (void) closef(fp, td);
2763 * If a specific file object occupies a specific file descriptor, close the
2764 * file descriptor entry and drop a reference on the file object. This is a
2765 * convenience function to handle a subsequent error in a function that calls
2766 * falloc(); it handles the race in which another thread might have closed the
2767 * file descriptor out from under the thread creating the file object.
2770 fdclose(struct thread *td, struct file *fp, int idx)
2772 struct filedesc *fdp = td->td_proc->p_fd;
2774 FILEDESC_XLOCK(fdp);
2775 if (fdp->fd_ofiles[idx].fde_file == fp) {
2777 FILEDESC_XUNLOCK(fdp);
2780 FILEDESC_XUNLOCK(fdp);
2784 * Close any files on exec?
2787 fdcloseexec(struct thread *td)
2789 struct filedesc *fdp;
2790 struct filedescent *fde;
2794 fdp = td->td_proc->p_fd;
2795 KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2796 ("the fdtable should not be shared"));
2797 lastfile = fdlastfile_single(fdp);
2798 for (i = 0; i <= lastfile; i++) {
2799 fde = &fdp->fd_ofiles[i];
2801 if (fp != NULL && (fp->f_type == DTYPE_MQUEUE ||
2802 (fde->fde_flags & UF_EXCLOSE))) {
2803 FILEDESC_XLOCK(fdp);
2805 (void) closefp(fdp, i, fp, td, false, false);
2806 FILEDESC_UNLOCK_ASSERT(fdp);
2812 * It is unsafe for set[ug]id processes to be started with file
2813 * descriptors 0..2 closed, as these descriptors are given implicit
2814 * significance in the Standard C library. fdcheckstd() will create a
2815 * descriptor referencing /dev/null for each of stdin, stdout, and
2816 * stderr that is not already open.
2819 fdcheckstd(struct thread *td)
2821 struct filedesc *fdp;
2823 int i, error, devnull;
2825 fdp = td->td_proc->p_fd;
2826 KASSERT(refcount_load(&fdp->fd_refcnt) == 1,
2827 ("the fdtable should not be shared"));
2828 MPASS(fdp->fd_nfiles >= 3);
2830 for (i = 0; i <= 2; i++) {
2831 if (fdp->fd_ofiles[i].fde_file != NULL)
2834 save = td->td_retval[0];
2835 if (devnull != -1) {
2836 error = kern_dup(td, FDDUP_FIXED, 0, devnull, i);
2838 error = kern_openat(td, AT_FDCWD, "/dev/null",
2839 UIO_SYSSPACE, O_RDWR, 0);
2841 devnull = td->td_retval[0];
2842 KASSERT(devnull == i, ("we didn't get our fd"));
2845 td->td_retval[0] = save;
2853 * Internal form of close. Decrement reference count on file structure.
2854 * Note: td may be NULL when closing a file that was being passed in a message.
2858 closef(struct file *fp, struct thread *td)
2862 struct filedesc_to_leader *fdtol;
2863 struct filedesc *fdp;
2868 * POSIX record locking dictates that any close releases ALL
2869 * locks owned by this process. This is handled by setting
2870 * a flag in the unlock to free ONLY locks obeying POSIX
2871 * semantics, and not to free BSD-style file locks.
2872 * If the descriptor was in a message, POSIX-style locks
2873 * aren't passed with the descriptor, and the thread pointer
2874 * will be NULL. Callers should be careful only to pass a
2875 * NULL thread pointer when there really is no owning
2876 * context that might have locks, or the locks will be leaked.
2879 if (fp->f_type == DTYPE_VNODE) {
2881 if ((td->td_proc->p_leader->p_flag & P_ADVLOCK) != 0) {
2882 lf.l_whence = SEEK_SET;
2885 lf.l_type = F_UNLCK;
2886 (void) VOP_ADVLOCK(vp, (caddr_t)td->td_proc->p_leader,
2887 F_UNLCK, &lf, F_POSIX);
2889 fdtol = td->td_proc->p_fdtol;
2890 if (fdtol != NULL) {
2892 * Handle special case where file descriptor table is
2893 * shared between multiple process leaders.
2895 fdp = td->td_proc->p_fd;
2896 FILEDESC_XLOCK(fdp);
2897 for (fdtol = fdtol->fdl_next;
2898 fdtol != td->td_proc->p_fdtol;
2899 fdtol = fdtol->fdl_next) {
2900 if ((fdtol->fdl_leader->p_flag &
2903 fdtol->fdl_holdcount++;
2904 FILEDESC_XUNLOCK(fdp);
2905 lf.l_whence = SEEK_SET;
2908 lf.l_type = F_UNLCK;
2910 (void) VOP_ADVLOCK(vp,
2911 (caddr_t)fdtol->fdl_leader, F_UNLCK, &lf,
2913 FILEDESC_XLOCK(fdp);
2914 fdtol->fdl_holdcount--;
2915 if (fdtol->fdl_holdcount == 0 &&
2916 fdtol->fdl_wakeup != 0) {
2917 fdtol->fdl_wakeup = 0;
2921 FILEDESC_XUNLOCK(fdp);
2924 return (fdrop_close(fp, td));
2928 * Hack for file descriptor passing code.
2931 closef_nothread(struct file *fp)
2938 * Initialize the file pointer with the specified properties.
2940 * The ops are set with release semantics to be certain that the flags, type,
2941 * and data are visible when ops is. This is to prevent ops methods from being
2942 * called with bad data.
2945 finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
2950 atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
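/*
 * Illustrative sketch (not part of this file) of the publication pattern
 * described above, using C11 atomics in place of the kernel primitives: the
 * writer fills in the payload first and publishes the pointer with release
 * semantics, so a reader that observes the pointer with acquire semantics is
 * guaranteed to see the payload.
 *
 *	#include <stdatomic.h>
 *
 *	struct payload { int flag; int type; void *data; };
 *
 *	static struct payload pl;
 *	static _Atomic(struct payload *) published;
 *
 *	static void
 *	publish(int flag, int type, void *data)
 *	{
 *		pl.flag = flag;
 *		pl.type = type;
 *		pl.data = data;
 *		atomic_store_explicit(&published, &pl, memory_order_release);
 *	}
 *
 *	static struct payload *
 *	consume(void)
 *	{
 *		return (atomic_load_explicit(&published, memory_order_acquire));
 *	}
 */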
2954 finit_vnode(struct file *fp, u_int flag, void *data, struct fileops *ops)
2956 fp->f_seqcount[UIO_READ] = 1;
2957 fp->f_seqcount[UIO_WRITE] = 1;
2958 finit(fp, (flag & FMASK) | (fp->f_flag & FHASLOCK), DTYPE_VNODE,
2963 fget_cap_locked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
2964 struct file **fpp, struct filecaps *havecapsp)
2966 struct filedescent *fde;
2969 FILEDESC_LOCK_ASSERT(fdp);
2972 fde = fdeget_locked(fdp, fd);
2979 error = cap_check(cap_rights_fde_inline(fde), needrightsp);
2984 if (havecapsp != NULL)
2985 filecaps_copy(&fde->fde_caps, havecapsp, true);
2987 *fpp = fde->fde_file;
2995 fget_cap(struct thread *td, int fd, cap_rights_t *needrightsp,
2996 struct file **fpp, struct filecaps *havecapsp)
2998 struct filedesc *fdp = td->td_proc->p_fd;
3000 #ifndef CAPABILITIES
3001 error = fget_unlocked(fdp, fd, needrightsp, fpp);
3002 if (havecapsp != NULL && error == 0)
3003 filecaps_fill(havecapsp);
3010 error = fget_unlocked_seq(fdp, fd, needrightsp, &fp, &seq);
3014 if (havecapsp != NULL) {
3015 if (!filecaps_copy(&fdp->fd_ofiles[fd].fde_caps,
3016 havecapsp, false)) {
3022 if (!fd_modified(fdp, fd, seq))
3031 FILEDESC_SLOCK(fdp);
3032 error = fget_cap_locked(fdp, fd, needrightsp, fpp, havecapsp);
3033 if (error == 0 && !fhold(*fpp))
3035 FILEDESC_SUNLOCK(fdp);
3042 fgetvp_lookup_smr(int fd, struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
3044 const struct filedescent *fde;
3045 const struct fdescenttbl *fdt;
3046 struct filedesc *fdp;
3049 const cap_rights_t *haverights;
3050 cap_rights_t rights;
3053 VFS_SMR_ASSERT_ENTERED();
3055 rights = *ndp->ni_rightsneeded;
3056 cap_rights_set_one(&rights, CAP_LOOKUP);
3058 fdp = curproc->p_fd;
3059 fdt = fdp->fd_files;
3060 if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3062 seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3063 fde = &fdt->fdt_ofiles[fd];
3064 haverights = cap_rights_fde_inline(fde);
3066 if (__predict_false(fp == NULL))
3068 if (__predict_false(cap_check_inline_transient(haverights, &rights)))
3070 *fsearch = ((fp->f_flag & FSEARCH) != 0);
3072 if (__predict_false(vp == NULL)) {
3075 if (!filecaps_copy(&fde->fde_caps, &ndp->ni_filecaps, false)) {
3079 * Use an acquire barrier to force re-reading of fdt so it is
3080 * refreshed for verification.
3082 atomic_thread_fence_acq();
3083 fdt = fdp->fd_files;
3084 if (__predict_false(!seqc_consistent_nomb(fd_seqc(fdt, fd), seq)))
3087 * If the file descriptor doesn't have all rights,
3088 * all lookups relative to it must also be
3089 * strictly relative.
3091 * Not yet supported by fast path.
3094 if (!cap_rights_contains(&ndp->ni_filecaps.fc_rights, &rights) ||
3095 ndp->ni_filecaps.fc_fcntls != CAP_FCNTL_ALL ||
3096 ndp->ni_filecaps.fc_nioctls != -1) {
3098 ndp->ni_lcf |= NI_LCF_STRICTRELATIVE;
3108 fgetvp_lookup_smr(int fd, struct nameidata *ndp, struct vnode **vpp, bool *fsearch)
3110 const struct fdescenttbl *fdt;
3111 struct filedesc *fdp;
3115 VFS_SMR_ASSERT_ENTERED();
3117 fdp = curproc->p_fd;
3118 fdt = fdp->fd_files;
3119 if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3121 fp = fdt->fdt_ofiles[fd].fde_file;
3122 if (__predict_false(fp == NULL))
3124 *fsearch = ((fp->f_flag & FSEARCH) != 0);
3126 if (__predict_false(vp == NULL || vp->v_type != VDIR)) {
3130 * Use an acquire barrier to force re-reading of fdt so it is
3131 * refreshed for verification.
3133 atomic_thread_fence_acq();
3134 fdt = fdp->fd_files;
3135 if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
3137 filecaps_fill(&ndp->ni_filecaps);
3144 fget_unlocked_seq(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3145 struct file **fpp, seqc_t *seqp)
3148 const struct filedescent *fde;
3150 const struct fdescenttbl *fdt;
3154 cap_rights_t haverights;
3158 fdt = fdp->fd_files;
3159 if (__predict_false((u_int)fd >= fdt->fdt_nfiles))
3162 * Fetch the descriptor locklessly. We avoid fdrop() races by
3163 * never incrementing a reference count that has already dropped to 0. To accomplish this we have
3164 * to use a cmpset loop rather than an atomic_add. The descriptor
3165 * must be re-verified once we acquire a reference to be certain
3166 * that the identity is still correct and we did not lose a race
3167 * due to preemption.
3171 seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3172 fde = &fdt->fdt_ofiles[fd];
3173 haverights = *cap_rights_fde_inline(fde);
3175 if (!seqc_consistent(fd_seqc(fdt, fd), seq))
3178 fp = fdt->fdt_ofiles[fd].fde_file;
3183 error = cap_check_inline(&haverights, needrightsp);
3187 if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count))) {
3189 * Force a reload. Another thread could have reallocated the
3190 * table before this fd was closed, so it is possible
3191 * that the cached version contains a stale fp pointer.
3193 fdt = atomic_load_ptr(&fdp->fd_files);
3197 * Use an acquire barrier to force re-reading of fdt so it is
3198 * refreshed for verification.
3200 atomic_thread_fence_acq();
3201 fdt = fdp->fd_files;
3203 if (seqc_consistent_nomb(fd_seqc(fdt, fd), seq))
3205 if (fp == fdt->fdt_ofiles[fd].fde_file)
3208 fdrop(fp, curthread);
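/*
 * Illustrative sketch (not part of this file) of the "acquire only if not
 * zero" reference counting used above, written with C11 atomics as a
 * stand-in for the kernel's refcount_acquire_if_not_zero(): a compare-and-swap
 * loop refuses to resurrect an object whose count has already reached zero,
 * which a plain atomic increment could not guarantee.
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static bool
 *	acquire_if_not_zero(atomic_uint *count)
 *	{
 *		unsigned int old;
 *
 *		old = atomic_load_explicit(count, memory_order_relaxed);
 *		for (;;) {
 *			if (old == 0)
 *				return (false);	// object is being torn down
 *			if (atomic_compare_exchange_weak_explicit(count, &old,
 *			    old + 1, memory_order_acquire, memory_order_relaxed))
 *				return (true);
 *		}
 *	}
 */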
3220 * See the comments in fget_unlocked_seq for an explanation of how this works.
3222 * This is a simplified variant which bails out to the aforementioned routine
3223 * if anything goes wrong. In practice this only happens when userspace is
3224 * racing with itself.
3227 fget_unlocked(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3231 const struct filedescent *fde;
3233 const struct fdescenttbl *fdt;
3237 const cap_rights_t *haverights;
3240 fdt = fdp->fd_files;
3241 if (__predict_false((u_int)fd >= fdt->fdt_nfiles)) {
3246 seq = seqc_read_notmodify(fd_seqc(fdt, fd));
3247 fde = &fdt->fdt_ofiles[fd];
3248 haverights = cap_rights_fde_inline(fde);
3251 fp = fdt->fdt_ofiles[fd].fde_file;
3253 if (__predict_false(fp == NULL))
3256 if (__predict_false(cap_check_inline_transient(haverights, needrightsp)))
3259 if (__predict_false(!refcount_acquire_if_not_zero(&fp->f_count)))
3263 * Use an acquire barrier to force re-reading of fdt so it is
3264 * refreshed for verification.
3266 atomic_thread_fence_acq();
3267 fdt = fdp->fd_files;
3269 if (__predict_false(!seqc_consistent_nomb(fd_seqc(fdt, fd), seq)))
3271 if (__predict_false(fp != fdt->fdt_ofiles[fd].fde_file))
3277 fdrop(fp, curthread);
3280 return (fget_unlocked_seq(fdp, fd, needrightsp, fpp, NULL));
3284 * Translate fd -> file when the caller guarantees the file descriptor table
3285 * can't be changed by others.
3287 * Note that this does not mean the file object itself is only visible to the caller,
3288 * merely that it won't disappear even though no extra reference is taken here.
3290 * Must be paired with fput_only_user.
3294 fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3297 const struct filedescent *fde;
3298 const struct fdescenttbl *fdt;
3299 const cap_rights_t *haverights;
3303 MPASS(FILEDESC_IS_ONLY_USER(fdp));
3306 if (__predict_false(fd >= fdp->fd_nfiles))
3309 fdt = fdp->fd_files;
3310 fde = &fdt->fdt_ofiles[fd];
3312 if (__predict_false(fp == NULL))
3314 MPASS(refcount_load(&fp->f_count) > 0);
3315 haverights = cap_rights_fde_inline(fde);
3316 error = cap_check_inline(haverights, needrightsp);
3317 if (__predict_false(error != 0))
3324 fget_only_user(struct filedesc *fdp, int fd, cap_rights_t *needrightsp,
3329 MPASS(FILEDESC_IS_ONLY_USER(fdp));
3332 if (__predict_false(fd >= fdp->fd_nfiles))
3335 fp = fdp->fd_ofiles[fd].fde_file;
3336 if (__predict_false(fp == NULL))
3339 MPASS(refcount_load(&fp->f_count) > 0);
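/*
 * Hedged kernel-style usage sketch (not part of this file), assuming the
 * usual fget/fput pairing noted in the comment above; the exact argument
 * lists are abbreviated and should be checked against sys/file.h:
 *
 *	error = fget_only_user(fdp, fd, &cap_read_rights, &fp);
 *	if (error != 0)
 *		return (error);
 *	... use fp; the table cannot change underneath us ...
 *	fput_only_user(fdp, fp);
 */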
3346 * Extract the file pointer associated with the specified descriptor for the
3347 * current user process.
3349 * If the descriptor doesn't exist or doesn't match 'flags', EBADF is returned.
3352 * The file's rights are checked against the capability rights mask.
3354 * If an error occurred the non-zero error is returned and *fpp is set to
3355 * NULL. Otherwise *fpp is held and set and zero is returned. Caller is
3356 * responsible for fdrop().
3359 _fget(struct thread *td, int fd, struct file **fpp, int flags,
3360 cap_rights_t *needrightsp)
3362 struct filedesc *fdp;
3367 fdp = td->td_proc->p_fd;
3368 error = fget_unlocked(fdp, fd, needrightsp, &fp);
3369 if (__predict_false(error != 0))
3371 if (__predict_false(fp->f_ops == &badfileops)) {
3377 * FREAD and FWRITE failure return EBADF as per POSIX.
3383 if ((fp->f_flag & flags) == 0)
3387 if (fp->f_ops != &path_fileops &&
3388 ((fp->f_flag & (FREAD | FEXEC)) == 0 ||
3389 (fp->f_flag & FWRITE) != 0))
3395 KASSERT(0, ("wrong flags"));
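/*
 * Illustrative userland consequence (not part of this file) of the FREAD /
 * FWRITE checks above: an I/O operation on a descriptor opened without the
 * matching access mode fails with EBADF.
 *
 *	#include <errno.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/null", O_WRONLY);
 *	char c;
 *
 *	if (read(fd, &c, 1) == -1 && errno == EBADF)
 *		;	// expected: the descriptor lacks FREAD
 */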
3408 fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3411 return (_fget(td, fd, fpp, 0, rightsp));
3415 fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, vm_prot_t *maxprotp,
3419 #ifndef CAPABILITIES
3420 error = _fget(td, fd, fpp, 0, rightsp);
3421 if (maxprotp != NULL)
3422 *maxprotp = VM_PROT_ALL;
3425 cap_rights_t fdrights;
3426 struct filedesc *fdp;
3431 fdp = td->td_proc->p_fd;
3432 MPASS(cap_rights_is_set(rightsp, CAP_MMAP));
3434 error = fget_unlocked_seq(fdp, fd, rightsp, &fp, &seq);
3435 if (__predict_false(error != 0))
3437 if (__predict_false(fp->f_ops == &badfileops)) {
3441 if (maxprotp != NULL)
3442 fdrights = *cap_rights(fdp, fd);
3443 if (!fd_modified(fdp, fd, seq))
3449 * If requested, convert capability rights to access flags.
3451 if (maxprotp != NULL)
3452 *maxprotp = cap_rights_to_vmprot(&fdrights);
3459 fget_read(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3462 return (_fget(td, fd, fpp, FREAD, rightsp));
3466 fget_write(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
3469 return (_fget(td, fd, fpp, FWRITE, rightsp));
3473 fget_fcntl(struct thread *td, int fd, cap_rights_t *rightsp, int needfcntl,
3476 struct filedesc *fdp = td->td_proc->p_fd;
3477 #ifndef CAPABILITIES
3478 return (fget_unlocked(fdp, fd, rightsp, fpp));
3485 MPASS(cap_rights_is_set(rightsp, CAP_FCNTL));
3487 error = fget_unlocked_seq(fdp, fd, rightsp, &fp, &seq);
3490 error = cap_fcntl_check(fdp, fd, needfcntl);
3491 if (!fd_modified(fdp, fd, seq))
3505 * Like fget() but loads the underlying vnode, or returns an error if the
3506 * descriptor does not represent a vnode. Note that pipes use vnodes but
3507 * never have VM objects. The returned vnode will be vref()'d.
3509 * XXX: what about the unused flags ?
3512 _fgetvp(struct thread *td, int fd, int flags, cap_rights_t *needrightsp,
3519 error = _fget(td, fd, &fp, flags, needrightsp);
3522 if (fp->f_vnode == NULL) {
3534 fgetvp(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3537 return (_fgetvp(td, fd, 0, rightsp, vpp));
3541 fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
3542 struct filecaps *havecaps, struct vnode **vpp)
3544 struct filecaps caps;
3548 error = fget_cap(td, fd, needrightsp, &fp, &caps);
3551 if (fp->f_ops == &badfileops) {
3555 if (fp->f_vnode == NULL) {
3567 filecaps_free(&caps);
3573 fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3576 return (_fgetvp(td, fd, FREAD, rightsp, vpp));
3580 fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp, struct vnode **vpp)
3583 return (_fgetvp(td, fd, FEXEC, rightsp, vpp));
3588 fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
3592 return (_fgetvp(td, fd, FWRITE, rightsp, vpp));
3597 * Handle the last reference to a file being closed.
3599 * Without the noinline attribute clang keeps inlining this function throughout the
3600 * file whenever fdrop() is used.
3603 _fdrop(struct file *fp, struct thread *td)
3609 count = refcount_load(&fp->f_count);
3611 panic("fdrop: fp %p count %d", fp, count);
3613 error = fo_close(fp, td);
3614 atomic_subtract_int(&openfiles, 1);
3616 free(fp->f_advice, M_FADVISE);
3617 uma_zfree(file_zone, fp);
3623 * Apply an advisory lock on a file descriptor.
3625 * Just attempt to get a record lock of the requested type on the entire file
3626 * (l_whence = SEEK_SET, l_start = 0, l_len = 0).
3628 #ifndef _SYS_SYSPROTO_H_
3636 sys_flock(struct thread *td, struct flock_args *uap)
3643 error = fget(td, uap->fd, &cap_flock_rights, &fp);
3647 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
3650 if (fp->f_ops == &path_fileops) {
3656 lf.l_whence = SEEK_SET;
3659 if (uap->how & LOCK_UN) {
3660 lf.l_type = F_UNLCK;
3661 atomic_clear_int(&fp->f_flag, FHASLOCK);
3662 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK);
3665 if (uap->how & LOCK_EX)
3666 lf.l_type = F_WRLCK;
3667 else if (uap->how & LOCK_SH)
3668 lf.l_type = F_RDLCK;
3673 atomic_set_int(&fp->f_flag, FHASLOCK);
3674 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf,
3675 (uap->how & LOCK_NB) ? F_FLOCK : F_FLOCK | F_WAIT);
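/*
 * Illustrative userland sketch (not part of this file) of the flock(2)
 * interface implemented above; the lock always covers the whole file and is
 * associated with the open file, not the process.
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("lockfile", O_RDWR | O_CREAT, 0644);
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		;	// somebody else holds the lock
 *	... critical section ...
 *	(void)flock(fd, LOCK_UN);
 *	close(fd);	// also releases the lock
 */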
3681 * Duplicate the specified descriptor to a free descriptor.
3684 dupfdopen(struct thread *td, struct filedesc *fdp, int dfd, int mode,
3685 int openerror, int *indxp)
3687 struct filedescent *newfde, *oldfde;
3692 KASSERT(openerror == ENODEV || openerror == ENXIO,
3693 ("unexpected error %d in %s", openerror, __func__));
3696 * If the to-be-dup'd fd number is greater than the allowed number
3697 * of file descriptors, or the fd to be dup'd has already been
3698 * closed, then reject.
3700 FILEDESC_XLOCK(fdp);
3701 if ((fp = fget_locked(fdp, dfd)) == NULL) {
3702 FILEDESC_XUNLOCK(fdp);
3706 error = fdalloc(td, 0, &indx);
3708 FILEDESC_XUNLOCK(fdp);
3713 * There are two cases of interest here.
3715 * For ENODEV simply dup (dfd) to file descriptor (indx) and return.
3717 * For ENXIO steal away the file structure from (dfd) and store it in
3718 * (indx). (dfd) is effectively closed by this operation.
3720 switch (openerror) {
3723 * Check that the mode the file is being opened for is a
3724 * subset of the mode of the existing descriptor.
3726 if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
3727 fdunused(fdp, indx);
3728 FILEDESC_XUNLOCK(fdp);
3732 fdunused(fdp, indx);
3733 FILEDESC_XUNLOCK(fdp);
3736 newfde = &fdp->fd_ofiles[indx];
3737 oldfde = &fdp->fd_ofiles[dfd];
3738 ioctls = filecaps_copy_prep(&oldfde->fde_caps);
3740 seqc_write_begin(&newfde->fde_seqc);
3742 memcpy(newfde, oldfde, fde_change_size);
3743 filecaps_copy_finish(&oldfde->fde_caps, &newfde->fde_caps,
3746 seqc_write_end(&newfde->fde_seqc);
3751 * Steal away the file pointer from dfd and stuff it into indx.
3753 newfde = &fdp->fd_ofiles[indx];
3754 oldfde = &fdp->fd_ofiles[dfd];
3756 seqc_write_begin(&newfde->fde_seqc);
3758 memcpy(newfde, oldfde, fde_change_size);
3759 oldfde->fde_file = NULL;
3762 seqc_write_end(&newfde->fde_seqc);
3766 FILEDESC_XUNLOCK(fdp);
3772 * This sysctl determines if we will allow a process to chroot(2) if it
3773 * has a directory open:
3774 * 0: disallowed for all processes.
3775 * 1: allowed for processes that were not already chroot(2)'ed.
3776 * 2: allowed for all processes.
3779 static int chroot_allow_open_directories = 1;
3781 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
3782 &chroot_allow_open_directories, 0,
3783 "Allow a process to chroot(2) if it has a directory open");
3786 * Helper function for the elevated chroot(2) security setting: refuse the
3787 * operation if any file descriptors are open directories.
3790 chroot_refuse_vdir_fds(struct filedesc *fdp)
3796 FILEDESC_LOCK_ASSERT(fdp);
3798 lastfile = fdlastfile(fdp);
3799 for (fd = 0; fd <= lastfile; fd++) {
3800 fp = fget_locked(fdp, fd);
3803 if (fp->f_type == DTYPE_VNODE) {
3805 if (vp->v_type == VDIR)
3813 pwd_fill(struct pwd *oldpwd, struct pwd *newpwd)
3816 if (newpwd->pwd_cdir == NULL && oldpwd->pwd_cdir != NULL) {
3817 vrefact(oldpwd->pwd_cdir);
3818 newpwd->pwd_cdir = oldpwd->pwd_cdir;
3821 if (newpwd->pwd_rdir == NULL && oldpwd->pwd_rdir != NULL) {
3822 vrefact(oldpwd->pwd_rdir);
3823 newpwd->pwd_rdir = oldpwd->pwd_rdir;
3826 if (newpwd->pwd_jdir == NULL && oldpwd->pwd_jdir != NULL) {
3827 vrefact(oldpwd->pwd_jdir);
3828 newpwd->pwd_jdir = oldpwd->pwd_jdir;
3833 pwd_hold_pwddesc(struct pwddesc *pdp)
3837 PWDDESC_ASSERT_XLOCKED(pdp);
3838 pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
3840 refcount_acquire(&pwd->pwd_refcount);
3845 pwd_hold_smr(struct pwd *pwd)
3849 if (__predict_true(refcount_acquire_if_not_zero(&pwd->pwd_refcount))) {
3856 pwd_hold(struct thread *td)
3858 struct pwddesc *pdp;
3861 pdp = td->td_proc->p_pd;
3864 pwd = vfs_smr_entered_load(&pdp->pd_pwd);
3865 if (pwd_hold_smr(pwd)) {
3871 pwd = pwd_hold_pwddesc(pdp);
3873 PWDDESC_XUNLOCK(pdp);
3878 pwd_hold_proc(struct proc *p)
3880 struct pwddesc *pdp;
3883 PROC_ASSERT_HELD(p);
3890 pwd = pwd_hold_pwddesc(pdp);
3892 PWDDESC_XUNLOCK(pdp);
3902 pwd = uma_zalloc_smr(pwd_zone, M_WAITOK);
3903 bzero(pwd, sizeof(*pwd));
3904 refcount_init(&pwd->pwd_refcount, 1);
3909 pwd_drop(struct pwd *pwd)
3912 if (!refcount_release(&pwd->pwd_refcount))
3915 if (pwd->pwd_cdir != NULL)
3916 vrele(pwd->pwd_cdir);
3917 if (pwd->pwd_rdir != NULL)
3918 vrele(pwd->pwd_rdir);
3919 if (pwd->pwd_jdir != NULL)
3920 vrele(pwd->pwd_jdir);
3921 uma_zfree_smr(pwd_zone, pwd);
3925 * The caller is responsible for invoking priv_check() and
3926 * mac_vnode_check_chroot() to authorize this operation.
3929 pwd_chroot(struct thread *td, struct vnode *vp)
3931 struct pwddesc *pdp;
3932 struct filedesc *fdp;
3933 struct pwd *newpwd, *oldpwd;
3936 fdp = td->td_proc->p_fd;
3937 pdp = td->td_proc->p_pd;
3938 newpwd = pwd_alloc();
3939 FILEDESC_SLOCK(fdp);
3941 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
3942 if (chroot_allow_open_directories == 0 ||
3943 (chroot_allow_open_directories == 1 &&
3944 oldpwd->pwd_rdir != rootvnode)) {
3945 error = chroot_refuse_vdir_fds(fdp);
3946 FILEDESC_SUNLOCK(fdp);
3948 PWDDESC_XUNLOCK(pdp);
3953 FILEDESC_SUNLOCK(fdp);
3957 newpwd->pwd_rdir = vp;
3958 if (oldpwd->pwd_jdir == NULL) {
3960 newpwd->pwd_jdir = vp;
3962 pwd_fill(oldpwd, newpwd);
3963 pwd_set(pdp, newpwd);
3964 PWDDESC_XUNLOCK(pdp);
3970 pwd_chdir(struct thread *td, struct vnode *vp)
3972 struct pwddesc *pdp;
3973 struct pwd *newpwd, *oldpwd;
3975 VNPASS(vp->v_usecount > 0, vp);
3977 newpwd = pwd_alloc();
3978 pdp = td->td_proc->p_pd;
3980 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
3981 newpwd->pwd_cdir = vp;
3982 pwd_fill(oldpwd, newpwd);
3983 pwd_set(pdp, newpwd);
3984 PWDDESC_XUNLOCK(pdp);
3989 * jail_attach(2) changes both root and working directories.
3992 pwd_chroot_chdir(struct thread *td, struct vnode *vp)
3994 struct pwddesc *pdp;
3995 struct filedesc *fdp;
3996 struct pwd *newpwd, *oldpwd;
3999 fdp = td->td_proc->p_fd;
4000 pdp = td->td_proc->p_pd;
4001 newpwd = pwd_alloc();
4002 FILEDESC_SLOCK(fdp);
4004 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4005 error = chroot_refuse_vdir_fds(fdp);
4006 FILEDESC_SUNLOCK(fdp);
4008 PWDDESC_XUNLOCK(pdp);
4014 newpwd->pwd_rdir = vp;
4016 newpwd->pwd_cdir = vp;
4017 if (oldpwd->pwd_jdir == NULL) {
4019 newpwd->pwd_jdir = vp;
4021 pwd_fill(oldpwd, newpwd);
4022 pwd_set(pdp, newpwd);
4023 PWDDESC_XUNLOCK(pdp);
4029 pwd_ensure_dirs(void)
4031 struct pwddesc *pdp;
4032 struct pwd *oldpwd, *newpwd;
4034 pdp = curproc->p_pd;
4036 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4037 if (oldpwd->pwd_cdir != NULL && oldpwd->pwd_rdir != NULL) {
4038 PWDDESC_XUNLOCK(pdp);
4041 PWDDESC_XUNLOCK(pdp);
4043 newpwd = pwd_alloc();
4045 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4046 pwd_fill(oldpwd, newpwd);
4047 if (newpwd->pwd_cdir == NULL) {
4049 newpwd->pwd_cdir = rootvnode;
4051 if (newpwd->pwd_rdir == NULL) {
4053 newpwd->pwd_rdir = rootvnode;
4055 pwd_set(pdp, newpwd);
4056 PWDDESC_XUNLOCK(pdp);
4061 pwd_set_rootvnode(void)
4063 struct pwddesc *pdp;
4064 struct pwd *oldpwd, *newpwd;
4066 pdp = curproc->p_pd;
4068 newpwd = pwd_alloc();
4070 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4072 newpwd->pwd_cdir = rootvnode;
4074 newpwd->pwd_rdir = rootvnode;
4075 pwd_fill(oldpwd, newpwd);
4076 pwd_set(pdp, newpwd);
4077 PWDDESC_XUNLOCK(pdp);
4082 * Scan all active processes and prisons to see if any of them have a current
4083 * or root directory of `olddp'. If so, replace them with the new mount point.
4086 mountcheckdirs(struct vnode *olddp, struct vnode *newdp)
4088 struct pwddesc *pdp;
4089 struct pwd *newpwd, *oldpwd;
4094 if (vrefcnt(olddp) == 1)
4097 newpwd = pwd_alloc();
4098 sx_slock(&allproc_lock);
4099 FOREACH_PROC_IN_SYSTEM(p) {
4106 oldpwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4107 if (oldpwd == NULL ||
4108 (oldpwd->pwd_cdir != olddp &&
4109 oldpwd->pwd_rdir != olddp &&
4110 oldpwd->pwd_jdir != olddp)) {
4111 PWDDESC_XUNLOCK(pdp);
4115 if (oldpwd->pwd_cdir == olddp) {
4117 newpwd->pwd_cdir = newdp;
4119 if (oldpwd->pwd_rdir == olddp) {
4121 newpwd->pwd_rdir = newdp;
4123 if (oldpwd->pwd_jdir == olddp) {
4125 newpwd->pwd_jdir = newdp;
4127 pwd_fill(oldpwd, newpwd);
4128 pwd_set(pdp, newpwd);
4129 PWDDESC_XUNLOCK(pdp);
4132 newpwd = pwd_alloc();
4134 sx_sunlock(&allproc_lock);
4136 if (rootvnode == olddp) {
4141 mtx_lock(&prison0.pr_mtx);
4142 if (prison0.pr_root == olddp) {
4144 prison0.pr_root = newdp;
4147 mtx_unlock(&prison0.pr_mtx);
4148 sx_slock(&allprison_lock);
4149 TAILQ_FOREACH(pr, &allprison, pr_list) {
4150 mtx_lock(&pr->pr_mtx);
4151 if (pr->pr_root == olddp) {
4153 pr->pr_root = newdp;
4156 mtx_unlock(&pr->pr_mtx);
4158 sx_sunlock(&allprison_lock);
4163 struct filedesc_to_leader *
4164 filedesc_to_leader_alloc(struct filedesc_to_leader *old, struct filedesc *fdp, struct proc *leader)
4166 struct filedesc_to_leader *fdtol;
4168 fdtol = malloc(sizeof(struct filedesc_to_leader),
4169 M_FILEDESC_TO_LEADER, M_WAITOK);
4170 fdtol->fdl_refcount = 1;
4171 fdtol->fdl_holdcount = 0;
4172 fdtol->fdl_wakeup = 0;
4173 fdtol->fdl_leader = leader;
4175 FILEDESC_XLOCK(fdp);
4176 fdtol->fdl_next = old->fdl_next;
4177 fdtol->fdl_prev = old;
4178 old->fdl_next = fdtol;
4179 fdtol->fdl_next->fdl_prev = fdtol;
4180 FILEDESC_XUNLOCK(fdp);
4182 fdtol->fdl_next = fdtol;
4183 fdtol->fdl_prev = fdtol;
4189 sysctl_kern_proc_nfds(SYSCTL_HANDLER_ARGS)
4192 struct filedesc *fdp;
4194 int count, off, minoff;
4200 if (*(int *)arg1 != 0)
4203 fdp = curproc->p_fd;
4205 FILEDESC_SLOCK(fdp);
4207 off = NDSLOT(fdp->fd_nfiles - 1);
4208 for (minoff = NDSLOT(0); off >= minoff; --off)
4209 count += bitcountl(map[off]);
4210 FILEDESC_SUNLOCK(fdp);
4212 return (SYSCTL_OUT(req, &count, sizeof(count)));
4215 static SYSCTL_NODE(_kern_proc, KERN_PROC_NFDS, nfds,
4216 CTLFLAG_RD|CTLFLAG_CAPRD|CTLFLAG_MPSAFE, sysctl_kern_proc_nfds,
4217 "Number of open file descriptors");
4220 * Get file structures globally.
4223 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
4226 struct filedesc *fdp;
4229 int error, n, lastfile;
4231 error = sysctl_wire_old_buffer(req, 0);
4234 if (req->oldptr == NULL) {
4236 sx_slock(&allproc_lock);
4237 FOREACH_PROC_IN_SYSTEM(p) {
4239 if (p->p_state == PRS_NEW) {
4247 /* overestimates sparse tables. */
4248 n += fdp->fd_nfiles;
4251 sx_sunlock(&allproc_lock);
4252 return (SYSCTL_OUT(req, 0, n * sizeof(xf)));
4255 bzero(&xf, sizeof(xf));
4256 xf.xf_size = sizeof(xf);
4257 sx_slock(&allproc_lock);
4258 FOREACH_PROC_IN_SYSTEM(p) {
4260 if (p->p_state == PRS_NEW) {
4264 if (p_cansee(req->td, p) != 0) {
4268 xf.xf_pid = p->p_pid;
4269 xf.xf_uid = p->p_ucred->cr_uid;
4274 FILEDESC_SLOCK(fdp);
4275 lastfile = fdlastfile(fdp);
4276 for (n = 0; refcount_load(&fdp->fd_refcnt) > 0 && n <= lastfile;
4278 if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
4281 xf.xf_file = (uintptr_t)fp;
4282 xf.xf_data = (uintptr_t)fp->f_data;
4283 xf.xf_vnode = (uintptr_t)fp->f_vnode;
4284 xf.xf_type = (uintptr_t)fp->f_type;
4285 xf.xf_count = refcount_load(&fp->f_count);
4287 xf.xf_offset = foffset_get(fp);
4288 xf.xf_flag = fp->f_flag;
4289 error = SYSCTL_OUT(req, &xf, sizeof(xf));
4293 FILEDESC_SUNLOCK(fdp);
4298 sx_sunlock(&allproc_lock);
4302 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
4303 0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
4305 #ifdef KINFO_FILE_SIZE
4306 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
4310 xlate_fflags(int fflags)
4312 static const struct {
4315 } fflags_table[] = {
4316 { FAPPEND, KF_FLAG_APPEND },
4317 { FASYNC, KF_FLAG_ASYNC },
4318 { FFSYNC, KF_FLAG_FSYNC },
4319 { FHASLOCK, KF_FLAG_HASLOCK },
4320 { FNONBLOCK, KF_FLAG_NONBLOCK },
4321 { FREAD, KF_FLAG_READ },
4322 { FWRITE, KF_FLAG_WRITE },
4323 { O_CREAT, KF_FLAG_CREAT },
4324 { O_DIRECT, KF_FLAG_DIRECT },
4325 { O_EXCL, KF_FLAG_EXCL },
4326 { O_EXEC, KF_FLAG_EXEC },
4327 { O_EXLOCK, KF_FLAG_EXLOCK },
4328 { O_NOFOLLOW, KF_FLAG_NOFOLLOW },
4329 { O_SHLOCK, KF_FLAG_SHLOCK },
4330 { O_TRUNC, KF_FLAG_TRUNC }
4336 for (i = 0; i < nitems(fflags_table); i++)
4337 if (fflags & fflags_table[i].fflag)
4338 kflags |= fflags_table[i].kf_fflag;
4342 /* Trim unused data from kf_path by truncating the structure size. */
4344 pack_kinfo(struct kinfo_file *kif)
4347 kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
4348 strlen(kif->kf_path) + 1;
4349 kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
4353 export_file_to_kinfo(struct file *fp, int fd, cap_rights_t *rightsp,
4354 struct kinfo_file *kif, struct filedesc *fdp, int flags)
4358 bzero(kif, sizeof(*kif));
4360 /* Set a default type to allow for empty fill_kinfo() methods. */
4361 kif->kf_type = KF_TYPE_UNKNOWN;
4362 kif->kf_flags = xlate_fflags(fp->f_flag);
4363 if (rightsp != NULL)
4364 kif->kf_cap_rights = *rightsp;
4366 cap_rights_init_zero(&kif->kf_cap_rights);
4368 kif->kf_ref_count = refcount_load(&fp->f_count);
4369 kif->kf_offset = foffset_get(fp);
4372 * This may drop the filedesc lock, so the 'fp' cannot be
4373 * accessed after this call.
4375 error = fo_fill_kinfo(fp, kif, fdp);
4377 kif->kf_status |= KF_ATTR_VALID;
4378 if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
4381 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
4385 export_vnode_to_kinfo(struct vnode *vp, int fd, int fflags,
4386 struct kinfo_file *kif, int flags)
4390 bzero(kif, sizeof(*kif));
4392 kif->kf_type = KF_TYPE_VNODE;
4393 error = vn_fill_kinfo_vnode(vp, kif);
4395 kif->kf_status |= KF_ATTR_VALID;
4396 kif->kf_flags = xlate_fflags(fflags);
4397 cap_rights_init_zero(&kif->kf_cap_rights);
4399 kif->kf_ref_count = -1;
4400 kif->kf_offset = -1;
4401 if ((flags & KERN_FILEDESC_PACK_KINFO) != 0)
4404 kif->kf_structsize = roundup2(sizeof(*kif), sizeof(uint64_t));
4408 struct export_fd_buf {
4409 struct filedesc *fdp;
4410 struct pwddesc *pdp;
4413 struct kinfo_file kif;
4418 export_kinfo_to_sb(struct export_fd_buf *efbuf)
4420 struct kinfo_file *kif;
4423 if (efbuf->remainder != -1) {
4424 if (efbuf->remainder < kif->kf_structsize)
4426 efbuf->remainder -= kif->kf_structsize;
4428 if (sbuf_bcat(efbuf->sb, kif, kif->kf_structsize) != 0)
4429 return (sbuf_error(efbuf->sb));
4434 export_file_to_sb(struct file *fp, int fd, cap_rights_t *rightsp,
4435 struct export_fd_buf *efbuf)
4439 if (efbuf->remainder == 0)
4441 export_file_to_kinfo(fp, fd, rightsp, &efbuf->kif, efbuf->fdp,
4443 FILEDESC_SUNLOCK(efbuf->fdp);
4444 error = export_kinfo_to_sb(efbuf);
4445 FILEDESC_SLOCK(efbuf->fdp);
4450 export_vnode_to_sb(struct vnode *vp, int fd, int fflags,
4451 struct export_fd_buf *efbuf)
4455 if (efbuf->remainder == 0)
4457 if (efbuf->pdp != NULL)
4458 PWDDESC_XUNLOCK(efbuf->pdp);
4459 export_vnode_to_kinfo(vp, fd, fflags, &efbuf->kif, efbuf->flags);
4460 error = export_kinfo_to_sb(efbuf);
4461 if (efbuf->pdp != NULL)
4462 PWDDESC_XLOCK(efbuf->pdp);
4467 * Store a process's file descriptor information in an sbuf.
4469 * Takes a locked proc as argument, and returns with the proc unlocked.
4472 kern_proc_filedesc_out(struct proc *p, struct sbuf *sb, ssize_t maxlen,
4476 struct filedesc *fdp;
4477 struct pwddesc *pdp;
4478 struct export_fd_buf *efbuf;
4479 struct vnode *cttyvp, *textvp, *tracevp;
4481 int error, i, lastfile;
4482 cap_rights_t rights;
4484 PROC_LOCK_ASSERT(p, MA_OWNED);
4487 tracevp = ktr_get_tracevp(p, true);
4489 textvp = p->p_textvp;
4492 /* Controlling tty. */
4494 if (p->p_pgrp != NULL && p->p_pgrp->pg_session != NULL) {
4495 cttyvp = p->p_pgrp->pg_session->s_ttyvp;
4503 efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
4507 efbuf->remainder = maxlen;
4508 efbuf->flags = flags;
4511 if (tracevp != NULL)
4512 error = export_vnode_to_sb(tracevp, KF_FD_TYPE_TRACE,
4513 FREAD | FWRITE, efbuf);
4514 if (error == 0 && textvp != NULL)
4515 error = export_vnode_to_sb(textvp, KF_FD_TYPE_TEXT, FREAD,
4517 if (error == 0 && cttyvp != NULL)
4518 error = export_vnode_to_sb(cttyvp, KF_FD_TYPE_CTTY,
4519 FREAD | FWRITE, efbuf);
4520 if (error != 0 || pdp == NULL || fdp == NULL)
4525 pwd = pwd_hold_pwddesc(pdp);
4527 /* working directory */
4528 if (pwd->pwd_cdir != NULL) {
4529 vrefact(pwd->pwd_cdir);
4530 error = export_vnode_to_sb(pwd->pwd_cdir,
4531 KF_FD_TYPE_CWD, FREAD, efbuf);
4533 /* root directory */
4534 if (error == 0 && pwd->pwd_rdir != NULL) {
4535 vrefact(pwd->pwd_rdir);
4536 error = export_vnode_to_sb(pwd->pwd_rdir,
4537 KF_FD_TYPE_ROOT, FREAD, efbuf);
4539 /* jail directory */
4540 if (error == 0 && pwd->pwd_jdir != NULL) {
4541 vrefact(pwd->pwd_jdir);
4542 error = export_vnode_to_sb(pwd->pwd_jdir,
4543 KF_FD_TYPE_JAIL, FREAD, efbuf);
4546 PWDDESC_XUNLOCK(pdp);
4551 FILEDESC_SLOCK(fdp);
4552 lastfile = fdlastfile(fdp);
4553 for (i = 0; refcount_load(&fdp->fd_refcnt) > 0 && i <= lastfile; i++) {
4554 if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
4557 rights = *cap_rights(fdp, i);
4558 #else /* !CAPABILITIES */
4559 rights = cap_no_rights;
4562 * Create sysctl entry. It is OK to drop the filedesc
4563 * lock inside of export_file_to_sb() as we will
4564 * re-validate and re-evaluate its properties when the loop continues.
4567 error = export_file_to_sb(fp, i, &rights, efbuf);
4571 FILEDESC_SUNLOCK(fdp);
4577 free(efbuf, M_TEMP);
4581 #define FILEDESC_SBUF_SIZE (sizeof(struct kinfo_file) * 5)
4584 * Get per-process file descriptors for use by procstat(1), et al.
4587 sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
4593 int error, error2, *name;
4601 sbuf_new_for_sysctl(&sb, NULL, FILEDESC_SBUF_SIZE, req);
4602 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
4603 error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4608 maxlen = req->oldptr != NULL ? req->oldlen : -1;
4609 error = kern_proc_filedesc_out(p, &sb, maxlen,
4610 KERN_FILEDESC_PACK_KINFO);
4611 error2 = sbuf_finish(&sb);
4613 return (error != 0 ? error : error2);
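/*
 * Illustrative userland sketch (not part of this file): the records exported
 * by the sysctl above are most easily consumed through the libutil wrapper
 * kinfo_getfile(3); link with -lutil.
 *
 *	#include <sys/types.h>
 *	#include <sys/user.h>
 *	#include <libutil.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct kinfo_file *files;
 *		int cnt, i;
 *
 *		files = kinfo_getfile(getpid(), &cnt);
 *		if (files == NULL)
 *			return (1);
 *		for (i = 0; i < cnt; i++)
 *			printf("fd %d: %s\n", files[i].kf_fd, files[i].kf_path);
 *		free(files);
 *		return (0);
 *	}
 */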
4616 #ifdef COMPAT_FREEBSD7
4617 #ifdef KINFO_OFILE_SIZE
4618 CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
4622 kinfo_to_okinfo(struct kinfo_file *kif, struct kinfo_ofile *okif)
4625 okif->kf_structsize = sizeof(*okif);
4626 okif->kf_type = kif->kf_type;
4627 okif->kf_fd = kif->kf_fd;
4628 okif->kf_ref_count = kif->kf_ref_count;
4629 okif->kf_flags = kif->kf_flags & (KF_FLAG_READ | KF_FLAG_WRITE |
4630 KF_FLAG_APPEND | KF_FLAG_ASYNC | KF_FLAG_FSYNC | KF_FLAG_NONBLOCK |
4631 KF_FLAG_DIRECT | KF_FLAG_HASLOCK);
4632 okif->kf_offset = kif->kf_offset;
4633 if (kif->kf_type == KF_TYPE_VNODE)
4634 okif->kf_vnode_type = kif->kf_un.kf_file.kf_file_type;
4636 okif->kf_vnode_type = KF_VTYPE_VNON;
4637 strlcpy(okif->kf_path, kif->kf_path, sizeof(okif->kf_path));
4638 if (kif->kf_type == KF_TYPE_SOCKET) {
4639 okif->kf_sock_domain = kif->kf_un.kf_sock.kf_sock_domain0;
4640 okif->kf_sock_type = kif->kf_un.kf_sock.kf_sock_type0;
4641 okif->kf_sock_protocol = kif->kf_un.kf_sock.kf_sock_protocol0;
4642 okif->kf_sa_local = kif->kf_un.kf_sock.kf_sa_local;
4643 okif->kf_sa_peer = kif->kf_un.kf_sock.kf_sa_peer;
4645 okif->kf_sa_local.ss_family = AF_UNSPEC;
4646 okif->kf_sa_peer.ss_family = AF_UNSPEC;
4651 export_vnode_for_osysctl(struct vnode *vp, int type, struct kinfo_file *kif,
4652 struct kinfo_ofile *okif, struct pwddesc *pdp, struct sysctl_req *req)
4657 PWDDESC_XUNLOCK(pdp);
4658 export_vnode_to_kinfo(vp, type, 0, kif, KERN_FILEDESC_PACK_KINFO);
4659 kinfo_to_okinfo(kif, okif);
4660 error = SYSCTL_OUT(req, okif, sizeof(*okif));
4666 * Get per-process file descriptors for use by procstat(1), et al.
4669 sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
4671 struct kinfo_ofile *okif;
4672 struct kinfo_file *kif;
4673 struct filedesc *fdp;
4674 struct pwddesc *pdp;
4677 int error, i, lastfile, *name;
4686 error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4693 if (fdp == NULL || pdp == NULL) {
4698 kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
4699 okif = malloc(sizeof(*okif), M_TEMP, M_WAITOK);
4701 pwd = pwd_hold_pwddesc(pdp);
4703 if (pwd->pwd_cdir != NULL)
4704 export_vnode_for_osysctl(pwd->pwd_cdir, KF_FD_TYPE_CWD, kif,
4706 if (pwd->pwd_rdir != NULL)
4707 export_vnode_for_osysctl(pwd->pwd_rdir, KF_FD_TYPE_ROOT, kif,
4709 if (pwd->pwd_jdir != NULL)
4710 export_vnode_for_osysctl(pwd->pwd_jdir, KF_FD_TYPE_JAIL, kif,
4713 PWDDESC_XUNLOCK(pdp);
4716 FILEDESC_SLOCK(fdp);
4717 lastfile = fdlastfile(fdp);
4718 for (i = 0; refcount_load(&fdp->fd_refcnt) > 0 && i <= lastfile; i++) {
4719 if ((fp = fdp->fd_ofiles[i].fde_file) == NULL)
4721 export_file_to_kinfo(fp, i, NULL, kif, fdp,
4722 KERN_FILEDESC_PACK_KINFO);
4723 FILEDESC_SUNLOCK(fdp);
4724 kinfo_to_okinfo(kif, okif);
4725 error = SYSCTL_OUT(req, okif, sizeof(*okif));
4726 FILEDESC_SLOCK(fdp);
4730 FILEDESC_SUNLOCK(fdp);
4738 static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc,
4739 CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_ofiledesc,
4740 "Process ofiledesc entries");
4741 #endif /* COMPAT_FREEBSD7 */
4744 vntype_to_kinfo(int vtype)
4749 } vtypes_table[] = {
4750 { VBAD, KF_VTYPE_VBAD },
4751 { VBLK, KF_VTYPE_VBLK },
4752 { VCHR, KF_VTYPE_VCHR },
4753 { VDIR, KF_VTYPE_VDIR },
4754 { VFIFO, KF_VTYPE_VFIFO },
4755 { VLNK, KF_VTYPE_VLNK },
4756 { VNON, KF_VTYPE_VNON },
4757 { VREG, KF_VTYPE_VREG },
4758 { VSOCK, KF_VTYPE_VSOCK }
4763 * Perform vtype translation.
4765 for (i = 0; i < nitems(vtypes_table); i++)
4766 if (vtypes_table[i].vtype == vtype)
4767 return (vtypes_table[i].kf_vtype);
4769 return (KF_VTYPE_UNKNOWN);
4772 static SYSCTL_NODE(_kern_proc, KERN_PROC_FILEDESC, filedesc,
4773 CTLFLAG_RD|CTLFLAG_MPSAFE, sysctl_kern_proc_filedesc,
4774 "Process filedesc entries");
4777 * Store a process's current working directory information in an sbuf.
4779 * Takes a locked proc as argument, and returns with the proc unlocked.
4782 kern_proc_cwd_out(struct proc *p, struct sbuf *sb, ssize_t maxlen)
4784 struct pwddesc *pdp;
4786 struct export_fd_buf *efbuf;
4790 PROC_LOCK_ASSERT(p, MA_OWNED);
4797 efbuf = malloc(sizeof(*efbuf), M_TEMP, M_WAITOK);
4801 efbuf->remainder = maxlen;
4805 pwd = PWDDESC_XLOCKED_LOAD_PWD(pdp);
4806 cdir = pwd->pwd_cdir;
4811 error = export_vnode_to_sb(cdir, KF_FD_TYPE_CWD, FREAD, efbuf);
4813 PWDDESC_XUNLOCK(pdp);
4815 free(efbuf, M_TEMP);
4820 * Get per-process current working directory.
4823 sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
4829 int error, error2, *name;
4837 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file), req);
4838 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
4839 error = pget((pid_t)name[0], PGET_CANDEBUG | PGET_NOTWEXIT, &p);
4844 maxlen = req->oldptr != NULL ? req->oldlen : -1;
4845 error = kern_proc_cwd_out(p, &sb, maxlen);
4846 error2 = sbuf_finish(&sb);
4848 return (error != 0 ? error : error2);
4851 static SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD|CTLFLAG_MPSAFE,
4852 sysctl_kern_proc_cwd, "Process current working directory");
4856 * For the purposes of debugging, generate a human-readable string for the file type.
4860 file_type_to_name(short type)
4888 case DTYPE_PROCDESC:
4892 case DTYPE_LINUXTFD:
4900 * For the purposes of debugging, identify a process (if any, perhaps one of
4901 * many) that references the passed file in its file descriptor array. Return NULL if none is found.
4904 static struct proc *
4905 file_to_first_proc(struct file *fp)
4907 struct filedesc *fdp;
4911 FOREACH_PROC_IN_SYSTEM(p) {
4912 if (p->p_state == PRS_NEW)
4917 for (n = 0; n < fdp->fd_nfiles; n++) {
4918 if (fp == fdp->fd_ofiles[n].fde_file)
4926 db_print_file(struct file *fp, int header)
4928 #define XPTRWIDTH ((int)howmany(sizeof(void *) * NBBY, 4))
4932 db_printf("%*s %6s %*s %8s %4s %5s %6s %*s %5s %s\n",
4933 XPTRWIDTH, "File", "Type", XPTRWIDTH, "Data", "Flag",
4934 "GCFl", "Count", "MCount", XPTRWIDTH, "Vnode", "FPID",
4936 p = file_to_first_proc(fp);
4937 db_printf("%*p %6s %*p %08x %04x %5d %6d %*p %5d %s\n", XPTRWIDTH,
4938 fp, file_type_to_name(fp->f_type), XPTRWIDTH, fp->f_data,
4939 fp->f_flag, 0, refcount_load(&fp->f_count), 0, XPTRWIDTH, fp->f_vnode,
4940 p != NULL ? p->p_pid : -1, p != NULL ? p->p_comm : "-");
4945 DB_SHOW_COMMAND(file, db_show_file)
4950 db_printf("usage: show file <addr>\n");
4953 fp = (struct file *)addr;
4954 db_print_file(fp, 1);
4957 DB_SHOW_COMMAND(files, db_show_files)
4959 struct filedesc *fdp;
4966 FOREACH_PROC_IN_SYSTEM(p) {
4967 if (p->p_state == PRS_NEW)
4969 if ((fdp = p->p_fd) == NULL)
4971 for (n = 0; n < fdp->fd_nfiles; ++n) {
4972 if ((fp = fdp->fd_ofiles[n].fde_file) == NULL)
4974 db_print_file(fp, header);
4981 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
4982 &maxfilesperproc, 0, "Maximum files allowed open per process");
4984 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
4985 &maxfiles, 0, "Maximum number of files");
4987 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
4988 &openfiles, 0, "System-wide number of open files");
4992 filelistinit(void *dummy)
4995 file_zone = uma_zcreate("Files", sizeof(struct file), NULL, NULL,
4996 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
4997 filedesc0_zone = uma_zcreate("filedesc0", sizeof(struct filedesc0),
4998 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
4999 pwd_zone = uma_zcreate("PWD", sizeof(struct pwd), NULL, NULL,
5000 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_SMR);
5002 * XXXMJG this is a temporary hack due to boot ordering issues against the vnode zone.
5005 vfs_smr = uma_zone_get_smr(pwd_zone);
5006 mtx_init(&sigio_lock, "sigio lock", NULL, MTX_DEF);
5008 SYSINIT(select, SI_SUB_LOCK, SI_ORDER_FIRST, filelistinit, NULL);
5010 /*-------------------------------------------------------------------*/
5013 badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred,
5014 int flags, struct thread *td)
5021 badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
5029 badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
5037 badfo_poll(struct file *fp, int events, struct ucred *active_cred,
5045 badfo_kqfilter(struct file *fp, struct knote *kn)
5052 badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
5060 badfo_close(struct file *fp, struct thread *td)
5067 badfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
5075 badfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
5083 badfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
5084 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
5092 badfo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
5098 struct fileops badfileops = {
5099 .fo_read = badfo_readwrite,
5100 .fo_write = badfo_readwrite,
5101 .fo_truncate = badfo_truncate,
5102 .fo_ioctl = badfo_ioctl,
5103 .fo_poll = badfo_poll,
5104 .fo_kqfilter = badfo_kqfilter,
5105 .fo_stat = badfo_stat,
5106 .fo_close = badfo_close,
5107 .fo_chmod = badfo_chmod,
5108 .fo_chown = badfo_chown,
5109 .fo_sendfile = badfo_sendfile,
5110 .fo_fill_kinfo = badfo_fill_kinfo,
5114 path_poll(struct file *fp, int events, struct ucred *active_cred,
5121 path_close(struct file *fp, struct thread *td)
5123 MPASS(fp->f_type == DTYPE_VNODE);
5124 fp->f_ops = &badfileops;
5129 struct fileops path_fileops = {
5130 .fo_read = badfo_readwrite,
5131 .fo_write = badfo_readwrite,
5132 .fo_truncate = badfo_truncate,
5133 .fo_ioctl = badfo_ioctl,
5134 .fo_poll = path_poll,
5135 .fo_kqfilter = vn_kqfilter_opath,
5136 .fo_stat = vn_statfile,
5137 .fo_close = path_close,
5138 .fo_chmod = badfo_chmod,
5139 .fo_chown = badfo_chown,
5140 .fo_sendfile = badfo_sendfile,
5141 .fo_fill_kinfo = vn_fill_kinfo,
5142 .fo_flags = DFLAG_PASSABLE,
5146 invfo_rdwr(struct file *fp, struct uio *uio, struct ucred *active_cred,
5147 int flags, struct thread *td)
5150 return (EOPNOTSUPP);
5154 invfo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
5162 invfo_ioctl(struct file *fp, u_long com, void *data,
5163 struct ucred *active_cred, struct thread *td)
5170 invfo_poll(struct file *fp, int events, struct ucred *active_cred,
5174 return (poll_no_poll(events));
5178 invfo_kqfilter(struct file *fp, struct knote *kn)
5185 invfo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
5193 invfo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
5201 invfo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
5202 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
5209 /*-------------------------------------------------------------------*/
5212 * File Descriptor pseudo-device driver (/dev/fd/).
5214 * Opening minor device N dup()s the file (if any) connected to file
5215 * descriptor N belonging to the calling process. Note that this driver
5216 * consists of only the ``open()'' routine, because all subsequent
5217 * references to this file will go directly to the other driver.
5219 * XXX: we could give this one a cloning event handler if necessary.
5224 fdopen(struct cdev *dev, int mode, int type, struct thread *td)
5228 * XXX Kludge: set curthread->td_dupfd to contain the value of the
5229 * file descriptor being sought for duplication. The error
5230 * return ensures that the vnode for this device will be released
5231 * by vn_open. Open will detect this special error and take the
5232 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
5233 * will simply report the error.
5235 td->td_dupfd = dev2unit(dev);
5239 static struct cdevsw fildesc_cdevsw = {
5240 .d_version = D_VERSION,
5246 fildesc_drvinit(void *unused)
5250 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 0, NULL,
5251 UID_ROOT, GID_WHEEL, 0666, "fd/0");
5252 make_dev_alias(dev, "stdin");
5253 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 1, NULL,
5254 UID_ROOT, GID_WHEEL, 0666, "fd/1");
5255 make_dev_alias(dev, "stdout");
5256 dev = make_dev_credf(MAKEDEV_ETERNAL, &fildesc_cdevsw, 2, NULL,
5257 UID_ROOT, GID_WHEEL, 0666, "fd/2");
5258 make_dev_alias(dev, "stderr");
5261 SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, fildesc_drvinit, NULL);
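/*
 * Illustrative userland sketch (not part of this file) of the /dev/fd/N
 * behaviour described above: opening the pseudo-device duplicates the named
 * descriptor of the calling process, subject to the access-mode subset check
 * performed in dupfdopen().
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd;
 *
 *		fd = open("/dev/fd/0", O_RDONLY);	// roughly dup(0)
 *		if (fd == -1) {
 *			perror("open");
 *			return (1);
 *		}
 *		printf("fd %d now refers to the same file as stdin\n", fd);
 *		close(fd);
 *		return (0);
 *	}
 */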