/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>
#include <sys/stat.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
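/*
 * Illustration (not part of the original file): DIRENT_MINSIZE is the
 * smallest d_reclen a sane directory entry can carry, i.e. the fixed
 * dirent header plus 4 bytes of d_name.  Assuming d_name is the trailing
 * member with no tail padding (as laid out in <sys/dirent.h>), this could
 * be spelled as a compile-time check:
 *
 *	CTASSERT(DIRENT_MINSIZE == offsetof(struct dirent, d_name) + 4);
 */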
static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
static int vop_stdread_pgcache(struct vop_read_pgcache_args *ap);
static int vop_stdstat(struct vop_stat_args *ap);
static int vop_stdvput_pair(struct vop_vput_pair_args *ap);
/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_deallocate =	vop_stddeallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_stat =		vop_stdstat,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_need_inactive =	vop_stdneed_inactive,
	.vop_ioctl =		vop_stdioctl,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_read_pgcache =	vop_stdread_pgcache,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
	.vop_vput_pair =	vop_stdvput_pair,
};

VFS_VOP_VECTOR_REGISTER(default_vnodeops);
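/*
 * Usage sketch (not part of the original file; the "myfs" names are
 * hypothetical): a filesystem normally fills in only the operations it
 * implements and points vop_default at the table above, so every VOP it
 * leaves out falls through to these defaults:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_read =	myfs_read,
 *		.vop_reclaim =	myfs_reclaim,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(myfs_vnodeops);
 */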
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_eagain(struct vop_generic_args *ap)
{

	return (EAGAIN);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists, is
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */
/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}
/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}
/*
 *	vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */
static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}
/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}
int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
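/*
 * Example (a sketch, not from this file; the "myfs" names are
 * hypothetical): a filesystem that stores plain POSIX attributes
 * implements just one of the pair, e.g. vop_access via vaccess(9), and
 * the bridge above then serves VOP_ACCESSX() through vop_stdaccessx():
 *
 *	static int
 *	myfs_access(struct vop_access_args *ap)
 *	{
 *		struct myfs_node *np = ap->a_vp->v_data;
 *
 *		return (vaccess(ap->a_vp->v_type, np->n_mode, np->n_uid,
 *		    np->n_gid, ap->a_accmode, ap->a_cred));
 *	}
 */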
/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	mp = vp->v_mount;

	/*
	 * Provide atomicity of open(O_CREAT | O_EXCL | O_EXLOCK) for
	 * local filesystems.  See vn_open_cred() for reciprocal part.
	 */
	if (mp != NULL && (mp->mnt_flag & MNT_LOCAL) != 0 &&
	    ap->a_op == F_SETLK && (ap->a_flags & F_FIRSTOPEN) == 0) {
		VI_LOCK(vp);
		while ((vp->v_iflag & VI_FOPENING) != 0)
			msleep(vp, VI_MTX(vp), PLOCK, "lockfo", 0);
		VI_UNLOCK(vp);
	}

	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}
int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}
int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}
/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
	struct vnode *a_vp;
	int a_name;
	int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_DEALLOC_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
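/*
 * Override sketch (not from this file; "myfs" and MYFS_MAXNAMLEN are
 * hypothetical): a filesystem with tighter limits handles its own names
 * and forwards everything else here:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = MYFS_MAXNAMLEN;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */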
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock(vp->v_vnlock));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}
/*
 * Variants of the above set.
 *
 * Differences are:
 * - shared locking disablement is not supported
 * - v_vnlock pointer is not honored
 */
int
vop_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	struct mtx *ilk;

	MPASS(vp->v_vnlock == &vp->v_lock);

	if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0))
		goto other;

	switch (flags & LK_TYPE_MASK) {
	case LK_SHARED:
		return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	case LK_EXCLUSIVE:
		return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line));
	}
other:
	ilk = VI_MTX(vp);
	return (lockmgr_lock_flags(&vp->v_lock, flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

int
vop_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockmgr_unlock(&vp->v_lock));
}

int
vop_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	MPASS(vp->v_vnlock == &vp->v_lock);

	return (lockstatus(&vp->v_lock));
}
/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int  a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;
	struct vnode *vp;

	/*
	 * Note that having a reference does not prevent forced unmount from
	 * setting ->v_mount to NULL after the lock gets released. This is of
	 * no consequence for typical consumers (most notably vn_start_write)
	 * since in this case the vnode is VIRF_DOOMED. Unmount might have
	 * progressed far enough that its completion is only delayed by the
	 * reference obtained here. The consumer only needs to concern itself
	 * with releasing it.
	 */
	vp = ap->a_vp;
	mp = vfs_ref_from_vp(vp);
	*(ap->a_mpp) = mp;
	return (0);
}
/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
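/*
 * Worked example (illustrative numbers): with f_iosize of 16384 and
 * DEV_BSIZE of 512, btodb(16384) == 32, so logical block 5 maps to
 * "physical" block 5 * 32 == 160.  Because *a_runp and *a_runb are
 * zeroed, callers may not assume any contiguous run around that block.
 */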
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}
static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}
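/*
 * Note (illustrative): the table default above implements fdatasync(2) as
 * a full VOP_FSYNC(); a buffer-cache backed filesystem can instead set
 *
 *	.vop_fdatasync =	vop_stdfdatasync_buf,
 *
 * in its vop_vector to flush only the vnode's dirty buffers.
 */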
/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}
/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}
int
vop_stdvptofh(struct vop_vptofh_args *ap)
{

	return (EOPNOTSUPP);
}
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred;
	char *buf = ap->a_buf;
	size_t *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;
	cred = td->td_ucred;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE_PNBUF(&nd);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, ap->a_cred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > maxphys)
		iosize = maxphys;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, ap->a_cred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, ap->a_cred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, ap->a_ioflag, ap->a_cred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, ap->a_ioflag, ap->a_cred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
static int
vp_zerofill(struct vnode *vp, struct vattr *vap, off_t *offsetp, off_t *lenp,
    int ioflag, struct ucred *cred)
{
	int iosize;
	int error = 0;
	struct iovec aiov;
	struct uio auio;
	struct thread *td;
	off_t offset, len;

	iosize = vap->va_blocksize;
	td = curthread;
	offset = *offsetp;
	len = *lenp;

	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	/* If va_blocksize is 512 bytes, iosize will be 4 kilobytes */
	iosize = min(iosize * 8, ZERO_REGION_SIZE);

	while (len > 0) {
		int xfersize = iosize;
		if (offset % iosize != 0)
			xfersize -= offset % iosize;
		if (xfersize > len)
			xfersize = len;

		aiov.iov_base = __DECONST(void *, zero_region);
		aiov.iov_len = xfersize;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = xfersize;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, ioflag, cred);
		if (error != 0) {
			len -= xfersize - auio.uio_resid;
			offset += xfersize - auio.uio_resid;
			break;
		}

		len -= xfersize;
		offset += xfersize;
	}

	*offsetp = offset;
	*lenp = len;
	return (error);
}
static int
vop_stddeallocate(struct vop_deallocate_args *ap)
{
	struct vnode *vp;
	off_t offset, len;
	struct ucred *cred;
	int error;
	struct vattr va;
	off_t noff, xfersize, rem;

	vp = ap->a_vp;
	offset = *ap->a_offset;
	cred = ap->a_cred;

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	len = omin((off_t)va.va_size - offset, *ap->a_len);
	while (len > 0) {
		noff = offset;
		error = vn_bmap_seekhole_locked(vp, FIOSEEKDATA, &noff, cred);
		if (error) {
			if (error != ENXIO)
				/* XXX: Is it okay to fallback further? */
				goto out;

			/*
			 * No more data region to be filled
			 */
			offset += len;
			len = 0;
			error = 0;
			break;
		}
		KASSERT(noff >= offset, ("FIOSEEKDATA going backward"));
		if (noff != offset) {
			xfersize = omin(noff - offset, len);
			len -= xfersize;
			offset += xfersize;
			if (len == 0)
				break;
		}
		error = vn_bmap_seekhole_locked(vp, FIOSEEKHOLE, &noff, cred);
		if (error)
			goto out;

		/* Fill zeroes */
		xfersize = rem = omin(noff - offset, len);
		error = vp_zerofill(vp, &va, &offset, &rem, ap->a_ioflag, cred);
		if (error) {
			len -= xfersize - rem;
			goto out;
		}

		len -= xfersize;
		if (should_yield())
			break;
	}
	/* Handle the case when offset is beyond EOF */
	if (len < 0)
		len = 0;
out:
	*ap->a_offset = offset;
	*ap->a_len = len;
	return (error);
}
int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t bstart, bend, start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (VN_IS_DOOMED(vp)) {
			VOP_UNLOCK(vp);
			break;
		}

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must be to expand at
		 * both extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
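/*
 * Worked example (illustrative numbers): with bo_bsize of 32768, a
 * POSIX_FADV_DONTNEED range of [1000, 40000) expands to
 * bstart = rounddown(1000, 32768) = 0 and
 * bend = roundup(40000, 32768) = 65536, i.e. buffer blocks 0 and 1, so
 * both partially covered edge blocks are discarded along with the
 * interior.
 */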
int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}
static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (atomic_load_int(&ap->a_vp->v_writecount) < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	int n;
	bool gotref;

	vp = ap->a_vp;

	n = atomic_load_int(&vp->v_writecount);
	for (;;) {
		if (__predict_false(n > 0)) {
			return (ETXTBSY);
		}

		/*
		 * Transition point, we may need to grab a reference on the vnode.
		 *
		 * Take the ref early as a safety measure against bogus calls
		 * to vop_stdunset_text.
		 */
		if (n == 0) {
			gotref = false;
			if ((vn_irflag_read(vp) & VIRF_TEXT_REF) != 0) {
				vref(vp);
				gotref = true;
			}
			if (atomic_fcmpset_int(&vp->v_writecount, &n, -1)) {
				return (0);
			}
			if (gotref) {
				vunref(vp);
			}
			continue;
		}

		MPASS(n < 0);
		if (atomic_fcmpset_int(&vp->v_writecount, &n, n - 1)) {
			return (0);
		}
	}
	__assert_unreachable();
}
static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int n;

	vp = ap->a_vp;

	n = atomic_load_int(&vp->v_writecount);
	for (;;) {
		if (__predict_false(n >= 0)) {
			return (EINVAL);
		}

		/*
		 * Transition point, we may need to release a reference on the vnode.
		 */
		if (n == -1) {
			if (atomic_fcmpset_int(&vp->v_writecount, &n, 0)) {
				if ((vn_irflag_read(vp) & VIRF_TEXT_REF) != 0) {
					vunref(vp);
				}
				return (0);
			}
			continue;
		}

		MPASS(n < -1);
		if (atomic_fcmpset_int(&vp->v_writecount, &n, n + 1)) {
			return (0);
		}
	}
	__assert_unreachable();
}
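/*
 * Summary of the v_writecount protocol used above (explanatory note, not
 * part of the original file):
 *	v_writecount > 0:  number of write references; text mapping denied
 *	v_writecount == 0: neither writers nor text mappings
 *	v_writecount < 0:  -(number of text references); writers denied
 * VOP_SET_TEXT() thus moves 0 -> -1 (taking a use reference when
 * VIRF_TEXT_REF is set) or -n -> -(n + 1), and fails with ETXTBSY while a
 * writer is active; VOP_UNSET_TEXT() reverses those transitions.
 */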
static int __always_inline
vop_stdadd_writecount_impl(struct vop_add_writecount_args *ap, bool handle_msync)
{
	struct vnode *vp;
	struct mount *mp __diagused;
	int n;

	vp = ap->a_vp;

#ifdef INVARIANTS
	mp = vp->v_mount;
	if (mp != NULL) {
		if (handle_msync)
			VNPASS((mp->mnt_kern_flag & MNTK_NOMSYNC) == 0, vp);
		else
			VNPASS((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0, vp);
	}
#endif

	n = atomic_load_int(&vp->v_writecount);
	for (;;) {
		if (__predict_false(n < 0)) {
			return (ETXTBSY);
		}

		VNASSERT(n + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d + %d = %d", n, ap->a_inc,
		    n + ap->a_inc));
		if (n == 0) {
			if (handle_msync) {
				vlazy(vp);
			}
		}

		if (atomic_fcmpset_int(&vp->v_writecount, &n, n + ap->a_inc)) {
			return (0);
		}
	}
	__assert_unreachable();
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{

	return (vop_stdadd_writecount_impl(ap, true));
}

int
vop_stdadd_writecount_nomsync(struct vop_add_writecount_args *ap)
{

	return (vop_stdadd_writecount_impl(ap, false));
}
int
vop_stdneed_inactive(struct vop_need_inactive_args *ap)
{

	return (1);
}
int
vop_stdioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp;
	struct vattr va;
	off_t *offp;
	int error;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		vp = ap->a_vp;
		error = vn_lock(vp, LK_SHARED);
		if (error != 0)
			return (EBADF);
		if (vp->v_type == VREG)
			error = VOP_GETATTR(vp, &va, ap->a_cred);
		else
			error = ENOTTY;
		if (error == 0) {
			offp = ap->a_data;
			if (*offp < 0 || *offp >= va.va_size)
				error = ENXIO;
			else if (ap->a_command == FIOSEEKHOLE)
				*offp = va.va_size;
		}
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, mp_busy)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
	bool *mp_busy;
{

	return (EOPNOTSUPP);
}
int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}
int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}
static int
vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)
{
	int error;

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);
	return (error);
}
int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}
int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}
int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp);
	return (EOPNOTSUPP);
}
int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}
static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)
{

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));
}

int
vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)
{
	vop_bypass_t *bp;
	int prev_stops, rc;

	bp = bp_by_off(vop, a);
	MPASS(bp != NULL);

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);
	rc = bp(a);
	sigallowstop(prev_stops);
	return (rc);
}
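/*
 * Usage sketch (not from this file; the "myfs" names are hypothetical): a
 * filesystem whose VOPs must not be interrupted by stop signals can route
 * all operations through vop_sigdefer() with a bypass:
 *
 *	static int
 *	myfs_vop_bypass(struct vop_generic_args *a)
 *	{
 *
 *		return (vop_sigdefer(&myfs_real_vnodeops, a));
 *	}
 *
 * bp_by_off() then dispatches to the real implementation found at the
 * operation's offset within myfs_real_vnodeops.
 */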
static int
vop_stdstat(struct vop_stat_args *a)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	struct stat *sb;
	int error;
	u_short mode;

	vp = a->a_vp;
	sb = a->a_sb;

	error = vop_stat_helper_pre(a);
	if (error != 0)
		return (error);

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_gen = 0;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, a->a_active_cred);
	if (error)
		goto out;

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		error = EBADF;
		goto out;
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX) {
		error = EOVERFLOW;
		goto out;
	}
	sb->st_size = vap->va_size;
	sb->st_atim.tv_sec = vap->va_atime.tv_sec;
	sb->st_atim.tv_nsec = vap->va_atime.tv_nsec;
	sb->st_mtim.tv_sec = vap->va_mtime.tv_sec;
	sb->st_mtim.tv_nsec = vap->va_mtime.tv_nsec;
	sb->st_ctim.tv_sec = vap->va_ctime.tv_sec;
	sb->st_ctim.tv_nsec = vap->va_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = vap->va_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = vap->va_birthtime.tv_nsec;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
	sb->st_flags = vap->va_flags;
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_gen = vap->va_gen;
out:
	return (vop_stat_helper_post(a, error));
}
static int
vop_stdread_pgcache(struct vop_read_pgcache_args *ap __unused)
{

	return (EJUSTRETURN);
}
static int
vop_stdvput_pair(struct vop_vput_pair_args *ap)
{
	struct vnode *dvp, *vp, **vpp;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	vput(dvp);
	if (vpp != NULL && ap->a_unlock_vp && (vp = *vpp) != NULL)
		vput(vp);
	return (0);
}