/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>

#include <security/mac/mac_framework.h>

#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);
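
/*
 * struct dirent embeds a d_name[MAXNAMLEN + 1] buffer, so the macro below
 * (sizeof(struct dirent) minus that buffer plus 4 bytes of name space) is
 * the smallest record a well-formed directory entry can occupy; it is used
 * to sanity-check d_reclen while walking directory buffers.
 */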
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdcopy_file_range(struct vop_copy_file_range_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);
/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */
struct vop_vector default_vnodeops = {
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
	.vop_copy_file_range =	vop_stdcopy_file_range,
};
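
/*
 * Illustrative sketch (hypothetical "myfs" names, not part of this file):
 * a filesystem normally declares its own vop_vector, chains unimplemented
 * operations to default_vnodeops via vop_default, and overrides at least
 * vop_access or vop_accessx so the mutual recursion described above can
 * never trigger.
 */
#if 0
static struct vop_vector myfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		myfs_access,	/* hypothetical handler */
	.vop_lookup =		myfs_lookup,	/* hypothetical handler */
	.vop_read =		myfs_read,	/* hypothetical handler */
};
#endif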
/*
 * Series of placeholder functions for various error returns for
 * VFS operations.
 */
vop_eopnotsupp(struct vop_generic_args *ap)
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);

vop_ebadf(struct vop_generic_args *ap)

vop_enotty(struct vop_generic_args *ap)

vop_einval(struct vop_generic_args *ap)

vop_enoent(struct vop_generic_args *ap)

vop_null(struct vop_generic_args *ap)
/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
vop_panic(struct vop_generic_args *ap)

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists,
 * is in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */
/*
 * Default vop for filesystems that do not support name lookup
 */
	struct vop_lookup_args /* {
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
/*
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
vop_norename(struct vop_rename_args *ap)
/*
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */
vop_nostrategy (struct vop_strategy_args *ap)
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;

		error = mac_vnode_check_readdir(td->td_ucred, vp);

			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);

		*off = uio.uio_offset;

		*len = (dirbuflen - uio.uio_resid);

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
/*
 * Check if a named file exists in a given directory vnode.
 */
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)

	int error, eofflag, dirbuflen, len, found;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	error = VOP_GETATTR(vp, &va, td->td_ucred);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);

			if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
			    strcmp(dp->d_name, dirname) == 0) {

	} while (len > 0 || !eofflag);

	free(dirbuf, M_TEMP);
vop_stdaccess(struct vop_access_args *ap)

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));

vop_stdaccessx(struct vop_accessx_args *ap)

	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
/*
 * Advisory record locking support
 */
vop_stdadvlock(struct vop_advlock_args *ap)

	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
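
/*
 * For example, an advisory lock request with l_whence == SEEK_END,
 * l_start == -16 and l_len == 16 describes the last 16 bytes of the
 * file, so lf_advlock() needs the current va_size to turn it into an
 * absolute byte range; SEEK_SET requests never need the size, which is
 * why the NFSv4 server can safely skip the vn_lock() above.
 */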
vop_stdadvlockasync(struct vop_advlockasync_args *ap)

	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));

vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)

	lf_purgelocks(vp, &vp->v_lockf);
/*
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
	struct vop_pathconf_args /* {
	} */ *ap;

	switch (ap->a_name) {
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		*ap->a_retval = PATH_MAX;
	case _PC_ACL_EXTENDED:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
/*
 * Standard lock, unlock and islocked functions.
 */
	struct vop_lock1_args /* {
	} */ *ap;
	struct vnode *vp = ap->a_vp;

	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));

	struct vop_unlock_args /* {
	} */ *ap;
	struct vnode *vp = ap->a_vp;

	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object));

	struct vop_islocked_args /* {
	} */ *ap;

	return (lockstatus(ap->a_vp->v_vnlock));
/*
 * Return true for select/poll.
 */
	struct vop_poll_args /* {
		struct ucred *a_cred;
	} */ *ap;

	return (poll_no_poll(ap->a_events));

/*
 * Implement poll for local filesystems that support it.
 */
	struct vop_poll_args /* {
		struct ucred *a_cred;
	} */ *ap;

	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
/*
 * Return our mount point, as we will take charge of the writes.
 */
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct mount **a_mpp;
	} */ *ap;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;

	if (mp != ap->a_vp->v_mount) {
/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
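/*
 * For example, with f_iosize = 16384 and the usual DEV_BSIZE of 512,
 * btodb(16384) = 32, so logical block 5 of a file is reported as
 * physical (device) block 5 * 32 = 160, i.e. the file is treated as a
 * run of equal-sized blocks laid out back to back.
 */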
	struct vop_bmap_args /* {
		struct bufobj **a_bop;
	} */ *ap;

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	struct vop_fsync_args /* {
	} */ *ap;

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));

vop_stdfdatasync(struct vop_fdatasync_args *ap)

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
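
/*
 * The generic default above simply falls back to a full VOP_FSYNC with
 * MNT_WAIT, which gives a strictly stronger guarantee than fdatasync(2)
 * requires; buffer-cache based filesystems can instead point
 * vop_fdatasync at vop_stdfdatasync_buf() below, which flushes the
 * vnode's dirty buffers via vn_fsync_buf().
 */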
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
	struct vop_getpages_args /* {
	} */ *ap;

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
vop_stdgetpages_async(struct vop_getpages_async_args *ap)

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
vop_stdkqfilter(struct vop_kqfilter_args *ap)

	return vfs_kqfilter(ap);
/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
	struct vop_putpages_args /* {
	} */ *ap;

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
vop_stdvptofh(struct vop_vptofh_args *ap)
vop_stdvptocnp(struct vop_vptocnp_args *ap)

	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;

	if (vp->v_type != VDIR)

	error = VOP_GETATTR(vp, &va, cred);

	locked = VOP_ISLOCKED(vp);

	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);

	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);

		vn_lock(vp, locked | LK_RETRY);

	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;

		vn_close(mvp, FREAD, cred, td);

		vn_lock(*dvp, LK_SHARED | LK_RETRY);

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {

		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);

			if ((dp->d_type != DT_WHT) &&
			    (dp->d_fileno == fileno)) {

				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {

					vn_lock(*dvp, LK_SHARED | LK_RETRY);

				vn_lock(*dvp, LK_SHARED | LK_RETRY);

			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {

				bcopy(dp->d_name, buf + i, dp->d_namlen);

	} while (len > 0 || !eofflag);

	free(dirbuf, M_TEMP);

		vn_close(mvp, FREAD, cred, td);

		vn_lock(vp, locked | LK_RETRY);
vop_stdallocate(struct vop_allocate_args *ap)

	off_t maxfilesize = 0;
	struct vattr vattr, *vap;
	off_t fsize, len, cur, offset;

	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);

	fsize = vap->va_size;
	iosize = vap->va_blocksize;

		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)

	buf = malloc(iosize, M_TEMP, M_WAITOK);

	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);

		maxfilesize = sfs->f_maxfilesize;

		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {

		if (offset + len > vap->va_size) {
			/*
			 * Test offset + len against the filesystem's maxfilesize.
			 */
			vap->va_size = offset + len;
			error = VOP_SETATTR(vp, vap, td->td_ucred);

			vap->va_size = fsize;
			error = VOP_SETATTR(vp, vap, td->td_ucred);

		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);

		if (offset < fsize) {

			auio.uio_iov = &aiov;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;

			error = VOP_READ(vp, &auio, 0, td->td_ucred);

			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);

		auio.uio_iov = &aiov;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);

	*ap->a_offset = offset;
vop_stdadvise(struct vop_advise_args *ap)

	daddr_t startn, endn;
	off_t bstart, bend, start, end;

	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
	case POSIX_FADV_DONTNEED:

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {

		/*
		 * Round to block boundaries (and later possibly further to
		 * page boundaries).  Applications cannot reasonably be aware
		 * of the boundaries, and the rounding must expand at both
		 * extremities to cover enough.  It still doesn't cover
		 * read-ahead.  For partial blocks, this gives unnecessary
		 * discarding of buffers but is efficient enough since the
		 * pages usually remain in VMIO for some time.
		 */
		bsize = vp->v_bufobj.bo_bsize;
		bstart = rounddown(ap->a_start, bsize);
		bend = roundup(ap->a_end, bsize);
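
		/*
		 * For example, with bo_bsize = 4096 a request covering bytes
		 * 6000 through 10000 is expanded to bstart = 4096 and
		 * bend = 12288, so whole buffers are covered at both ends.
		 */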
		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(bstart);
			end = round_page(bend);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);

		startn = bstart / bsize;
		endn = bend / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
vop_stdunp_bind(struct vop_unp_bind_args *ap)

	ap->a_vp->v_unpcb = ap->a_unpcb;

vop_stdunp_connect(struct vop_unp_connect_args *ap)

	*ap->a_unpcb = ap->a_vp->v_unpcb;

vop_stdunp_detach(struct vop_unp_detach_args *ap)

	ap->a_vp->v_unpcb = NULL;
vop_stdis_text(struct vop_is_text_args *ap)

	return (ap->a_vp->v_writecount < 0);

vop_stdset_text(struct vop_set_text_args *ap)

	if (vp->v_writecount > 0) {

vop_stdunset_text(struct vop_unset_text_args *ap)

	if (vp->v_writecount < 0) {

vop_stdadd_writecount(struct vop_add_writecount_args *ap)

	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {

		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		vp->v_writecount += ap->a_inc;
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
vfs_stdroot (mp, flags, vpp)

	return (EOPNOTSUPP);

vfs_stdstatfs (mp, sbp)

	return (EOPNOTSUPP);

vfs_stdquotactl (mp, cmds, uid, arg)

	return (EOPNOTSUPP);

vfs_stdsync(mp, waitfor)

	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
		error = VOP_FSYNC(vp, waitfor, td);
vfs_stdnosync (mp, waitfor)

vop_stdcopy_file_range(struct vop_copy_file_range_args *ap)

	error = vn_generic_copy_file_range(ap->a_invp, ap->a_inoffp,
	    ap->a_outvp, ap->a_outoffp, ap->a_lenp, ap->a_flags, ap->a_incred,
	    ap->a_outcred, ap->a_fsizetd);

vfs_stdvget (mp, ino, flags, vpp)

	return (EOPNOTSUPP);

vfs_stdfhtovp (mp, fhp, flags, vpp)

	return (EOPNOTSUPP);
	struct vfsconf *vfsp;

vfs_stduninit (vfsp)
	struct vfsconf *vfsp;

vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct vnode *filename_vp;
	const char *attrname;

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);

vfs_stdsysctl(mp, op, req)
	struct sysctl_req *req;

	return (EOPNOTSUPP);
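
/*
 * Each vnode operation descriptor records the byte offset of its slot in
 * struct vop_vector (vdesc_vop_offset); bp_by_off() returns the handler
 * stored at that offset in the given vector.  vop_sigdefer() below uses
 * it to walk the vop_default chain until it finds a vector that
 * implements the operation, falling back to that vector's vop_bypass.
 */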
static vop_bypass_t *
bp_by_off(struct vop_vector *vop, struct vop_generic_args *a)

	return (*(vop_bypass_t **)((char *)vop + a->a_desc->vdesc_vop_offset));

vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a)

	for (; vop != NULL; vop = vop->vop_default) {
		bp = bp_by_off(vop, a);

		/*
		 * Bypass is not really supported.  It is done for
		 * fallback to unimplemented vops in the default
		 * vector.
		 */
		bp = vop->vop_bypass;

	prev_stops = sigdeferstop(SIGDEFERSTOP_SILENT);

	sigallowstop(prev_stops);