/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdset_text(struct vop_set_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
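
/*
 * For reference: DIRENT_MINSIZE is the smallest record get_next_dirent()
 * will accept, i.e. a struct dirent with its full (MAXNAMLEN+1)-byte name
 * buffer replaced by 4 bytes of name storage.  Assuming the classic layout
 * (32-bit d_fileno, 16-bit d_reclen, 8-bit d_type and d_namlen, 256-byte
 * name buffer), that works out to 264 - 256 + 4 = 12 bytes.
 */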
/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */
struct vop_vector default_vnodeops = {
        .vop_default = NULL,
        .vop_bypass = VOP_EOPNOTSUPP,

        .vop_access = vop_stdaccess,
        .vop_accessx = vop_stdaccessx,
        .vop_advise = vop_stdadvise,
        .vop_advlock = vop_stdadvlock,
        .vop_advlockasync = vop_stdadvlockasync,
        .vop_advlockpurge = vop_stdadvlockpurge,
        .vop_allocate = vop_stdallocate,
        .vop_bmap = vop_stdbmap,
        .vop_close = VOP_NULL,
        .vop_fsync = VOP_NULL,
        .vop_getpages = vop_stdgetpages,
        .vop_getwritemount = vop_stdgetwritemount,
        .vop_inactive = VOP_NULL,
        .vop_ioctl = VOP_ENOTTY,
        .vop_kqfilter = vop_stdkqfilter,
        .vop_islocked = vop_stdislocked,
        .vop_lock1 = vop_stdlock,
        .vop_lookup = vop_nolookup,
        .vop_open = VOP_NULL,
        .vop_pathconf = VOP_EINVAL,
        .vop_poll = vop_nopoll,
        .vop_putpages = vop_stdputpages,
        .vop_readlink = VOP_EINVAL,
        .vop_rename = vop_norename,
        .vop_revoke = VOP_PANIC,
        .vop_strategy = vop_nostrategy,
        .vop_unlock = vop_stdunlock,
        .vop_vptocnp = vop_stdvptocnp,
        .vop_vptofh = vop_stdvptofh,
        .vop_unp_bind = vop_stdunp_bind,
        .vop_unp_connect = vop_stdunp_connect,
        .vop_unp_detach = vop_stdunp_detach,
        .vop_is_text = vop_stdis_text,
        .vop_set_text = vop_stdset_text,
        .vop_unset_text = vop_stdunset_text,
};
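
/*
 * Illustrative sketch (not part of this file): a filesystem normally
 * supplies its own vop_vector and points vop_default at default_vnodeops,
 * so any operation it leaves unset falls back to the entries above.  The
 * "myfs" names below are hypothetical:
 *
 *	static struct vop_vector myfs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	myfs_lookup,
 *		.vop_access =	myfs_access,
 *	};
 */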
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
        /*
        printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
        */

        return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

        return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

        return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

        return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

        return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

        return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

        panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}
/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */
/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
        struct vop_lookup_args /* {
                struct vnode *a_dvp;
                struct vnode **a_vpp;
                struct componentname *a_cnp;
        } */ *ap;
{

        *ap->a_vpp = NULL;
        return (ENOTDIR);
}
/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

        vop_rename_fail(ap);
        return (EOPNOTSUPP);
}
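
/*
 * Background for the above: VOP_RENAME(9) is called with the source
 * directory and file referenced and the target directory and file locked
 * and referenced, and the operation consumes those locks and references
 * even on failure.  vop_rename_fail() centralizes the required unlock and
 * release work so that a filesystem without rename still honors that
 * contract.
 */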
/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
        printf("No strategy for buffer at %p\n", ap->a_bp);
        vprint("vnode", ap->a_vp);
        ap->a_bp->b_ioflags |= BIO_ERROR;
        ap->a_bp->b_error = EOPNOTSUPP;
        bufdone(ap->a_bp);
        return (EOPNOTSUPP);
}
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
                int dirbuflen, off_t *off, char **cpos, int *len,
                int *eofflag, struct thread *td)
{
        int error, reclen;
        struct uio uio;
        struct iovec iov;
        struct dirent *dp;

        KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
        KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

        if (*len == 0) {
                iov.iov_base = dirbuf;
                iov.iov_len = dirbuflen;

                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = *off;
                uio.uio_resid = dirbuflen;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_READ;
                uio.uio_td = td;

#ifdef MAC
                error = mac_vnode_check_readdir(td->td_ucred, vp);
                if (error == 0)
#endif
                        error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
                            NULL, NULL);
                if (error)
                        return (error);

                *off = uio.uio_offset;

                *cpos = dirbuf;
                *len = (dirbuflen - uio.uio_resid);

                if (*len == 0)
                        return (ENOENT);
        }

        dp = (struct dirent *)(*cpos);
        reclen = dp->d_reclen;
        *dpp = dp;

        /* check for malformed directory.. */
        if (reclen < DIRENT_MINSIZE)
                return (EINVAL);

        *cpos += reclen;
        *len -= reclen;

        return (0);
}
/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
        char *dirbuf, *cpos;
        int error, eofflag, dirbuflen, len, found;
        off_t off;
        struct dirent *dp;
        struct vattr va;

        KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
        KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

        found = 0;

        error = VOP_GETATTR(vp, &va, td->td_ucred);
        if (error)
                return (found);

        dirbuflen = DEV_BSIZE;
        if (dirbuflen < va.va_blocksize)
                dirbuflen = va.va_blocksize;
        dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

        off = 0;
        len = 0;
        do {
                error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
                    &cpos, &len, &eofflag, td);
                if (error)
                        goto out;

                if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
                    strcmp(dp->d_name, dirname) == 0) {
                        found = 1;
                        goto out;
                }
        } while (len > 0 || !eofflag);

out:
        free(dirbuf, M_TEMP);
        return (found);
}
int
vop_stdaccess(struct vop_access_args *ap)
{

        KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
            VAPPEND)) == 0, ("invalid bit in accmode"));

        return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}
int
vop_stdaccessx(struct vop_accessx_args *ap)
{
        int error;
        accmode_t accmode = ap->a_accmode;

        error = vfs_unixify_accmode(&accmode);
        if (error != 0)
                return (error);

        if (accmode == 0)
                return (0);

        return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
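
/*
 * Note the mutual fallback above: vop_stdaccess() dispatches to
 * VOP_ACCESSX() and vop_stdaccessx() dispatches to VOP_ACCESS().  A
 * filesystem that implements neither gets each default resolving to the
 * other, recursing until the kernel stack overflows, hence the warning at
 * the top of this file.
 */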
/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
        struct vnode *vp;
        struct ucred *cred;
        struct vattr vattr;
        int error;

        vp = ap->a_vp;
        cred = curthread->td_ucred;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        error = VOP_GETATTR(vp, &vattr, cred);
        VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

        return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}
int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
        struct vnode *vp;
        struct ucred *cred;
        struct vattr vattr;
        int error;

        vp = ap->a_vp;
        cred = curthread->td_ucred;
        vn_lock(vp, LK_SHARED | LK_RETRY);
        error = VOP_GETATTR(vp, &vattr, cred);
        VOP_UNLOCK(vp, 0);
        if (error)
                return (error);

        return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}
int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
        struct vnode *vp;

        vp = ap->a_vp;
        lf_purgelocks(vp, &vp->v_lockf);
        return (0);
}
/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
        struct vop_pathconf_args /* {
                struct vnode *a_vp;
                int a_name;
                int *a_retval;
        } */ *ap;
{

        switch (ap->a_name) {
                case _PC_NAME_MAX:
                        *ap->a_retval = NAME_MAX;
                        return (0);
                case _PC_PATH_MAX:
                        *ap->a_retval = PATH_MAX;
                        return (0);
                case _PC_LINK_MAX:
                        *ap->a_retval = LINK_MAX;
                        return (0);
                case _PC_MAX_CANON:
                        *ap->a_retval = MAX_CANON;
                        return (0);
                case _PC_MAX_INPUT:
                        *ap->a_retval = MAX_INPUT;
                        return (0);
                case _PC_PIPE_BUF:
                        *ap->a_retval = PIPE_BUF;
                        return (0);
                case _PC_CHOWN_RESTRICTED:
                        *ap->a_retval = 1;
                        return (0);
                case _PC_VDISABLE:
                        *ap->a_retval = _POSIX_VDISABLE;
                        return (0);
                default:
                        return (EINVAL);
        }
        /* NOTREACHED */
}
/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
        struct vop_lock1_args /* {
                struct vnode *a_vp;
                int a_flags;
                char *file;
                int line;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;

        return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
            LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
            ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;

        return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
        struct vop_islocked_args /* {
                struct vnode *a_vp;
        } */ *ap;
{

        return (lockstatus(ap->a_vp->v_vnlock));
}
/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int  a_events;
                struct ucred *a_cred;
                struct thread *a_td;
        } */ *ap;
{

        return (poll_no_poll(ap->a_events));
}
/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int  a_events;
                struct ucred *a_cred;
                struct thread *a_td;
        } */ *ap;
{
        if (ap->a_events & ~POLLSTANDARD)
                return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
        return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
        struct vop_getwritemount_args /* {
                struct vnode *a_vp;
                struct mount **a_mpp;
        } */ *ap;
{
        struct mount *mp;

        /*
         * XXX Since this is called unlocked we may be recycled while
         * attempting to ref the mount.  If this is the case our mountpoint
         * will be set to NULL.  We only have to prevent this call from
         * returning with a ref to an incorrect mountpoint.  It is not
         * harmful to return with a ref to our previous mountpoint.
         */
        mp = ap->a_vp->v_mount;
        if (mp != NULL) {
                vfs_ref(mp);
                if (mp != ap->a_vp->v_mount) {
                        vfs_rel(mp);
                        mp = NULL;
                }
        }
        *(ap->a_mpp) = mp;
        return (0);
}
/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
        struct vop_bmap_args /* {
                struct vnode *a_vp;
                daddr_t  a_bn;
                struct bufobj **a_bop;
                daddr_t *a_bnp;
                int *a_runp;
                int *a_runb;
        } */ *ap;
{

        if (ap->a_bop != NULL)
                *ap->a_bop = &ap->a_vp->v_bufobj;
        if (ap->a_bnp != NULL)
                *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
        if (ap->a_runp != NULL)
                *ap->a_runp = 0;
        if (ap->a_runb != NULL)
                *ap->a_runb = 0;
        return (0);
}
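
/*
 * Worked example of the mapping above: with f_iosize of 8192 bytes and
 * 512-byte device blocks, btodb(8192) is 16, so logical block 10 maps to
 * device block 160.  The zero run lengths (a_runp/a_runb) tell the caller
 * not to expect any contiguity beyond the requested block.
 */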
int
vop_stdfsync(ap)
        struct vop_fsync_args /* {
                struct vnode *a_vp;
                struct ucred *a_cred;
                int a_waitfor;
                struct thread *a_td;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;
        struct buf *bp;
        struct bufobj *bo;
        struct buf *nbp;
        int error = 0;
        int maxretry = 1000;     /* large, arbitrarily chosen */

        bo = &vp->v_bufobj;
        BO_LOCK(bo);
loop1:
        /*
         * MARK/SCAN initialization to avoid infinite loops.
         */
        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
                bp->b_vflags &= ~BV_SCANNED;
                bp->b_error = 0;
        }

        /*
         * Flush all dirty buffers associated with a vnode.
         */
loop2:
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
                if ((bp->b_vflags & BV_SCANNED) != 0)
                        continue;
                bp->b_vflags |= BV_SCANNED;
                if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
                        if (ap->a_waitfor != MNT_WAIT)
                                continue;
                        if (BUF_LOCK(bp,
                            LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
                            BO_MTX(bo)) != 0) {
                                BO_LOCK(bo);
                                goto loop1;
                        }
                        BO_LOCK(bo);
                }
                BO_UNLOCK(bo);
                KASSERT(bp->b_bufobj == bo,
                    ("bp %p wrong b_bufobj %p should be %p",
                    bp, bp->b_bufobj, bo));
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("fsync: not dirty");
                if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
                        vfs_bio_awrite(bp);
                } else {
                        bremfree(bp);
                        bawrite(bp);
                }
                BO_LOCK(bo);
                goto loop2;
        }

        /*
         * If synchronous the caller expects us to completely resolve all
         * dirty buffers in the system.  Wait for in-progress I/O to
         * complete (which could include background bitmap writes), then
         * retry if dirty blocks still exist.
         */
        if (ap->a_waitfor == MNT_WAIT) {
                bufobj_wwait(bo, 0, 0);
                if (bo->bo_dirty.bv_cnt > 0) {
                        /*
                         * If we are unable to write any of these buffers
                         * then we fail now rather than trying endlessly
                         * to write them out.
                         */
                        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
                                if ((error = bp->b_error) == 0)
                                        continue;
                        if (error == 0 && --maxretry >= 0)
                                goto loop1;
                        error = EAGAIN;
                }
        }
        BO_UNLOCK(bo);
        if (error == EAGAIN)
                vprint("fsync: giving up on dirty", vp);

        return (error);
}
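
/*
 * Usage note: callers that need data on stable storage pass MNT_WAIT, for
 * example VOP_FSYNC(vp, MNT_WAIT, td); the call then does not return until
 * every dirty buffer is resolved, or EAGAIN once the retry budget above is
 * exhausted.  MNT_NOWAIT only starts the writes that can be issued without
 * sleeping on a buffer lock.
 */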
/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
        struct vop_getpages_args /* {
                struct vnode *a_vp;
                vm_page_t *a_m;
                int a_count;
                int a_reqpage;
                vm_ooffset_t a_offset;
        } */ *ap;
{

        return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
            ap->a_count, ap->a_reqpage);
}
int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
        return vfs_kqfilter(ap);
}
/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
        struct vop_putpages_args /* {
                struct vnode *a_vp;
                vm_page_t *a_m;
                int a_count;
                int a_sync;
                int *a_rtvals;
                vm_ooffset_t a_offset;
        } */ *ap;
{

        return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
             ap->a_sync, ap->a_rtvals);
}
int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
        return (EOPNOTSUPP);
}
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vnode **dvp = ap->a_vpp;
        struct ucred *cred = ap->a_cred;
        char *buf = ap->a_buf;
        int *buflen = ap->a_buflen;
        char *dirbuf, *cpos;
        int i, error, eofflag, dirbuflen, flags, locked, len, covered;
        off_t off;
        ino_t fileno;
        struct vattr va;
        struct nameidata nd;
        struct thread *td;
        struct dirent *dp;
        struct vnode *mvp;

        i = *buflen;
        error = 0;
        covered = 0;
        td = curthread;

        if (vp->v_type != VDIR)
                return (ENOENT);

        error = VOP_GETATTR(vp, &va, cred);
        if (error)
                return (error);

        VREF(vp);
        locked = VOP_ISLOCKED(vp);
        VOP_UNLOCK(vp, 0);
        NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
            "..", vp, td);
        flags = FREAD;
        error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
        if (error) {
                vn_lock(vp, locked | LK_RETRY);
                return (error);
        }
        NDFREE(&nd, NDF_ONLY_PNBUF);

        mvp = *dvp = nd.ni_vp;

        if (vp->v_mount != (*dvp)->v_mount &&
            ((*dvp)->v_vflag & VV_ROOT) &&
            ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
                *dvp = (*dvp)->v_mount->mnt_vnodecovered;
                VREF(mvp);
                VOP_UNLOCK(mvp, 0);
                vn_close(mvp, FREAD, cred, td);
                VREF(*dvp);
                vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
                covered = 1;
        }

        fileno = va.va_fileid;

        dirbuflen = DEV_BSIZE;
        if (dirbuflen < va.va_blocksize)
                dirbuflen = va.va_blocksize;
        dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

        if ((*dvp)->v_type != VDIR) {
                error = ENOENT;
                goto out;
        }

        off = 0;
        len = 0;
        do {
                /* call VOP_READDIR of parent */
                error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
                    &cpos, &len, &eofflag, td);
                if (error)
                        goto out;

                if ((dp->d_type != DT_WHT) &&
                    (dp->d_fileno == fileno)) {
                        if (covered) {
                                VOP_UNLOCK(*dvp, 0);
                                vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
                                if (dirent_exists(mvp, dp->d_name, td)) {
                                        error = ENOENT;
                                        VOP_UNLOCK(mvp, 0);
                                        vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
                                        goto out;
                                }
                                VOP_UNLOCK(mvp, 0);
                                vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
                        }
                        i -= dp->d_namlen;

                        if (i < 0) {
                                error = ENOMEM;
                                goto out;
                        }
                        bcopy(dp->d_name, buf + i, dp->d_namlen);
                        error = 0;
                        goto out;
                }
        } while (len > 0 || !eofflag);
        error = ENOENT;

out:
        free(dirbuf, M_TEMP);
        if (!error) {
                *buflen = i;
                vref(*dvp);
        }
        if (covered) {
                vput(*dvp);
                vrele(mvp);
        } else {
                VOP_UNLOCK(mvp, 0);
                vn_close(mvp, FREAD, cred, td);
        }
        vn_lock(vp, locked | LK_RETRY);
        return (error);
}
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
        struct statfs sfs;
#endif
        struct iovec aiov;
        struct vattr vattr, *vap;
        struct uio auio;
        off_t fsize, len, cur, offset;
        uint8_t *buf;
        struct thread *td;
        struct vnode *vp;
        size_t iosize;
        int error;

        buf = NULL;
        error = 0;
        td = curthread;
        vap = &vattr;
        vp = ap->a_vp;
        len = *ap->a_len;
        offset = *ap->a_offset;

        error = VOP_GETATTR(vp, vap, td->td_ucred);
        if (error != 0)
                goto out;
        fsize = vap->va_size;
        iosize = vap->va_blocksize;
        if (iosize == 0)
                iosize = BLKDEV_IOSIZE;
        if (iosize > MAXPHYS)
                iosize = MAXPHYS;
        buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
        /*
         * Check if the filesystem sets f_maxfilesize; if not use
         * VOP_SETATTR to perform the check.
         */
        error = VFS_STATFS(vp->v_mount, &sfs, td);
        if (error != 0)
                goto out;
        if (sfs.f_maxfilesize) {
                if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
                    offset + len > sfs.f_maxfilesize) {
                        error = EFBIG;
                        goto out;
                }
        } else
#endif
        if (offset + len > vap->va_size) {
                /*
                 * Test offset + len against the filesystem's maxfilesize.
                 */
                VATTR_NULL(vap);
                vap->va_size = offset + len;
                error = VOP_SETATTR(vp, vap, td->td_ucred);
                if (error != 0)
                        goto out;
                VATTR_NULL(vap);
                vap->va_size = fsize;
                error = VOP_SETATTR(vp, vap, td->td_ucred);
                if (error != 0)
                        goto out;
        }

        for (;;) {
                /*
                 * Read and write back anything below the nominal file
                 * size.  There's currently no way outside the filesystem
                 * to know whether this area is sparse or not.
                 */
                cur = iosize;
                if ((offset % iosize) != 0)
                        cur -= (offset % iosize);
                if (cur > len)
                        cur = len;
                if (offset < fsize) {
                        aiov.iov_base = buf;
                        aiov.iov_len = cur;
                        auio.uio_iov = &aiov;
                        auio.uio_iovcnt = 1;
                        auio.uio_offset = offset;
                        auio.uio_resid = cur;
                        auio.uio_segflg = UIO_SYSSPACE;
                        auio.uio_rw = UIO_READ;
                        auio.uio_td = td;
                        error = VOP_READ(vp, &auio, 0, td->td_ucred);
                        if (error != 0)
                                break;
                        if (auio.uio_resid > 0) {
                                bzero(buf + cur - auio.uio_resid,
                                    auio.uio_resid);
                        }
                } else {
                        bzero(buf, cur);
                }

                aiov.iov_base = buf;
                aiov.iov_len = cur;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = offset;
                auio.uio_resid = cur;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_WRITE;
                auio.uio_td = td;

                error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
                if (error != 0)
                        break;

                len -= cur;
                offset += cur;
                if (len == 0)
                        break;
                if (should_yield())
                        break;
        }

 out:
        *ap->a_len = len;
        *ap->a_offset = offset;
        free(buf, M_TEMP);
        return (error);
}
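
/*
 * This is the generic fallback behind posix_fallocate(2): without
 * filesystem support, it forces block allocation by reading and rewriting
 * (or zero-filling) every block in the range.  The alignment step above
 * only trims the first pass; for example, with iosize 65536 and offset
 * 100000, offset % iosize is 34464, cur becomes 31072, and the next pass
 * starts at the block boundary 131072.
 */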
int
vop_stdadvise(struct vop_advise_args *ap)
{
        struct vnode *vp;
        off_t start, end;
        int error, vfslocked;

        vp = ap->a_vp;
        switch (ap->a_advice) {
        case POSIX_FADV_WILLNEED:
                /*
                 * Do nothing for now.  Filesystems should provide a
                 * custom method which starts an asynchronous read of
                 * the requested region.
                 */
                error = 0;
                break;
        case POSIX_FADV_DONTNEED:
                /*
                 * Flush any open FS buffers and then remove pages
                 * from the backing VM object.  Using vinvalbuf() here
                 * is a bit heavy-handed as it flushes all buffers for
                 * the given vnode, not just the buffers covering the
                 * requested range.
                 */
                error = 0;
                vfslocked = VFS_LOCK_GIANT(vp->v_mount);
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                if (vp->v_iflag & VI_DOOMED) {
                        VOP_UNLOCK(vp, 0);
                        VFS_UNLOCK_GIANT(vfslocked);
                        break;
                }
                vinvalbuf(vp, V_CLEANONLY, 0, 0);
                if (vp->v_object != NULL) {
                        start = trunc_page(ap->a_start);
                        end = round_page(ap->a_end);
                        VM_OBJECT_LOCK(vp->v_object);
                        vm_object_page_cache(vp->v_object, OFF_TO_IDX(start),
                            OFF_TO_IDX(end));
                        VM_OBJECT_UNLOCK(vp->v_object);
                }
                VOP_UNLOCK(vp, 0);
                VFS_UNLOCK_GIANT(vfslocked);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}
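
/*
 * This VOP is reached through posix_fadvise(2), e.g.
 * posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED).  The other advice
 * values are absorbed higher up in the syscall implementation, so only
 * WILLNEED, DONTNEED and the EINVAL default appear here.
 */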
int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

        ap->a_vp->v_socket = ap->a_socket;
        return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

        *ap->a_socket = ap->a_vp->v_socket;
        return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

        ap->a_vp->v_socket = NULL;
        return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

        return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
}

static int
vop_stdset_text(struct vop_set_text_args *ap)
{

        ap->a_vp->v_vflag |= VV_TEXT;
        return (0);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{

        ap->a_vp->v_vflag &= ~VV_TEXT;
        return (0);
}
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
        struct mount *mp;
        int flags;
        struct vnode **vpp;
{

        return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
        struct mount *mp;
        struct statfs *sbp;
{

        return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
        struct mount *mp;
        int cmds;
        uid_t uid;
        void *arg;
{

        return (EOPNOTSUPP);
}
int
vfs_stdsync(mp, waitfor)
        struct mount *mp;
        int waitfor;
{
        struct vnode *vp, *mvp;
        struct thread *td;
        int error, lockreq, allerror = 0;

        td = curthread;
        lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
        if (waitfor != MNT_WAIT)
                lockreq |= LK_NOWAIT;
        /*
         * Force stale buffer cache information to be flushed.
         */
loop:
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
                        VI_UNLOCK(vp);
                        continue;
                }
                if ((error = vget(vp, lockreq, td)) != 0) {
                        if (error == ENOENT) {
                                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                                goto loop;
                        }
                        continue;
                }
                error = VOP_FSYNC(vp, waitfor, td);
                if (error)
                        allerror = error;
                vput(vp);
        }
        return (allerror);
}
int
vfs_stdnosync (mp, waitfor)
        struct mount *mp;
        int waitfor;
{

        return (0);
}
int
vfs_stdvget (mp, ino, flags, vpp)
        struct mount *mp;
        ino_t ino;
        int flags;
        struct vnode **vpp;
{

        return (EOPNOTSUPP);
}
int
vfs_stdfhtovp (mp, fhp, flags, vpp)
        struct mount *mp;
        struct fid *fhp;
        int flags;
        struct vnode **vpp;
{

        return (EOPNOTSUPP);
}
int
vfs_stdinit (vfsp)
        struct vfsconf *vfsp;
{

        return (0);
}

int
vfs_stduninit (vfsp)
        struct vfsconf *vfsp;
{

        return(0);
}
int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
        struct mount *mp;
        int cmd;
        struct vnode *filename_vp;
        int attrnamespace;
        const char *attrname;
{

        if (filename_vp != NULL)
                VOP_UNLOCK(filename_vp, 0);
        return (EOPNOTSUPP);
}
int
vfs_stdsysctl(mp, op, req)
        struct mount *mp;
        fsctlop_t op;
        struct sysctl_req *req;
{

        return (EOPNOTSUPP);
}

/* end of vfs default ops */