/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include "opt_directio.h"
#include "opt_ffs.h"

#define ALIGNED_TO(ptr, s) \
        (((uintptr_t)(ptr) & (_Alignof(s) - 1)) == 0)
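
/*
 * Usage sketch for ALIGNED_TO(): the macro masks the pointer's low bits
 * against the type's required alignment.  Assuming _Alignof(struct
 * extattr) is 4, an address such as 0x1004 passes the check while 0x1006
 * fails, since 0x1006 & 3 != 0.
 */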

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fdatasync_t ffs_fdatasync;
static vop_fsync_t ffs_fsync;
static vop_getpages_t ffs_getpages;
static vop_lock1_t ffs_lock;
static vop_read_t ffs_read;
static vop_write_t ffs_write;
static int ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred);
static vop_strategy_t ffsext_strategy;
static vop_closeextattr_t ffs_closeextattr;
static vop_deleteextattr_t ffs_deleteextattr;
static vop_getextattr_t ffs_getextattr;
static vop_listextattr_t ffs_listextattr;
static vop_openextattr_t ffs_openextattr;
static vop_setextattr_t ffs_setextattr;
static vop_vptofh_t ffs_vptofh;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
        .vop_default = &ufs_vnodeops,
        .vop_fsync = ffs_fsync,
        .vop_fdatasync = ffs_fdatasync,
        .vop_getpages = ffs_getpages,
        .vop_getpages_async = vnode_pager_local_getpages_async,
        .vop_lock1 = ffs_lock,
        .vop_read = ffs_read,
        .vop_reallocblks = ffs_reallocblks,
        .vop_write = ffs_write,
        .vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
        .vop_default = &ufs_fifoops,
        .vop_fsync = ffs_fsync,
        .vop_fdatasync = ffs_fdatasync,
        .vop_reallocblks = ffs_reallocblks, /* XXX: really ??? */
        .vop_vptofh = ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
        .vop_default = &ufs_vnodeops,
        .vop_fsync = ffs_fsync,
        .vop_fdatasync = ffs_fdatasync,
        .vop_getpages = ffs_getpages,
        .vop_getpages_async = vnode_pager_local_getpages_async,
        .vop_lock1 = ffs_lock,
        .vop_read = ffs_read,
        .vop_reallocblks = ffs_reallocblks,
        .vop_write = ffs_write,
        .vop_closeextattr = ffs_closeextattr,
        .vop_deleteextattr = ffs_deleteextattr,
        .vop_getextattr = ffs_getextattr,
        .vop_listextattr = ffs_listextattr,
        .vop_openextattr = ffs_openextattr,
        .vop_setextattr = ffs_setextattr,
        .vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
        .vop_default = &ufs_fifoops,
        .vop_fsync = ffs_fsync,
        .vop_fdatasync = ffs_fdatasync,
        .vop_lock1 = ffs_lock,
        .vop_reallocblks = ffs_reallocblks,
        .vop_strategy = ffsext_strategy,
        .vop_closeextattr = ffs_closeextattr,
        .vop_deleteextattr = ffs_deleteextattr,
        .vop_getextattr = ffs_getextattr,
        .vop_listextattr = ffs_listextattr,
        .vop_openextattr = ffs_openextattr,
        .vop_setextattr = ffs_setextattr,
        .vop_vptofh = ffs_vptofh,
};
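
/*
 * Any operation without an explicit entry in the tables above falls
 * through to the vector named by .vop_default, so these tables only
 * override the generic ufs_vnodeops/ufs_fifoops behavior where FFS has
 * to intercept it: the I/O paths, vnode locking, and extended attributes.
 */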

/*
 * Synch an open file.
 */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
        struct vnode *vp;
        struct bufobj *bo;
        int error;

        vp = ap->a_vp;
        bo = &vp->v_bufobj;
retry:
        error = ffs_syncvnode(vp, ap->a_waitfor, 0);
        if (error)
                return (error);
        if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
                error = softdep_fsync(vp);
                if (error)
                        return (error);

                /*
                 * The softdep_fsync() function may drop vp lock,
                 * allowing for dirty buffers to reappear on the
                 * bo_dirty list.  Recheck and resync as needed.
                 */
                BO_LOCK(bo);
                if ((vp->v_type == VREG || vp->v_type == VDIR) &&
                    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
                        BO_UNLOCK(bo);
                        goto retry;
                }
                BO_UNLOCK(bo);
        }
        return (error);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
        struct inode *ip;
        struct bufobj *bo;
        struct buf *bp, *nbp;
        ufs_lbn_t lbn;
        int error, passes;
        bool still_dirty, wait;

        ip = VTOI(vp);
        ip->i_flag &= ~IN_NEEDSYNC;
        bo = &vp->v_bufobj;

        /*
         * When doing MNT_WAIT we must first flush all dependencies
         * on the inode.
         */
        if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
            (error = softdep_sync_metadata(vp)) != 0)
                return (error);

        /*
         * Flush all dirty buffers associated with a vnode.
         */
        error = 0;
        passes = 0;
        wait = false;   /* Always do an async pass first. */
        lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
        BO_LOCK(bo);
loop:
        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
                bp->b_vflags &= ~BV_SCANNED;
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
                /*
                 * Reasons to skip this buffer: it has already been considered
                 * on this pass, the buffer has dependencies that will cause
                 * it to be redirtied and it has not already been deferred,
                 * or it is already being written.
                 */
                if ((bp->b_vflags & BV_SCANNED) != 0)
                        continue;
                bp->b_vflags |= BV_SCANNED;
                /*
                 * Flush indirects in order, if requested.
                 *
                 * Note that if only datasync is requested, we can
                 * skip indirect blocks when softupdates are not
                 * active.  Otherwise we must flush them with data,
                 * since dependencies prevent data block writes.
                 */
                if (waitfor == MNT_WAIT && bp->b_lblkno <= -UFS_NDADDR &&
                    (lbn_level(bp->b_lblkno) >= passes ||
                    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
                        continue;
                if (bp->b_lblkno > lbn)
                        panic("ffs_syncvnode: syncing truncated data.");
                if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
                        BO_UNLOCK(bo);
                } else if (wait) {
                        if (BUF_LOCK(bp,
                            LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
                            BO_LOCKPTR(bo)) != 0) {
                                bp->b_vflags &= ~BV_SCANNED;
                                goto next;
                        }
                } else
                        continue;
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("ffs_fsync: not dirty");
                /*
                 * Check for dependencies and potentially complete them.
                 */
                if (!LIST_EMPTY(&bp->b_dep) &&
                    (error = softdep_sync_buf(vp, bp,
                    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
                        /* I/O error. */
                        if (error != EBUSY) {
                                BUF_UNLOCK(bp);
                                return (error);
                        }
                        /* If we deferred once, don't defer again. */
                        if ((bp->b_flags & B_DEFERRED) == 0) {
                                bp->b_flags |= B_DEFERRED;
                                BUF_UNLOCK(bp);
                                goto next;
                        }
                }
                if (wait) {
                        bremfree(bp);
                        if ((error = bwrite(bp)) != 0)
                                return (error);
                } else if ((bp->b_flags & B_CLUSTEROK)) {
                        (void) vfs_bio_awrite(bp);
                } else {
                        bremfree(bp);
                        (void) bawrite(bp);
                }
next:
                /*
                 * Since we may have slept during the I/O, we need
                 * to start from a known point.
                 */
                BO_LOCK(bo);
                nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
        }
        if (waitfor != MNT_WAIT) {
                BO_UNLOCK(bo);
                if ((flags & NO_INO_UPDT) != 0)
                        return (0);
                else
                        return (ffs_update(vp, 0));
        }
        /* Drain IO to see if we're done. */
        bufobj_wwait(bo, 0, 0);
        /*
         * Block devices associated with filesystems may have new I/O
         * requests posted for them even if the vnode is locked, so no
         * amount of trying will get them clean.  We make several passes
         * as a best effort.
         *
         * Regular files may need multiple passes to flush all dependency
         * work as it is possible that we must write once per indirect
         * level, once for the leaf, and once for the inode and each of
         * these will be done with one sync and one async pass.
         */
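        /*
         * A worked reading of the retry budget below (derived from the
         * loop condition, with UFS_NIADDR == 3 indirect levels): each
         * increment of "passes" covers one async and one sync sweep, and
         * "++passes < UFS_NIADDR + 2" permits five such pairs, enough for
         * the leaf data, the three indirect levels, and the inode.
         */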
        if (bo->bo_dirty.bv_cnt > 0) {
                if ((flags & DATA_ONLY) == 0) {
                        still_dirty = true;
                } else {
                        /*
                         * For data-only sync, dirty indirect buffers
                         * are ignored.
                         */
                        still_dirty = false;
                        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
                                if (bp->b_lblkno > -UFS_NDADDR) {
                                        still_dirty = true;
                                        break;
                                }
                        }
                }

                if (still_dirty) {
                        /* Write the inode after sync passes to flush deps. */
                        if (wait && DOINGSOFTDEP(vp) &&
                            (flags & NO_INO_UPDT) == 0) {
                                BO_UNLOCK(bo);
                                ffs_update(vp, 1);
                                BO_LOCK(bo);
                        }
                        /* switch between sync/async. */
                        wait = !wait;
                        if (wait || ++passes < UFS_NIADDR + 2)
                                goto loop;
#ifdef INVARIANTS
                        if (!vn_isdisk(vp, NULL))
                                vn_printf(vp, "ffs_fsync: dirty ");
#endif
                }
        }
        BO_UNLOCK(bo);
        error = 0;
        if ((flags & DATA_ONLY) == 0) {
                if ((flags & NO_INO_UPDT) == 0)
                        error = ffs_update(vp, 1);
                if (DOINGSUJ(vp))
                        softdep_journal_fsync(VTOI(vp));
        }
        return (error);
}

static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

        return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}
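
/*
 * VOP_FDATASYNC is thus the data-only flavor of VOP_FSYNC: with DATA_ONLY
 * set, ffs_syncvnode() skips indirect blocks when soft updates are not
 * active and omits the final inode update (see the DATA_ONLY checks above).
 */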

static int
ffs_lock(ap)
        struct vop_lock1_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct thread *a_td;
                char *file;
                int line;
        } */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
        struct vnode *vp;
        int flags;
        struct lock *lkp;
        int result;

        switch (ap->a_flags & LK_TYPE_MASK) {
        case LK_SHARED:
        case LK_UPGRADE:
        case LK_EXCLUSIVE:
                vp = ap->a_vp;
                flags = ap->a_flags;
                for (;;) {
#ifdef DEBUG_VFS_LOCKS
                        KASSERT(vp->v_holdcnt != 0,
                            ("ffs_lock %p: zero hold count", vp));
#endif
                        lkp = vp->v_vnlock;
                        result = _lockmgr_args(lkp, flags, VI_MTX(vp),
                            LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
                            ap->a_file, ap->a_line);
                        if (lkp == vp->v_vnlock || result != 0)
                                break;
                        /*
                         * Apparent success, except that the vnode
                         * mutated between snapshot file vnode and
                         * regular file vnode while this process
                         * slept.  The lock currently held is not the
                         * right lock.  Release it, and try to get the
                         * new lock.
                         */
                        (void) _lockmgr_args(lkp, LK_RELEASE, NULL,
                            LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
                            ap->a_file, ap->a_line);
                        if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
                            (LK_INTERLOCK | LK_NOWAIT))
                                return (EBUSY);
                        if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
                                flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
                        flags &= ~LK_INTERLOCK;
                }
                break;
        default:
                result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
        }
        return (result);
#else
        return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
static int
ffs_read(ap)
        struct vop_read_args /* {
                struct vnode *a_vp;
                struct uio *a_uio;
                int a_ioflag;
                struct ucred *a_cred;
        } */ *ap;
{
        struct vnode *vp;
        struct inode *ip;
        struct uio *uio;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn, nextlbn;
        off_t bytesinfile;
        long size, xfersize, blkoffset;
        ssize_t orig_resid;
        int error;
        int seqcount;
        int ioflag;

        vp = ap->a_vp;
        uio = ap->a_uio;
        ioflag = ap->a_ioflag;
        if (ap->a_ioflag & IO_EXT)
#ifdef notyet
                return (ffs_extread(vp, uio, ioflag));
#else
                panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
        if ((ioflag & IO_DIRECT) != 0) {
                int workdone;

                error = ffs_rawread(vp, uio, &workdone);
                if (error != 0 || workdone != 0)
                        return (error);
        }
#endif

        seqcount = ap->a_ioflag >> IO_SEQSHIFT;
        ip = VTOI(vp);

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_READ)
                panic("ffs_read: mode");

        if (vp->v_type == VLNK) {
                if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
                        panic("ffs_read: short symlink");
        } else if (vp->v_type != VREG && vp->v_type != VDIR)
                panic("ffs_read: type %d", vp->v_type);
#endif
        orig_resid = uio->uio_resid;
        KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
        if (orig_resid == 0)
                return (0);
        KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
        fs = ITOFS(ip);
        if (uio->uio_offset < ip->i_size &&
            uio->uio_offset >= fs->fs_maxfilesize)
                return (EOVERFLOW);

        for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
                if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
                        break;
                lbn = lblkno(fs, uio->uio_offset);
                nextlbn = lbn + 1;

                /*
                 * size of buffer.  The buffer representing the
                 * end of the file is rounded up to the size of
                 * the block type (fragment or full block,
                 * depending).
                 */
                size = blksize(fs, ip, lbn);
                blkoffset = blkoff(fs, uio->uio_offset);

                /*
                 * The amount we want to transfer in this iteration is
                 * one FS block less the amount of the data before
                 * our startpoint (duh!)
                 */
                xfersize = fs->fs_bsize - blkoffset;
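
                /*
                 * Worked example with illustrative numbers: for a
                 * 32768-byte block and uio_offset 33000, lbn is 1,
                 * blkoffset is 232, and xfersize starts at
                 * 32768 - 232 = 32536 before the clamping below.
                 */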
                /*
                 * But if we actually want less than the block,
                 * or the file doesn't have a whole block more of data,
                 * then use the lesser number.
                 */
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (bytesinfile < xfersize)
                        xfersize = bytesinfile;

                if (lblktosize(fs, nextlbn) >= ip->i_size) {
                        /*
                         * Don't do readahead if this is the end of the file.
                         */
                        error = bread_gb(vp, lbn, size, NOCRED,
                            GB_UNMAPPED, &bp);
                } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
                        /*
                         * Otherwise if we are allowed to cluster,
                         * grab as much as we can.
                         *
                         * XXX  This may not be a win if we are not
                         * doing sequential access.
                         */
                        error = cluster_read(vp, ip->i_size, lbn,
                            size, NOCRED, blkoffset + uio->uio_resid,
                            seqcount, GB_UNMAPPED, &bp);
                } else if (seqcount > 1) {
                        /*
                         * If we are NOT allowed to cluster, then
                         * if we appear to be acting sequentially,
                         * fire off a request for a readahead
                         * as well as a read.  Note that the 4th and 5th
                         * arguments point to arrays of the size specified in
                         * the 6th argument.
                         */
                        u_int nextsize = blksize(fs, ip, nextlbn);
                        error = breadn_flags(vp, lbn, size, &nextlbn,
                            &nextsize, 1, NOCRED, GB_UNMAPPED, &bp);
                } else {
                        /*
                         * Failing all of the above, just read what the
                         * user asked for.  Interestingly, the same as
                         * the first option above.
                         */
                        error = bread_gb(vp, lbn, size, NOCRED,
                            GB_UNMAPPED, &bp);
                }
                if (error) {
                        brelse(bp);
                        bp = NULL;
                        break;
                }

                /*
                 * We should only get non-zero b_resid when an I/O error
                 * has occurred, which should cause us to break above.
                 * However, if the short read did not cause an error,
                 * then we want to ensure that we do not uiomove bad
                 * or uninitialized data.
                 */
                size -= bp->b_resid;
                if (size < xfersize) {
                        if (size == 0)
                                break;
                        xfersize = size;
                }

                if (buf_mapped(bp)) {
                        error = vn_io_fault_uiomove((char *)bp->b_data +
                            blkoffset, (int)xfersize, uio);
                } else {
                        error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
                            (int)xfersize, uio);
                }
                if (error)
                        break;

                vfs_bio_brelse(bp, ioflag);
        }

        /*
         * This can only happen in the case of an error, because the loop
         * above resets bp to NULL on each iteration and on normal
         * completion has not set a new value into it, so it must have come
         * from a 'break' statement.
         */
        if (bp != NULL)
                vfs_bio_brelse(bp, ioflag);

        if ((error == 0 || uio->uio_resid != orig_resid) &&
            (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0 &&
            (ip->i_flag & IN_ACCESS) == 0) {
                VI_LOCK(vp);
                ip->i_flag |= IN_ACCESS;
                VI_UNLOCK(vp);
        }
        return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
        struct vop_write_args /* {
                struct vnode *a_vp;
                struct uio *a_uio;
                int a_ioflag;
                struct ucred *a_cred;
        } */ *ap;
{
        struct vnode *vp;
        struct uio *uio;
        struct inode *ip;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn;
        off_t osize;
        ssize_t resid;
        int seqcount;
        int blkoffset, error, flags, ioflag, size, xfersize;

        vp = ap->a_vp;
        uio = ap->a_uio;
        ioflag = ap->a_ioflag;
        if (ap->a_ioflag & IO_EXT)
#ifdef notyet
                return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
                panic("ffs_write+IO_EXT");
#endif

        seqcount = ap->a_ioflag >> IO_SEQSHIFT;
        ip = VTOI(vp);

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_WRITE)
                panic("ffs_write: mode");
#endif

        switch (vp->v_type) {
        case VREG:
                if (ioflag & IO_APPEND)
                        uio->uio_offset = ip->i_size;
                if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
                        return (EPERM);
                /* FALLTHROUGH */
        case VLNK:
                break;
        case VDIR:
                panic("ffs_write: dir write");
                break;
        default:
                panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
                    (int)uio->uio_offset,
                    (int)uio->uio_resid);
        }

        KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
        KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
        fs = ITOFS(ip);
        if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
                return (EFBIG);
        /*
         * Maybe this should be above the vnode op call, but so long as
         * file servers have no limits, I don't think it matters.
         */
        if (vn_rlimit_fsize(vp, uio, uio->uio_td))
                return (EFBIG);

        resid = uio->uio_resid;
        osize = ip->i_size;
        if (seqcount > BA_SEQMAX)
                flags = BA_SEQMAX << BA_SEQSHIFT;
        else
                flags = seqcount << BA_SEQSHIFT;
        if (ioflag & IO_SYNC)
                flags |= IO_SYNC;
        flags |= BA_UNMAPPED;

        for (error = 0; uio->uio_resid > 0;) {
                lbn = lblkno(fs, uio->uio_offset);
                blkoffset = blkoff(fs, uio->uio_offset);
                xfersize = fs->fs_bsize - blkoffset;
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (uio->uio_offset + xfersize > ip->i_size)
                        vnode_pager_setsize(vp, uio->uio_offset + xfersize);

                /*
                 * We must perform a read-before-write if the transfer size
                 * does not cover the entire buffer.
                 */
                if (fs->fs_bsize > xfersize)
                        flags |= BA_CLRBUF;
                else
                        flags &= ~BA_CLRBUF;
/* XXX is uio->uio_offset the right thing here? */
                error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
                    ap->a_cred, flags, &bp);
                if (error != 0) {
                        vnode_pager_setsize(vp, ip->i_size);
                        break;
                }
                if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
                        bp->b_flags |= B_NOCACHE;

                if (uio->uio_offset + xfersize > ip->i_size) {
                        ip->i_size = uio->uio_offset + xfersize;
                        DIP_SET(ip, i_size, ip->i_size);
                }

                size = blksize(fs, ip, lbn) - bp->b_resid;
                if (size < xfersize)
                        xfersize = size;

                if (buf_mapped(bp)) {
                        error = vn_io_fault_uiomove((char *)bp->b_data +
                            blkoffset, (int)xfersize, uio);
                } else {
                        error = vn_io_fault_pgmove(bp->b_pages, blkoffset,
                            (int)xfersize, uio);
                }
                /*
                 * If the buffer is not already filled and we encounter an
                 * error while trying to fill it, we have to clear out any
                 * garbage data from the pages instantiated for the buffer.
                 * If we do not, a failed uiomove() during a write can leave
                 * the prior contents of the pages exposed to a userland mmap.
                 *
                 * Note that we need only clear buffers with a transfer size
                 * equal to the block size because buffers with a shorter
                 * transfer size were cleared above by the call to UFS_BALLOC()
                 * with the BA_CLRBUF flag set.
                 *
                 * If the source region for uiomove identically mmaps the
                 * buffer, uiomove() performed the NOP copy, and the buffer
                 * content remains valid because the page fault handler
                 * validated the pages.
                 */
                if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
                    fs->fs_bsize == xfersize)
                        vfs_bio_clrbuf(bp);

                vfs_bio_set_flags(bp, ioflag);

                /*
                 * If IO_SYNC each buffer is written synchronously.  Otherwise
                 * if we have a severe page deficiency write the buffer
                 * asynchronously.  Otherwise try to cluster, and if that
                 * doesn't do it then either do an async write (if O_DIRECT),
                 * or a delayed write (if not).
                 */
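                /*
                 * In short: IO_SYNC takes bwrite(); memory pressure or
                 * IO_ASYNC takes bawrite(); a full block on a mount that
                 * allows clustering takes cluster_write(); IO_DIRECT takes
                 * an async write; everything else is a delayed write.
                 */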
                if (ioflag & IO_SYNC) {
                        (void)bwrite(bp);
                } else if (vm_page_count_severe() ||
                    buf_dirty_count_severe() ||
                    (ioflag & IO_ASYNC)) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else if (xfersize + blkoffset == fs->fs_bsize) {
                        if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
                                bp->b_flags |= B_CLUSTEROK;
                                cluster_write(vp, bp, ip->i_size, seqcount,
                                    GB_UNMAPPED);
                        } else {
                                bawrite(bp);
                        }
                } else if (ioflag & IO_DIRECT) {
                        bp->b_flags |= B_CLUSTEROK;
                        bawrite(bp);
                } else {
                        bp->b_flags |= B_CLUSTEROK;
                        bdwrite(bp);
                }
                if (error || xfersize == 0)
                        break;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        /*
         * If we successfully wrote any data, and we are not the superuser
         * we clear the setuid and setgid bits as a precaution against
         * tampering.
         */
        if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
            ap->a_cred) {
                if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
                        ip->i_mode &= ~(ISUID | ISGID);
                        DIP_SET(ip, i_mode, ip->i_mode);
                }
        }
        if (error) {
                if (ioflag & IO_UNIT) {
                        (void)ffs_truncate(vp, osize,
                            IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
                        uio->uio_offset -= resid - uio->uio_resid;
                        uio->uio_resid = resid;
                }
        } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
                error = ffs_update(vp, 1);
        return (error);
}

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn, nextlbn;
        off_t bytesinfile;
        long size, xfersize, blkoffset;
        ssize_t orig_resid;
        int error;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        dp = ip->i_din2;

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
                panic("ffs_extread: mode");
#endif
        orig_resid = uio->uio_resid;
        KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
        if (orig_resid == 0)
                return (0);
        KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

        for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
                if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
                        break;
                lbn = lblkno(fs, uio->uio_offset);
                nextlbn = lbn + 1;

                /*
                 * size of buffer.  The buffer representing the
                 * end of the file is rounded up to the size of
                 * the block type (fragment or full block,
                 * depending).
                 */
                size = sblksize(fs, dp->di_extsize, lbn);
                blkoffset = blkoff(fs, uio->uio_offset);

                /*
                 * The amount we want to transfer in this iteration is
                 * one FS block less the amount of the data before
                 * our startpoint (duh!)
                 */
                xfersize = fs->fs_bsize - blkoffset;

                /*
                 * But if we actually want less than the block,
                 * or the file doesn't have a whole block more of data,
                 * then use the lesser number.
                 */
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;
                if (bytesinfile < xfersize)
                        xfersize = bytesinfile;

                if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
                        /*
                         * Don't do readahead if this is the end of the info.
                         */
                        error = bread(vp, -1 - lbn, size, NOCRED, &bp);
                } else {
                        /*
                         * If we have a second block, then
                         * fire off a request for a readahead
                         * as well as a read.  Note that the 4th and 5th
                         * arguments point to arrays of the size specified in
                         * the 6th argument.
                         */
                        u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

                        nextlbn = -1 - nextlbn;
                        error = breadn(vp, -1 - lbn,
                            size, &nextlbn, &nextsize, 1, NOCRED, &bp);
                }
                if (error) {
                        brelse(bp);
                        bp = NULL;
                        break;
                }

                /*
                 * We should only get non-zero b_resid when an I/O error
                 * has occurred, which should cause us to break above.
                 * However, if the short read did not cause an error,
                 * then we want to ensure that we do not uiomove bad
                 * or uninitialized data.
                 */
                size -= bp->b_resid;
                if (size < xfersize) {
                        if (size == 0)
                                break;
                        xfersize = size;
                }

                error = uiomove((char *)bp->b_data + blkoffset,
                    (int)xfersize, uio);
                if (error)
                        break;
                vfs_bio_brelse(bp, ioflag);
        }

        /*
         * This can only happen in the case of an error, because the loop
         * above resets bp to NULL on each iteration and on normal
         * completion has not set a new value into it, so it must have come
         * from a 'break' statement.
         */
        if (bp != NULL)
                vfs_bio_brelse(bp, ioflag);
        return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        struct fs *fs;
        struct buf *bp;
        ufs_lbn_t lbn;
        off_t osize;
        ssize_t resid;
        int blkoffset, error, flags, size, xfersize;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        dp = ip->i_din2;

#ifdef INVARIANTS
        if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
                panic("ffs_extwrite: mode");
#endif

        if (ioflag & IO_APPEND)
                uio->uio_offset = dp->di_extsize;
        KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
        KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
        if ((uoff_t)uio->uio_offset + uio->uio_resid >
            UFS_NXADDR * fs->fs_bsize)
                return (EFBIG);

        resid = uio->uio_resid;
        osize = dp->di_extsize;
        flags = IO_EXT;
        if (ioflag & IO_SYNC)
                flags |= IO_SYNC;

        for (error = 0; uio->uio_resid > 0;) {
                lbn = lblkno(fs, uio->uio_offset);
                blkoffset = blkoff(fs, uio->uio_offset);
                xfersize = fs->fs_bsize - blkoffset;
                if (uio->uio_resid < xfersize)
                        xfersize = uio->uio_resid;

                /*
                 * We must perform a read-before-write if the transfer size
                 * does not cover the entire buffer.
                 */
                if (fs->fs_bsize > xfersize)
                        flags |= BA_CLRBUF;
                else
                        flags &= ~BA_CLRBUF;
                error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
                    ucred, flags, &bp);
                if (error != 0)
                        break;
                /*
                 * If the buffer is not valid we have to clear out any
                 * garbage data from the pages instantiated for the buffer.
                 * If we do not, a failed uiomove() during a write can leave
                 * the prior contents of the pages exposed to a userland
                 * mmap().  XXX deal with uiomove() errors a better way.
                 */
                if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
                        vfs_bio_clrbuf(bp);

                if (uio->uio_offset + xfersize > dp->di_extsize)
                        dp->di_extsize = uio->uio_offset + xfersize;

                size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
                if (size < xfersize)
                        xfersize = size;

                error =
                    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);

                vfs_bio_set_flags(bp, ioflag);

                /*
                 * If IO_SYNC each buffer is written synchronously.  Otherwise
                 * if we have a severe page deficiency write the buffer
                 * asynchronously.  Otherwise try to cluster, and if that
                 * doesn't do it then either do an async write (if O_DIRECT),
                 * or a delayed write (if not).
                 */
                if (ioflag & IO_SYNC) {
                        (void)bwrite(bp);
                } else if (vm_page_count_severe() ||
                    buf_dirty_count_severe() ||
                    xfersize + blkoffset == fs->fs_bsize ||
                    (ioflag & (IO_ASYNC | IO_DIRECT)))
                        bawrite(bp);
                else
                        bdwrite(bp);
                if (error || xfersize == 0)
                        break;
                ip->i_flag |= IN_CHANGE;
        }
        /*
         * If we successfully wrote any data, and we are not the superuser
         * we clear the setuid and setgid bits as a precaution against
         * tampering.
         */
        if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
                if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
                        ip->i_mode &= ~(ISUID | ISGID);
                        dp->di_mode = ip->i_mode;
                }
        }
        if (error) {
                if (ioflag & IO_UNIT) {
                        (void)ffs_truncate(vp, osize,
                            IO_EXT | (ioflag & IO_SYNC), ucred);
                        uio->uio_offset -= resid - uio->uio_resid;
                        uio->uio_resid = resid;
                }
        } else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
                error = ffs_update(vp, 1);
        return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    struct extattr **eapp, u_char **eac)
{
        struct extattr *eap, *eaend;
        size_t nlen;

        nlen = strlen(name);
        KASSERT(ALIGNED_TO(ptr, struct extattr), ("unaligned"));
        eap = (struct extattr *)ptr;
        eaend = (struct extattr *)(ptr + length);
        for (; eap < eaend; eap = EXTATTR_NEXT(eap)) {
                /* make sure this entry is complete */
                if (EXTATTR_NEXT(eap) > eaend)
                        break;
                if (eap->ea_namespace != nspace || eap->ea_namelength != nlen
                    || memcmp(eap->ea_name, name, nlen) != 0)
                        continue;
                if (eapp != NULL)
                        *eapp = eap;
                if (eac != NULL)
                        *eac = EXTATTR_CONTENT(eap);
                return (EXTATTR_CONTENT_SIZE(eap));
        }
        return (-1);
}
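
/*
 * Record layout note (inferred from the fields used above): each EA is a
 * 32-bit ea_length, then one byte each of namespace, content pad length,
 * and name length, then the name and the content, each padded to an
 * 8-byte boundary.  EXTATTR_NEXT() therefore advances by ea_length, and
 * the ALIGNED_TO() assertion keeps those 32-bit loads safe.
 */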

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        struct fs *fs;
        struct uio luio;
        struct iovec liovec;
        u_int easize;
        int error;
        u_char *eae;

        ip = VTOI(vp);
        fs = ITOFS(ip);
        dp = ip->i_din2;
        easize = dp->di_extsize;
        if ((uoff_t)easize + extra > UFS_NXADDR * fs->fs_bsize)
                return (EFBIG);

        eae = malloc(easize + extra, M_TEMP, M_WAITOK);

        liovec.iov_base = eae;
        liovec.iov_len = easize;
        luio.uio_iov = &liovec;
        luio.uio_iovcnt = 1;
        luio.uio_offset = 0;
        luio.uio_resid = easize;
        luio.uio_segflg = UIO_SYSSPACE;
        luio.uio_rw = UIO_READ;
        luio.uio_td = td;

        error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
        if (error) {
                free(eae, M_TEMP);
                return (error);
        }
        *p = eae;
        return (0);
}

static void
ffs_lock_ea(struct vnode *vp)
{
        struct inode *ip;

        ip = VTOI(vp);
        VI_LOCK(vp);
        while (ip->i_flag & IN_EA_LOCKED) {
                ip->i_flag |= IN_EA_LOCKWAIT;
                msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
                    0);
        }
        ip->i_flag |= IN_EA_LOCKED;
        VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
        struct inode *ip;

        ip = VTOI(vp);
        VI_LOCK(vp);
        if (ip->i_flag & IN_EA_LOCKWAIT)
                wakeup(&ip->i_ea_refs);
        ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
        VI_UNLOCK(vp);
}
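
/*
 * Together these implement a small flag-based sleep lock: IN_EA_LOCKED is
 * the lock bit, guarded by the vnode interlock; contenders set
 * IN_EA_LOCKWAIT and msleep() on i_ea_refs, and the unlocking thread
 * issues the matching wakeup().
 */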

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
        struct inode *ip;
        struct ufs2_dinode *dp;
        int error;

        ip = VTOI(vp);

        ffs_lock_ea(vp);
        if (ip->i_ea_area != NULL) {
                ip->i_ea_refs++;
                ffs_unlock_ea(vp);
                return (0);
        }
        dp = ip->i_din2;
        error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
        if (error) {
                ffs_unlock_ea(vp);
                return (error);
        }
        ip->i_ea_len = dp->di_extsize;
        ip->i_ea_error = 0;
        ip->i_ea_refs++;
        ffs_unlock_ea(vp);
        return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
        struct inode *ip;
        struct uio luio;
        struct iovec liovec;
        int error;
        struct ufs2_dinode *dp;

        ip = VTOI(vp);
        ffs_lock_ea(vp);
        if (ip->i_ea_area == NULL) {
                ffs_unlock_ea(vp);
                return (EINVAL);
        }
        dp = ip->i_din2;
        error = ip->i_ea_error;
        if (commit && error == 0) {
                ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
                if (cred == NOCRED)
                        cred = vp->v_mount->mnt_cred;
                liovec.iov_base = ip->i_ea_area;
                liovec.iov_len = ip->i_ea_len;
                luio.uio_iov = &liovec;
                luio.uio_iovcnt = 1;
                luio.uio_offset = 0;
                luio.uio_resid = ip->i_ea_len;
                luio.uio_segflg = UIO_SYSSPACE;
                luio.uio_rw = UIO_WRITE;
                luio.uio_td = td;
                /* XXX: I'm not happy about truncating to zero size */
                if (ip->i_ea_len < dp->di_extsize)
                        error = ffs_truncate(vp, 0, IO_EXT, cred);
                error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
        }
        if (--ip->i_ea_refs == 0) {
                free(ip->i_ea_area, M_TEMP);
                ip->i_ea_area = NULL;
                ip->i_ea_len = 0;
                ip->i_ea_error = 0;
        }
        ffs_unlock_ea(vp);
        return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_vp;
        struct buf *a_bp;
};
*/
{
        struct vnode *vp;
        daddr_t lbn;

        vp = ap->a_vp;
        lbn = ap->a_bp->b_lblkno;
        if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -UFS_NXADDR)
                return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
        if (vp->v_type == VFIFO)
                return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
        panic("spec nodes went here");
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_vp;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
        struct vnodeop_desc *a_desc;
        struct vnode *a_vp;
        int a_commit;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
                return (EROFS);

        return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
        IN struct vnode *a_vp;
        IN int a_attrnamespace;
        IN const char *a_name;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{
        struct inode *ip;
        struct extattr *eap;
        uint32_t ul;
        int olen, error, i, easize;
        u_char *eae;
        void *tmp;

        ip = VTOI(ap->a_vp);

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        if (strlen(ap->a_name) == 0)
                return (EINVAL);

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);

        error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
            ap->a_cred, ap->a_td, VWRITE);
        if (error) {
                /*
                 * ffs_lock_ea is not needed there, because the vnode
                 * must be exclusively locked.
                 */
                if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
                        ip->i_ea_error = error;
                return (error);
        }

        error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
        if (error)
                return (error);

        /* CEM: delete could be done in-place instead */
        eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
        bcopy(ip->i_ea_area, eae, ip->i_ea_len);
        easize = ip->i_ea_len;

        olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
            &eap, NULL);
        if (olen == -1) {
                /* delete but nonexistent */
                free(eae, M_TEMP);
                ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
                return (ENOATTR);
        }

        ul = eap->ea_length;
        i = (u_char *)EXTATTR_NEXT(eap) - eae;
        bcopy(EXTATTR_NEXT(eap), eap, easize - i);
        easize -= ul;

        tmp = ip->i_ea_area;
        ip->i_ea_area = eae;
        ip->i_ea_len = easize;
        free(tmp, M_TEMP);
        error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
        return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
        IN struct vnode *a_vp;
        IN int a_attrnamespace;
        IN const char *a_name;
        INOUT struct uio *a_uio;
        OUT size_t *a_size;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{
        struct inode *ip;
        u_char *eae, *p;
        unsigned easize;
        int error, ealen;

        ip = VTOI(ap->a_vp);

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
            ap->a_cred, ap->a_td, VREAD);
        if (error)
                return (error);

        error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
        if (error)
                return (error);

        eae = ip->i_ea_area;
        easize = ip->i_ea_len;

        ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
            NULL, &p);
        if (ealen >= 0) {
                error = 0;
                if (ap->a_size != NULL)
                        *ap->a_size = ealen;
                else if (ap->a_uio != NULL)
                        error = uiomove(p, ealen, ap->a_uio);
        } else
                error = ENOATTR;

        ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
        return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
        IN struct vnode *a_vp;
        IN int a_attrnamespace;
        INOUT struct uio *a_uio;
        OUT size_t *a_size;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{
        struct inode *ip;
        struct extattr *eap, *eaend;
        int error, ealen;

        ip = VTOI(ap->a_vp);

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
            ap->a_cred, ap->a_td, VREAD);
        if (error)
                return (error);

        error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
        if (error)
                return (error);

        if (ap->a_size != NULL)
                *ap->a_size = 0;

        KASSERT(ALIGNED_TO(ip->i_ea_area, struct extattr), ("unaligned"));
        eap = (struct extattr *)ip->i_ea_area;
        eaend = (struct extattr *)(ip->i_ea_area + ip->i_ea_len);
        for (; error == 0 && eap < eaend; eap = EXTATTR_NEXT(eap)) {
                /* make sure this entry is complete */
                if (EXTATTR_NEXT(eap) > eaend)
                        break;
                if (eap->ea_namespace != ap->a_attrnamespace)
                        continue;

                ealen = eap->ea_namelength;
                if (ap->a_size != NULL)
                        *ap->a_size += ealen + 1;
                else if (ap->a_uio != NULL)
                        error = uiomove(&eap->ea_namelength, ealen + 1,
                            ap->a_uio);
        }

        ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
        return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
        IN struct vnode *a_vp;
        IN int a_attrnamespace;
        IN const char *a_name;
        INOUT struct uio *a_uio;
        IN struct ucred *a_cred;
        IN struct thread *a_td;
};
*/
{
        struct inode *ip;
        struct fs *fs;
        struct extattr *eap;
        uint32_t ealength, ul;
        ssize_t ealen;
        int olen, eapad1, eapad2, error, i, easize;
        u_char *eae;
        void *tmp;

        ip = VTOI(ap->a_vp);
        fs = ITOFS(ip);

        if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
                return (EOPNOTSUPP);

        if (strlen(ap->a_name) == 0)
                return (EINVAL);

        /* XXX Now unsupported API to delete EAs using NULL uio. */
        if (ap->a_uio == NULL)
                return (EOPNOTSUPP);

        if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
                return (EROFS);

        ealen = ap->a_uio->uio_resid;
        if (ealen < 0 || ealen > lblktosize(fs, UFS_NXADDR))
                return (EINVAL);

        error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
            ap->a_cred, ap->a_td, VWRITE);
        if (error) {
                /*
                 * ffs_lock_ea is not needed there, because the vnode
                 * must be exclusively locked.
                 */
                if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
                        ip->i_ea_error = error;
                return (error);
        }

        error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
        if (error)
                return (error);

        ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
        eapad1 = roundup2(ealength, 8) - ealength;
        eapad2 = roundup2(ealen, 8) - ealen;
        ealength += eapad1 + ealen + eapad2;
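
        /*
         * Example of the arithmetic above: a 9-byte name with 5 bytes of
         * content gives ealength = 4 + 3 + 9 = 16, eapad1 = 0, eapad2 = 3,
         * so the record occupies 16 + 5 + 3 = 24 bytes and the following
         * entry stays 8-byte aligned.
         */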

        /*
         * CEM: rewrites of the same size or smaller could be done in-place
         * instead.  (We don't acquire any fine-grained locks in here either,
         * so we could also do bigger writes in-place.)
         */
        eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
        bcopy(ip->i_ea_area, eae, ip->i_ea_len);
        easize = ip->i_ea_len;

        olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
            &eap, NULL);
        if (olen == -1) {
                /* new, append at end */
                KASSERT(ALIGNED_TO(eae + easize, struct extattr),
                    ("unaligned"));
                eap = (struct extattr *)(eae + easize);
                easize += ealength;
        } else {
                ul = eap->ea_length;
                i = (u_char *)EXTATTR_NEXT(eap) - eae;
                if (ul != ealength) {
                        bcopy(EXTATTR_NEXT(eap), (u_char *)eap + ealength,
                            easize - i);
                        easize += (ealength - ul);
                }
        }
        if (easize > lblktosize(fs, UFS_NXADDR)) {
                free(eae, M_TEMP);
                ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
                if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
                        ip->i_ea_error = ENOSPC;
                return (ENOSPC);
        }
        eap->ea_length = ealength;
        eap->ea_namespace = ap->a_attrnamespace;
        eap->ea_contentpadlen = eapad2;
        eap->ea_namelength = strlen(ap->a_name);
        memcpy(eap->ea_name, ap->a_name, strlen(ap->a_name));
        bzero(&eap->ea_name[strlen(ap->a_name)], eapad1);
        error = uiomove(EXTATTR_CONTENT(eap), ealen, ap->a_uio);
        if (error) {
                free(eae, M_TEMP);
                ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
                if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
                        ip->i_ea_error = error;
                return (error);
        }
        bzero((u_char *)EXTATTR_CONTENT(eap) + ealen, eapad2);

        tmp = ip->i_ea_area;
        ip->i_ea_area = eae;
        ip->i_ea_len = easize;
        free(tmp, M_TEMP);
        error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
        return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
        IN struct vnode *a_vp;
        IN struct fid *a_fhp;
};
*/
{
        struct inode *ip;
        struct ufid *ufhp;

        ip = VTOI(ap->a_vp);
        ufhp = (struct ufid *)ap->a_fhp;
        ufhp->ufid_len = sizeof(struct ufid);
        ufhp->ufid_ino = ip->i_number;
        ufhp->ufid_gen = ip->i_gen;
        return (0);
}

SYSCTL_DECL(_vfs_ffs);
static int use_buf_pager = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN, &use_buf_pager, 0,
    "Always use buffer pager instead of bmap");

static daddr_t
ffs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

        return (lblkno(VFSTOUFS(vp->v_mount)->um_fs, off));
}

static int
ffs_gbp_getblksz(struct vnode *vp, daddr_t lbn)
{

        return (blksize(VFSTOUFS(vp->v_mount)->um_fs, VTOI(vp), lbn));
}
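
/*
 * These two callbacks are what let vfs_bio_getpages() drive paging through
 * the buffer cache: a byte-offset-to-logical-block translation and the
 * size of a given logical block, which may be a fragment at end of file.
 */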

static int
ffs_getpages(struct vop_getpages_args *ap)
{
        struct vnode *vp;
        struct ufsmount *um;

        vp = ap->a_vp;
        um = VFSTOUFS(vp->v_mount);

        if (!use_buf_pager && um->um_devvp->v_bufobj.bo_bsize <= PAGE_SIZE)
                return (vnode_pager_generic_getpages(vp, ap->a_m, ap->a_count,
                    ap->a_rbehind, ap->a_rahead, NULL, NULL));
        return (vfs_bio_getpages(vp, ap->a_m, ap->a_count, ap->a_rbehind,
            ap->a_rahead, ffs_gbp_getblkno, ffs_gbp_getblksz));
}