/*
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include "fuse.h"
#include "fuse_file.h"
#include "fuse_node.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"
#include "fuse_io.h"

#define FUSE_DEBUG_MODULE IO
#include "fuse_debug.h"
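
/*
 * Overview: buffered I/O goes through the buffer cache (the *_biobackend
 * routines below, with fuse_io_strategy() doing the actual transfers),
 * while direct I/O talks to the FUSE daemon with FUSE_READ/FUSE_WRITE
 * messages (the *_directbackend routines).  fuse_io_dispatch() picks the
 * path for each request.
 */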
static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
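
/*
 * Common entry point for read and write vnode operations.  A
 * VOP_READ/VOP_WRITE handler is expected to call this roughly as
 *
 *	err = fuse_io_dispatch(vp, uio, ioflag, cred);
 *
 * (sketch only), and we route the request to the buffered or the
 * direct backend.
 */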
int
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
        struct fuse_filehandle *fufh;
        int err, directio;

        MPASS(vp->v_type == VREG);

        err = fuse_filehandle_getrw(vp,
            (uio->uio_rw == UIO_READ) ? FUFH_RDONLY : FUFH_WRONLY, &fufh);
        if (err) {
                printf("FUSE: io dispatch: filehandles are closed\n");
                return err;
        }

        /*
         * Ideally, when the daemon asks for direct I/O at open time, the
         * standard file flag should be set accordingly, so that would just
         * change the default mode, which later on could be changed via
         * fcntl(2).
         * But this doesn't work: the O_DIRECT flag gets cleared at some
         * point (it is unclear where).  So to make any use of the FUSE
         * direct_io option, we hardwire it into the file's private data
         * (similarly to Linux, btw.).
         */
        directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp));

        switch (uio->uio_rw) {
        case UIO_READ:
                if (directio) {
                        DEBUG("direct read of vnode %ju via file handle %ju\n",
                            (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
                        err = fuse_read_directbackend(vp, uio, cred, fufh);
                } else {
                        DEBUG("buffered read of vnode %ju\n",
                            (uintmax_t)VTOILLU(vp));
                        err = fuse_read_biobackend(vp, uio, cred, fufh);
                }
                break;
        case UIO_WRITE:
                if (directio) {
                        DEBUG("direct write of vnode %ju via file handle %ju\n",
                            (uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
                        err = fuse_write_directbackend(vp, uio, cred, fufh);
                        fuse_invalidate_attr(vp);
                } else {
                        DEBUG("buffered write of vnode %ju\n",
                            (uintmax_t)VTOILLU(vp));
                        err = fuse_write_biobackend(vp, uio, cred, fufh);
                }
                break;
        default:
                panic("uninterpreted mode passed to fuse_io_dispatch");
        }

        return (err);
}
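
/*
 * Buffered read: satisfy the request from the buffer cache, calling
 * into fuse_io_strategy() for blocks that are not yet cached.
 */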
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
        struct buf *bp;
        daddr_t lbn;
        int bcount;
        int err = 0, n = 0, on = 0;
        off_t filesize;

        const int biosize = fuse_iosize(vp);

        DEBUG("resid=%zx offset=%jx fsize=%jx\n",
            uio->uio_resid, uio->uio_offset, VTOFUD(vp)->filesize);

        if (uio->uio_resid == 0)
                return (0);
        if (uio->uio_offset < 0)
                return (EINVAL);

        bcount = MIN(MAXBSIZE, biosize);
        filesize = VTOFUD(vp)->filesize;

        do {
                if (fuse_isdeadfs(vp)) {
                        err = ENXIO;
                        break;
                }
                lbn = uio->uio_offset / biosize;
                on = uio->uio_offset & (biosize - 1);

                DEBUG2G("biosize %d, lbn %d, on %d\n", biosize, (int)lbn, on);

                /*
                 * Obtain the buffer cache block.  Figure out the buffer size
                 * when we are at EOF.  If we are modifying the size of the
                 * buffer based on an EOF condition we need to hold
                 * nfs_rslock() through obtaining the buffer to prevent
                 * a potential writer-appender from messing with n_size.
                 * Otherwise we may accidentally truncate the buffer and
                 * lose dirty data.
                 *
                 * Note that bcount is *not* DEV_BSIZE aligned.
                 */
                if ((off_t)lbn * biosize >= filesize) {
                        bcount = 0;
                } else if ((off_t)(lbn + 1) * biosize > filesize) {
                        bcount = filesize - (off_t)lbn * biosize;
                }
                bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
                if (!bp)
                        return (EINTR);

                /*
                 * If B_CACHE is not set, we must issue the read.  If this
                 * fails, we return an error.
                 */
                if ((bp->b_flags & B_CACHE) == 0) {
                        bp->b_iocmd = BIO_READ;
                        vfs_busy_pages(bp, 0);
                        err = fuse_io_strategy(vp, bp);
                        if (err) {
                                brelse(bp);
                                return (err);
                        }
                }

                /*
                 * on is the offset into the current bp.  Figure out how many
                 * bytes we can copy out of the bp.  Note that bcount is
                 * NOT DEV_BSIZE aligned.
                 *
                 * Then figure out how many bytes we can copy into the uio.
                 */
                n = 0;
                if (on < bcount)
                        n = MIN((unsigned)(bcount - on), uio->uio_resid);
                if (n > 0) {
                        DEBUG2G("feeding buffeater with %d bytes of buffer %p,"
                            " saying %d was asked for\n",
                            n, bp->b_data + on, n + (int)bp->b_resid);
                        err = uiomove(bp->b_data + on, n, uio);
                }
                brelse(bp);
                DEBUG2G("end of turn, err %d, uio->uio_resid %zd, n %d\n",
                    err, uio->uio_resid, n);
        } while (err == 0 && uio->uio_resid > 0 && n > 0);

        return (err);
}
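
/*
 * Direct read: bypass the buffer cache and pull the data from the
 * daemon with a loop of FUSE_READ requests, each at most max_read bytes.
 */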
static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
        struct fuse_dispatcher fdi;
        struct fuse_read_in *fri;
        int err = 0;

        if (uio->uio_resid == 0)
                return (0);

        fdisp_init(&fdi, 0);

        /*
         * XXX In the "normal" case we use an intermediate kernel buffer for
         * transmitting data from the daemon's context to ours.  Eventually,
         * we should get rid of this.  Anyway, if the target uio lives in
         * sysspace (we are called from pageops), and the input data doesn't
         * need kernel-side processing (we are not called from readdir), we
         * could already invoke an optimized, "peer-to-peer" I/O routine.
         */
        while (uio->uio_resid > 0) {
                fdi.iosize = sizeof(*fri);
                fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
                fri = fdi.indata;
                fri->fh = fufh->fh_id;
                fri->offset = uio->uio_offset;
                fri->size = MIN(uio->uio_resid,
                    fuse_get_mpdata(vp->v_mount)->max_read);

                DEBUG2G("fri->fh %ju, fri->offset %ju, fri->size %ju\n",
                    (uintmax_t)fri->fh, (uintmax_t)fri->offset,
                    (uintmax_t)fri->size);

                if ((err = fdisp_wait_answ(&fdi)))
                        break;

                DEBUG2G("complete: requested fri.size=%u, got iosize=%zd; "
                    "resid=%zd offset=%ju\n",
                    fri->size, fdi.iosize, uio->uio_resid,
                    (uintmax_t)uio->uio_offset);

                if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
                        break;
                if (fdi.iosize < fri->size)
                        break;
        }

        fdisp_destroy(&fdi);

        return (err);
}
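
/*
 * Direct write: push the data to the daemon in chunks of at most
 * max_write bytes via FUSE_WRITE, rewinding the uio on short writes.
 */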
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct fuse_write_in *fwi;
        struct fuse_dispatcher fdi;
        size_t chunksize;
        int diff;
        int err = 0;

        if (uio->uio_resid == 0)
                return (0);

        fdisp_init(&fdi, 0);

        while (uio->uio_resid > 0) {
                chunksize = MIN(uio->uio_resid,
                    fuse_get_mpdata(vp->v_mount)->max_write);

                fdi.iosize = sizeof(*fwi) + chunksize;
                fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

                fwi = fdi.indata;
                fwi->fh = fufh->fh_id;
                fwi->offset = uio->uio_offset;
                fwi->size = chunksize;

                if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
                    chunksize, uio)))
                        break;

                if ((err = fdisp_wait_answ(&fdi)))
                        break;

                /* Rewind the uio by however much the daemon did not accept. */
                diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
                if (diff < 0) {
                        err = EINVAL;
                        break;
                }
                uio->uio_resid += diff;
                uio->uio_offset -= diff;
                if (uio->uio_offset > fvdat->filesize)
                        fuse_vnode_setsize(vp, cred, uio->uio_offset);
        }

        fdisp_destroy(&fdi);

        return (err);
}
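
/*
 * Buffered write: copy the data into buffer cache blocks, extending the
 * file and maintaining the dirty-region bookkeeping as needed; the
 * blocks are flushed to the daemon through fuse_io_strategy().
 */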
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct buf *bp;
        daddr_t lbn;
        int bcount;
        int n, on, err = 0;

        const int biosize = fuse_iosize(vp);

        KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
        DEBUG("resid=%zx offset=%jx fsize=%jx\n",
            uio->uio_resid, uio->uio_offset, fvdat->filesize);
        if (vp->v_type != VREG)
                return (EIO);
        if (uio->uio_offset < 0)
                return (EINVAL);
        if (uio->uio_resid == 0)
                return (0);

        /*
         * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
         * would exceed the local maximum per-file write commit size when
         * combined with those, we must decide whether to flush,
         * go synchronous, or return err.  We don't bother checking
         * IO_UNIT -- we just make all writes atomic anyway, as there's
         * no point optimizing for something that really won't ever happen.
         */
        do {
                if (fuse_isdeadfs(vp)) {
                        err = ENXIO;
                        break;
                }
                lbn = uio->uio_offset / biosize;
                on = uio->uio_offset & (biosize - 1);
                n = MIN((unsigned)(biosize - on), uio->uio_resid);

                DEBUG2G("lbn %ju, on %d, n %d, uio offset %ju, uio resid %zd\n",
                    (uintmax_t)lbn, on, n,
                    (uintmax_t)uio->uio_offset, uio->uio_resid);

again:
                /*
                 * Handle direct append and file extension cases, calculate
                 * unaligned buffer size.
                 */
                if (uio->uio_offset == fvdat->filesize && n) {
                        /*
                         * Get the buffer (in its pre-append state to maintain
                         * B_CACHE if it was previously set).  Resize the
                         * nfsnode after we have locked the buffer to prevent
                         * readers from reading garbage.
                         */
                        bcount = on;
                        DEBUG("getting block from OS, bcount %d\n", bcount);
                        bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
                        if (bp != NULL) {
                                long save;

                                err = fuse_vnode_setsize(vp, cred,
                                    uio->uio_offset + n);
                                if (err) {
                                        brelse(bp);
                                        break;
                                }
                                save = bp->b_flags & B_CACHE;
                                bcount += n;
                                allocbuf(bp, bcount);
                                bp->b_flags |= save;
                        }
                } else {
                        /*
                         * Obtain the locked cache block first, and then
                         * adjust the file's size as appropriate.
                         */
                        bcount = on + n;
                        if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
                                if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
                                        bcount = biosize;
                                else
                                        bcount = fvdat->filesize -
                                            (off_t)lbn * biosize;
                        }
                        DEBUG("getting block from OS, bcount %d\n", bcount);
                        bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
                        if (bp && uio->uio_offset + n > fvdat->filesize) {
                                err = fuse_vnode_setsize(vp, cred,
                                    uio->uio_offset + n);
                                if (err) {
                                        brelse(bp);
                                        break;
                                }
                        }
                }
                if (!bp) {
                        err = EINTR;
                        break;
                }

                /*
                 * Issue a READ if B_CACHE is not set.  In special-append
                 * mode, B_CACHE is based on the buffer prior to the write
                 * op and is typically set, avoiding the read.  If a read
                 * is required in special append mode, the server will
                 * probably send us a short-read since we extended the file
                 * on our end, resulting in b_resid == 0 and thus
                 * B_CACHE getting set.
                 *
                 * We can also avoid issuing the read if the write covers
                 * the entire buffer.  We have to make sure the buffer state
                 * is reasonable in this case since we will not be initiating
                 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
                 * more information.
                 *
                 * B_CACHE may also be set due to the buffer being cached
                 * normally.
                 */
                if (on == 0 && n == bcount) {
                        bp->b_flags |= B_CACHE;
                        bp->b_flags &= ~B_INVAL;
                        bp->b_ioflags &= ~BIO_ERROR;
                }
                if ((bp->b_flags & B_CACHE) == 0) {
                        bp->b_iocmd = BIO_READ;
                        vfs_busy_pages(bp, 0);
                        fuse_io_strategy(vp, bp);
                        if ((err = bp->b_error)) {
                                brelse(bp);
                                break;
                        }
                }
                if (bp->b_wcred == NOCRED)
                        bp->b_wcred = crhold(cred);

                /*
                 * If dirtyend exceeds file size, chop it down.  This should
                 * not normally occur but there is an append race where it
                 * might occur XXX, so we log it.
                 *
                 * If the chopping creates a reverse-indexed or degenerate
                 * situation with dirtyoff/end, we 0 both of them.
                 */
                if (bp->b_dirtyend > bcount) {
                        DEBUG("FUSE append race @%lx:%d\n",
                            (long)bp->b_blkno * biosize,
                            bp->b_dirtyend - bcount);
                        bp->b_dirtyend = bcount;
                }
                if (bp->b_dirtyoff >= bp->b_dirtyend)
                        bp->b_dirtyoff = bp->b_dirtyend = 0;

                /*
                 * If the new write will leave a contiguous dirty
                 * area, just update the b_dirtyoff and b_dirtyend,
                 * otherwise force a write RPC of the old dirty area.
                 *
                 * While it is possible to merge discontiguous writes due to
                 * our having a B_CACHE buffer (and thus valid read data
                 * for the hole), we don't because it could lead to
                 * significant cache coherency problems with multiple clients,
                 * especially if locking is implemented later on.
                 *
                 * As an optimization we could theoretically maintain
                 * a linked list of discontinuous areas, but we would still
                 * have to commit them separately so there isn't much
                 * advantage to it except perhaps a bit of asynchronization.
                 */
                if (bp->b_dirtyend > 0 &&
                    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
                        /*
                         * Yes, we mean it.  Write out everything to "storage"
                         * immediately, without hesitation.  (Apart from other
                         * reasons: the only way to know if a write is valid
                         * is if it's actually written out.)
                         */
                        bwrite(bp);
                        if (bp->b_error == EINTR) {
                                err = EINTR;
                                break;
                        }
                        goto again;
                }

                err = uiomove((char *)bp->b_data + on, n, uio);

                /*
                 * Since this block is being modified, it must be written
                 * again and not just committed.  Since write clustering does
                 * not work for the stage 1 data write, only the stage 2
                 * commit RPC, we have to clear B_CLUSTEROK as well.
                 */
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

                if (err) {
                        bp->b_ioflags |= BIO_ERROR;
                        bp->b_error = err;
                        brelse(bp);
                        break;
                }

                /*
                 * Only update dirtyoff/dirtyend if not a degenerate
                 * condition.
                 */
                if (n) {
                        if (bp->b_dirtyend > 0) {
                                bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
                                bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
                        } else {
                                bp->b_dirtyoff = on;
                                bp->b_dirtyend = on + n;
                        }
                        vfs_bio_set_valid(bp, on, n);
                }
                err = bwrite(bp);
                if (err)
                        break;
        } while (uio->uio_resid > 0 && n > 0);

        if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
                fuse_vnode_savesize(vp, cred);

        return (err);
}
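
/*
 * Perform the actual transfer for a buffer cache block: translate the
 * buf into a uio and hand it to the direct backends.  The read side
 * also zero-fills short reads (file holes).
 */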
int
fuse_io_strategy(struct vnode *vp, struct buf *bp)
{
        struct fuse_filehandle *fufh;
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct ucred *cred;
        struct uio *uiop;
        struct uio uio;
        struct iovec io;
        int error = 0;

        const int biosize = fuse_iosize(vp);

        MPASS(vp->v_type == VREG);
        MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);
        DEBUG("inode=%ju offset=%jd resid=%ld\n",
            (uintmax_t)VTOI(vp), (intmax_t)(((off_t)bp->b_blkno) * biosize),
            bp->b_bcount);

        error = fuse_filehandle_getrw(vp,
            (bp->b_iocmd == BIO_READ) ? FUFH_RDONLY : FUFH_WRONLY, &fufh);
        if (error) {
                printf("FUSE: strategy: filehandles are closed\n");
                bp->b_ioflags |= BIO_ERROR;
                bp->b_error = error;
                return (error);
        }
        cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;

        uiop = &uio;
        uiop->uio_iov = &io;
        uiop->uio_iovcnt = 1;
        uiop->uio_segflg = UIO_SYSSPACE;
        uiop->uio_td = curthread;

        /*
         * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
         * do this here so we do not have to do it in all the code that
         * calls us.
         */
        bp->b_flags &= ~B_INVAL;
        bp->b_ioflags &= ~BIO_ERROR;

        KASSERT(!(bp->b_flags & B_DONE),
            ("fuse_io_strategy: bp %p already marked done", bp));
        if (bp->b_iocmd == BIO_READ) {
                io.iov_len = uiop->uio_resid = bp->b_bcount;
                io.iov_base = bp->b_data;
                uiop->uio_rw = UIO_READ;

                uiop->uio_offset = ((off_t)bp->b_blkno) * biosize;
                error = fuse_read_directbackend(vp, uiop, cred, fufh);

                if ((!error && uiop->uio_resid) ||
                    (fsess_opt_brokenio(vnode_mount(vp)) && error == EIO &&
                    uiop->uio_offset < fvdat->filesize && fvdat->filesize > 0 &&
                    uiop->uio_offset >= fvdat->cached_attrs.va_size)) {
                        /*
                         * If we had a short read with no error, we must have
                         * hit a file hole.  We should zero-fill the remainder.
                         * This can also occur if the server hits the file EOF.
                         *
                         * Holes used to be able to occur due to pending
                         * writes, but that is not possible any longer.
                         */
                        int nread = bp->b_bcount - uiop->uio_resid;
                        int left = uiop->uio_resid;

                        if (error != 0) {
                                printf("FUSE: Fix broken io: offset %ju, "
                                    " resid %zd, file size %ju/%ju\n",
                                    (uintmax_t)uiop->uio_offset,
                                    uiop->uio_resid, fvdat->filesize,
                                    fvdat->cached_attrs.va_size);
                                error = 0;
                        }
                        if (left > 0)
                                bzero((char *)bp->b_data + nread, left);
                        uiop->uio_resid = 0;
                }
                if (error) {
                        bp->b_ioflags |= BIO_ERROR;
                        bp->b_error = error;
                }
        } else {
                /*
                 * If we only need to commit, try to commit
                 */
                if (bp->b_flags & B_NEEDCOMMIT) {
                        DEBUG("write: B_NEEDCOMMIT flags set\n");
                }
                /*
                 * Setup for actual write
                 */
                if ((off_t)bp->b_blkno * biosize + bp->b_dirtyend >
                    fvdat->filesize)
                        bp->b_dirtyend = fvdat->filesize -
                            (off_t)bp->b_blkno * biosize;

                if (bp->b_dirtyend > bp->b_dirtyoff) {
                        io.iov_len = uiop->uio_resid = bp->b_dirtyend
                            - bp->b_dirtyoff;
                        uiop->uio_offset = (off_t)bp->b_blkno * biosize
                            + bp->b_dirtyoff;
                        io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
                        uiop->uio_rw = UIO_WRITE;

                        error = fuse_write_directbackend(vp, uiop, cred, fufh);

                        if (error == EINTR || error == ETIMEDOUT
                            || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

                                bp->b_flags &= ~(B_INVAL | B_NOCACHE);
                                if ((bp->b_flags & B_PAGING) == 0) {
                                        bdirty(bp);
                                        bp->b_flags &= ~B_DONE;
                                }
                                if ((error == EINTR || error == ETIMEDOUT) &&
                                    (bp->b_flags & B_ASYNC) == 0)
                                        bp->b_flags |= B_EINTR;
                        } else {
                                if (error) {
                                        bp->b_ioflags |= BIO_ERROR;
                                        bp->b_flags |= B_INVAL;
                                        bp->b_error = error;
                                }
                                bp->b_dirtyoff = bp->b_dirtyend = 0;
                        }
                } else {
                        bp->b_resid = 0;
                        bufdone(bp);
                        return (0);
                }
        }
        bp->b_resid = uiop->uio_resid;
        bufdone(bp);
        return (error);
}
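
/*
 * Flush dirty buffers for a vnode by handing the request to the generic
 * vop_stdfsync().
 */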
int
fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
{
        struct vop_fsync_args a = {
                .a_vp = vp,
                .a_waitfor = waitfor,
                .a_td = td,
        };

        return (vop_stdfsync(&a));
}
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        int error = 0;

        if (vp->v_iflag & VI_DOOMED)
                return 0;

        ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

        while (fvdat->flag & FN_FLUSHINPROG) {
                struct proc *p = td->td_proc;

                if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
                        return EIO;
                fvdat->flag |= FN_FLUSHWANT;
                tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
                error = 0;
                if (p != NULL) {
                        PROC_LOCK(p);
                        if (SIGNOTEMPTY(p->p_siglist) ||
                            SIGNOTEMPTY(td->td_siglist))
                                error = EINTR;
                        PROC_UNLOCK(p);
                }
                if (error == EINTR)
                        return EINTR;
        }
        fvdat->flag |= FN_FLUSHINPROG;

        if (vp->v_bufobj.bo_object != NULL) {
                VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
                vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
                VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
        }
        error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
        while (error) {
                if (error == ERESTART || error == EINTR) {
                        fvdat->flag &= ~FN_FLUSHINPROG;
                        if (fvdat->flag & FN_FLUSHWANT) {
                                fvdat->flag &= ~FN_FLUSHWANT;
                                wakeup(&fvdat->flag);
                        }
                        return EINTR;
                }
                error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
        }
        fvdat->flag &= ~FN_FLUSHINPROG;
        if (fvdat->flag & FN_FLUSHWANT) {
                fvdat->flag &= ~FN_FLUSHWANT;
                wakeup(&fvdat->flag);
        }
        return (error);
}