/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include "fuse.h"
#include "fuse_file.h"
#include "fuse_node.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"
#include "fuse_io.h"

SDT_PROVIDER_DECLARE(fuse);

/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fuse, , io, trace, "int", "char*");

static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid);
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag);
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid);

SDT_PROBE_DEFINE5(fuse, , io, io_dispatch, "struct vnode*", "struct uio*",
    "int", "struct ucred*", "struct fuse_filehandle*");
int
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred, pid_t pid)
{
        struct fuse_filehandle *fufh;
        int err, directio;
        int fflag;

        MPASS(vp->v_type == VREG || vp->v_type == VDIR);

        fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE;
        err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
        if (err) {
                printf("FUSE: io dispatch: filehandles are closed\n");
                return (err);
        }
        SDT_PROBE5(fuse, , io, io_dispatch, vp, uio, ioflag, cred, fufh);

        /*
         * Ideally, when the daemon asks for direct io at open time, the
         * standard file flag should be set according to this, so that would
         * just change the default mode, which later on could be changed via
         * fcntl(2).
         * But this doesn't work, the O_DIRECT flag gets cleared at some point
         * (don't know where).  So to make any use of the Fuse direct_io
         * option, we hardwire it into the file's private data (similarly to
         * Linux, btw.).
         */
        directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp));

        switch (uio->uio_rw) {
        case UIO_READ:
                if (directio) {
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "direct read of vnode");
                        err = fuse_read_directbackend(vp, uio, cred, fufh);
                } else {
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "buffered read of vnode");
                        err = fuse_read_biobackend(vp, uio, cred, fufh, pid);
                }
                break;
        case UIO_WRITE:
                /*
                 * Kludge: simulate write-through caching via write-around
                 * caching.  Same effect, as far as never caching dirty data,
                 * but slightly pessimal in that newly written data is not
                 * cached.
                 */
                if (directio || fuse_data_cache_mode == FUSE_CACHE_WT) {
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "direct write of vnode");
                        err = fuse_write_directbackend(vp, uio, cred, fufh,
                            ioflag);
                } else {
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "buffered write of vnode");
                        err = fuse_write_biobackend(vp, uio, cred, fufh,
                            ioflag, pid);
                }
                break;
        default:
                panic("uninterpreted mode passed to fuse_io_dispatch");
        }

        return (err);
}

SDT_PROBE_DEFINE3(fuse, , io, read_bio_backend_start, "int", "int", "int");
SDT_PROBE_DEFINE2(fuse, , io, read_bio_backend_feed, "int", "int");
SDT_PROBE_DEFINE3(fuse, , io, read_bio_backend_end, "int", "ssize_t", "int");
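
/*
 * Read a file through the buffer cache, one logical block at a time.
 * Blocks already marked B_CACHE are copied out directly; anything else is
 * faulted in via fuse_io_strategy().
 */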
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid)
{
        struct buf *bp;
        daddr_t lbn;
        int bcount;
        int err = 0, n = 0, on = 0;
        off_t filesize;

        const int biosize = fuse_iosize(vp);

        if (uio->uio_resid == 0)
                return (0);
        if (uio->uio_offset < 0)
                return (EINVAL);

        filesize = VTOFUD(vp)->filesize;

        do {
                if (fuse_isdeadfs(vp)) {
                        err = ENXIO;
                        break;
                }
                lbn = uio->uio_offset / biosize;
                on = uio->uio_offset & (biosize - 1);

                SDT_PROBE3(fuse, , io, read_bio_backend_start,
                    biosize, (int)lbn, on);

                /*
                 * Obtain the buffer cache block.  Figure out the buffer size
                 * when we are at EOF.  If we are modifying the size of the
                 * buffer based on an EOF condition we need to hold
                 * nfs_rslock() through obtaining the buffer to prevent
                 * a potential writer-appender from messing with n_size.
                 * Otherwise we may accidentally truncate the buffer and
                 * lose dirty data.
                 *
                 * Note that bcount is *not* DEV_BSIZE aligned.
                 */
                bcount = biosize;
                if ((off_t)lbn * biosize >= filesize) {
                        bcount = 0;
                } else if ((off_t)(lbn + 1) * biosize > filesize) {
                        bcount = filesize - (off_t)lbn * biosize;
                }
                bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
                if (!bp)
                        return (EINTR);

                /*
                 * If B_CACHE is not set, we must issue the read.  If this
                 * fails, we return an error.
                 */
                if ((bp->b_flags & B_CACHE) == 0) {
                        bp->b_iocmd = BIO_READ;
                        vfs_busy_pages(bp, 0);
                        err = fuse_io_strategy(vp, bp);
                        if (err) {
                                brelse(bp);
                                return (err);
                        }
                }
                /*
                 * on is the offset into the current bp.  Figure out how many
                 * bytes we can copy out of the bp.  Note that bcount is
                 * NOT DEV_BSIZE aligned.
                 *
                 * Then figure out how many bytes we can copy into the uio.
                 */
                n = 0;
                if (on < bcount)
                        n = MIN((unsigned)(bcount - on), uio->uio_resid);

                SDT_PROBE2(fuse, , io, read_bio_backend_feed,
                    n, n + (int)bp->b_resid);
                err = uiomove(bp->b_data + on, n, uio);
                brelse(bp);
                SDT_PROBE3(fuse, , io, read_bio_backend_end, err,
                    uio->uio_resid, n);
        } while (err == 0 && uio->uio_resid > 0 && n > 0);

        return (err);
}

SDT_PROBE_DEFINE1(fuse, , io, read_directbackend_start, "struct fuse_read_in*");
SDT_PROBE_DEFINE2(fuse, , io, read_directbackend_complete,
    "struct fuse_dispatcher*", "struct uio*");
static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh)
{
        struct fuse_dispatcher fdi;
        struct fuse_read_in *fri;
        int err = 0;

        if (uio->uio_resid == 0)
                return (0);

        fdisp_init(&fdi, 0);

        /*
         * XXX In "normal" case we use an intermediate kernel buffer for
         * transmitting data from daemon's context to ours.  Eventually, we
         * should get rid of this.  Anyway, if the target uio lives in
         * sysspace (we are called from pageops), and the input data doesn't
         * need kernel-side processing (we are not called from readdir), we
         * can already invoke an optimized, "peer-to-peer" I/O routine.
         */
        while (uio->uio_resid > 0) {
                fdi.iosize = sizeof(*fri);
                fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
                fri = fdi.indata;
                fri->fh = fufh->fh_id;
                fri->offset = uio->uio_offset;
                fri->size = MIN(uio->uio_resid,
                    fuse_get_mpdata(vp->v_mount)->max_read);

                SDT_PROBE1(fuse, , io, read_directbackend_start, fri);

                if ((err = fdisp_wait_answ(&fdi)))
                        break;

                SDT_PROBE2(fuse, , io, read_directbackend_complete,
                    &fdi, uio);

                if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
                        break;
                if (fdi.iosize < fri->size)
                        break;
        }

        fdisp_destroy(&fdi);

        return (err);
}
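
/*
 * Write directly to the FUSE daemon in chunks of at most max_write bytes,
 * adjusting the uio on short writes and growing the cached file size when
 * the write extends the file.
 */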
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct fuse_write_in *fwi;
        struct fuse_dispatcher fdi;
        size_t chunksize;
        int diff;
        int err = 0;

        if (uio->uio_resid == 0)
                return (0);
        if (ioflag & IO_APPEND)
                uio_setoffset(uio, fvdat->filesize);

        fdisp_init(&fdi, 0);

        while (uio->uio_resid > 0) {
                chunksize = MIN(uio->uio_resid,
                    fuse_get_mpdata(vp->v_mount)->max_write);

                fdi.iosize = sizeof(*fwi) + chunksize;
                fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

                fwi = fdi.indata;
                fwi->fh = fufh->fh_id;
                fwi->offset = uio->uio_offset;
                fwi->size = chunksize;

                if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
                    chunksize, uio)))
                        break;

                if ((err = fdisp_wait_answ(&fdi)))
                        break;

                /* Adjust the uio in the case of short writes */
                diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
                if (diff < 0) {
                        err = EINVAL;
                        break;
                } else if (diff > 0 && !(ioflag & IO_DIRECT)) {
                        /*
                         * XXX We really should be directly checking whether
                         * the file was opened with FOPEN_DIRECT_IO, not
                         * IO_DIRECT.  IO_DIRECT can be set in multiple ways.
                         */
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "misbehaving filesystem: short writes are only "
                            "allowed with direct_io");
                }
                uio->uio_resid += diff;
                uio->uio_offset -= diff;

                if (uio->uio_offset > fvdat->filesize &&
                    fuse_data_cache_mode != FUSE_CACHE_UC) {
                        fuse_vnode_setsize(vp, cred, uio->uio_offset);
                        fvdat->flag &= ~FN_SIZECHANGE;
                }
        }

        fdisp_destroy(&fdi);

        return (err);
}

SDT_PROBE_DEFINE6(fuse, , io, write_biobackend_start, "int64_t", "int", "int",
    "struct uio*", "int", "bool");
SDT_PROBE_DEFINE2(fuse, , io, write_biobackend_append_race, "long", "int");
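
/*
 * Write a file through the buffer cache.  Each loop iteration dirties one
 * logical block, reading it in first when the write covers only part of a
 * block that is not already cached.
 */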
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct buf *bp;
        daddr_t lbn;
        int bcount;
        int n, on, err = 0;

        const int biosize = fuse_iosize(vp);

        KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
        if (vp->v_type != VREG)
                return (EIO);
        if (uio->uio_offset < 0)
                return (EINVAL);
        if (uio->uio_resid == 0)
                return (0);
        if (ioflag & IO_APPEND)
                uio_setoffset(uio, fvdat->filesize);

        /*
         * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
         * would exceed the local maximum per-file write commit size when
         * combined with those, we must decide whether to flush,
         * go synchronous, or return err.  We don't bother checking
         * IO_UNIT -- we just make all writes atomic anyway, as there's
         * no point optimizing for something that really won't ever happen.
         */
        do {
                if (fuse_isdeadfs(vp)) {
                        err = ENXIO;
                        break;
                }
                lbn = uio->uio_offset / biosize;
                on = uio->uio_offset & (biosize - 1);
                n = MIN((unsigned)(biosize - on), uio->uio_resid);

again:
                /*
                 * Handle direct append and file extension cases, calculate
                 * unaligned buffer size.
                 */
                if (uio->uio_offset == fvdat->filesize && n) {
                        /*
                         * Get the buffer (in its pre-append state to maintain
                         * B_CACHE if it was previously set).  Resize the
                         * nfsnode after we have locked the buffer to prevent
                         * readers from reading garbage.
                         */
                        bcount = on;
                        SDT_PROBE6(fuse, , io, write_biobackend_start,
                            lbn, on, n, uio, bcount, true);
                        bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

                        if (bp != NULL) {
                                long save;

                                err = fuse_vnode_setsize(vp, cred,
                                    uio->uio_offset + n);
                                if (err) {
                                        brelse(bp);
                                        break;
                                }
                                save = bp->b_flags & B_CACHE;
                                bcount += n;
                                allocbuf(bp, bcount);
                                bp->b_flags |= save;
                        }
                } else {
                        /*
                         * Obtain the locked cache block first, and then
                         * adjust the file's size as appropriate.
                         */
                        bcount = on + n;
                        if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
                                if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
                                        bcount = biosize;
                                else
                                        bcount = fvdat->filesize -
                                            (off_t)lbn * biosize;
                        }
                        SDT_PROBE6(fuse, , io, write_biobackend_start,
                            lbn, on, n, uio, bcount, false);
                        bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
                        if (bp && uio->uio_offset + n > fvdat->filesize) {
                                err = fuse_vnode_setsize(vp, cred,
                                    uio->uio_offset + n);
                                if (err) {
                                        brelse(bp);
                                        break;
                                }
                        }
                }
                if (!bp) {
                        err = EINTR;
                        break;
                }

                /*
                 * Issue a READ if B_CACHE is not set.  In special-append
                 * mode, B_CACHE is based on the buffer prior to the write
                 * op and is typically set, avoiding the read.  If a read
                 * is required in special append mode, the server will
                 * probably send us a short-read since we extended the file
                 * on our end, resulting in b_resid == 0 and, thusly,
                 * B_CACHE getting set.
                 *
                 * We can also avoid issuing the read if the write covers
                 * the entire buffer.  We have to make sure the buffer state
                 * is reasonable in this case since we will not be initiating
                 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
                 * more information.
                 *
                 * B_CACHE may also be set due to the buffer being cached
                 * normally.
                 */
                if (on == 0 && n == bcount) {
                        bp->b_flags |= B_CACHE;
                        bp->b_flags &= ~B_INVAL;
                        bp->b_ioflags &= ~BIO_ERROR;
                }
                if ((bp->b_flags & B_CACHE) == 0) {
                        bp->b_iocmd = BIO_READ;
                        vfs_busy_pages(bp, 0);
                        fuse_io_strategy(vp, bp);
                        if ((err = bp->b_error)) {
                                brelse(bp);
                                break;
                        }
                }
                if (bp->b_wcred == NOCRED)
                        bp->b_wcred = crhold(cred);

                /*
                 * If dirtyend exceeds file size, chop it down.  This should
                 * not normally occur but there is an append race where it
                 * might occur XXX, so we log it.
                 *
                 * If the chopping creates a reverse-indexed or degenerate
                 * situation with dirtyoff/end, we 0 both of them.
                 */
                if (bp->b_dirtyend > bcount) {
                        SDT_PROBE2(fuse, , io, write_biobackend_append_race,
                            (long)bp->b_blkno * biosize,
                            bp->b_dirtyend - bcount);
                        bp->b_dirtyend = bcount;
                }
                if (bp->b_dirtyoff >= bp->b_dirtyend)
                        bp->b_dirtyoff = bp->b_dirtyend = 0;

                /*
                 * If the new write will leave a contiguous dirty
                 * area, just update the b_dirtyoff and b_dirtyend,
                 * otherwise force a write rpc of the old dirty area.
                 *
                 * While it is possible to merge discontiguous writes due to
                 * our having a B_CACHE buffer (and thus valid read data
                 * for the hole), we don't because it could lead to
                 * significant cache coherency problems with multiple clients,
                 * especially if locking is implemented later on.
                 *
                 * As an optimization we could theoretically maintain
                 * a linked list of discontinuous areas, but we would still
                 * have to commit them separately so there isn't much
                 * advantage to it except perhaps a bit of asynchronization.
                 */
                if (bp->b_dirtyend > 0 &&
                    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
                        /*
                         * Yes, we mean it.  Write out everything to "storage"
                         * immediately, without hesitation.  (Apart from other
                         * reasons: the only way to know if a write is valid
                         * is if it's actually written out.)
                         */
                        bwrite(bp);
                        if (bp->b_error == EINTR) {
                                err = EINTR;
                                break;
                        }
                        goto again;
                }
                err = uiomove((char *)bp->b_data + on, n, uio);

                /*
                 * Since this block is being modified, it must be written
                 * again and not just committed.  Since write clustering does
                 * not work for the stage 1 data write, only the stage 2
                 * commit rpc, we have to clear B_CLUSTEROK as well.
                 */
                bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

                if (err) {
                        bp->b_ioflags |= BIO_ERROR;
                        bp->b_error = err;
                        brelse(bp);
                        break;
                }

                /*
                 * Only update dirtyoff/dirtyend if not a degenerate
                 * condition.
                 */
                if (bp->b_dirtyend > 0) {
                        bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
                        bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
                } else {
                        bp->b_dirtyoff = on;
                        bp->b_dirtyend = on + n;
                }
                vfs_bio_set_valid(bp, on, n);

                err = bwrite(bp);
                if (err)
                        break;
        } while (uio->uio_resid > 0 && n > 0);

        if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
                fuse_vnode_savesize(vp, cred, pid);

        return (err);
}
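
/*
 * Carry out I/O for a single struct buf: translate it into a uio, hand it
 * to the direct read or write backend, and complete the buffer with
 * bufdone().
 */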
int
fuse_io_strategy(struct vnode *vp, struct buf *bp)
{
        struct fuse_filehandle *fufh;
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        struct ucred *cred;
        struct uio *uiop;
        struct uio uio;
        struct iovec io;
        int error = 0;
        int fflag;
        /* We don't know the true pid when we're dealing with the cache */
        pid_t pid = 0;

        const int biosize = fuse_iosize(vp);

        MPASS(vp->v_type == VREG || vp->v_type == VDIR);
        MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);

        fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE;
        cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;
        error = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
        if (bp->b_iocmd == BIO_READ && error == EBADF) {
                /*
                 * This may be a read-modify-write operation on a cached file
                 * opened O_WRONLY.  The FUSE protocol allows this.
                 *
                 * TODO: eliminate this hacky check once the FUFH table is gone
                 */
                error = fuse_filehandle_get(vp, FWRITE, &fufh, cred, pid);
        }
        if (error) {
                printf("FUSE: strategy: filehandles are closed\n");
                bp->b_ioflags |= BIO_ERROR;
                bp->b_error = error;
                bufdone(bp);
                return (error);
        }

        uiop = &uio;
        uiop->uio_iov = &io;
        uiop->uio_iovcnt = 1;
        uiop->uio_segflg = UIO_SYSSPACE;
        uiop->uio_td = curthread;

        /*
         * Clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
         * do this here so we do not have to do it in all the code that
         * calls us.
         */
        bp->b_flags &= ~B_INVAL;
        bp->b_ioflags &= ~BIO_ERROR;

        KASSERT(!(bp->b_flags & B_DONE),
            ("fuse_io_strategy: bp %p already marked done", bp));
        if (bp->b_iocmd == BIO_READ) {
                io.iov_len = uiop->uio_resid = bp->b_bcount;
                io.iov_base = bp->b_data;
                uiop->uio_rw = UIO_READ;

                uiop->uio_offset = ((off_t)bp->b_blkno) * biosize;
                error = fuse_read_directbackend(vp, uiop, cred, fufh);

                /* XXXCEM: Potentially invalid access to cached_attrs here */
                if ((!error && uiop->uio_resid) ||
                    (fsess_opt_brokenio(vnode_mount(vp)) && error == EIO &&
                    uiop->uio_offset < fvdat->filesize && fvdat->filesize > 0 &&
                    uiop->uio_offset >= fvdat->cached_attrs.va_size)) {
                        /*
                         * If we had a short read with no error, we must have
                         * hit a file hole.  We should zero-fill the remainder.
                         * This can also occur if the server hits the file EOF.
                         *
                         * Holes used to be able to occur due to pending
                         * writes, but that is not possible any longer.
                         */
                        int nread = bp->b_bcount - uiop->uio_resid;
                        int left = uiop->uio_resid;

                        if (error != 0) {
                                printf("FUSE: Fix broken io: offset %ju, "
                                    "resid %zd, file size %ju/%ju\n",
                                    (uintmax_t)uiop->uio_offset,
                                    uiop->uio_resid,
                                    (uintmax_t)fvdat->filesize,
                                    (uintmax_t)fvdat->cached_attrs.va_size);
                                error = 0;
                        }
                        if (left > 0)
                                bzero((char *)bp->b_data + nread, left);
                        uiop->uio_resid = 0;
                }
                if (error) {
                        bp->b_ioflags |= BIO_ERROR;
                        bp->b_error = error;
                }
        } else {
                /*
                 * If we only need to commit, try to commit.
                 */
                if (bp->b_flags & B_NEEDCOMMIT) {
                        SDT_PROBE2(fuse, , io, trace, 1,
                            "write: B_NEEDCOMMIT flags set");
                }
                /*
                 * Setup for actual write
                 */
                if ((off_t)bp->b_blkno * biosize + bp->b_dirtyend >
                    fvdat->filesize)
                        bp->b_dirtyend = fvdat->filesize -
                            (off_t)bp->b_blkno * biosize;

                if (bp->b_dirtyend > bp->b_dirtyoff) {
                        io.iov_len = uiop->uio_resid = bp->b_dirtyend
                            - bp->b_dirtyoff;
                        uiop->uio_offset = (off_t)bp->b_blkno * biosize
                            + bp->b_dirtyoff;
                        io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
                        uiop->uio_rw = UIO_WRITE;

                        error = fuse_write_directbackend(vp, uiop, cred, fufh, 0);

                        if (error == EINTR || error == ETIMEDOUT ||
                            (!error && (bp->b_flags & B_NEEDCOMMIT))) {
                                bp->b_flags &= ~(B_INVAL | B_NOCACHE);
                                if ((bp->b_flags & B_PAGING) == 0) {
                                        bdirty(bp);
                                        bp->b_flags &= ~B_DONE;
                                }
                                if ((error == EINTR || error == ETIMEDOUT) &&
                                    (bp->b_flags & B_ASYNC) == 0)
                                        bp->b_flags |= B_EINTR;
                        } else {
                                if (error) {
                                        bp->b_ioflags |= BIO_ERROR;
                                        bp->b_flags |= B_INVAL;
                                        bp->b_error = error;
                                }
                                bp->b_dirtyoff = bp->b_dirtyend = 0;
                        }
                } else {
                        bp->b_resid = 0;
                        bufdone(bp);
                        return (0);
                }
        }
        bp->b_resid = uiop->uio_resid;
        bufdone(bp);

        return (error);
}
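
/*
 * Synchronously flush this vnode's dirty buffers via the standard
 * VOP_FSYNC() implementation.
 */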
int
fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
{
        struct vop_fsync_args a = {
                .a_vp = vp,
                .a_waitfor = waitfor,
                .a_td = td,
        };

        return (vop_stdfsync(&a));
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
        struct fuse_vnode_data *fvdat = VTOFUD(vp);
        int error = 0;

        if (vp->v_iflag & VI_DOOMED)
                return (0);

        ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

        while (fvdat->flag & FN_FLUSHINPROG) {
                struct proc *p = td->td_proc;

                if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
                        return (EIO);
                fvdat->flag |= FN_FLUSHWANT;
                tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
                error = 0;
                if (p != NULL) {
                        PROC_LOCK(p);
                        if (SIGNOTEMPTY(p->p_siglist) ||
                            SIGNOTEMPTY(td->td_siglist))
                                error = EINTR;
                        PROC_UNLOCK(p);
                }
                if (error == EINTR)
                        return (EINTR);
        }
        fvdat->flag |= FN_FLUSHINPROG;

        if (vp->v_bufobj.bo_object != NULL) {
                VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
                vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
                VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
        }
        error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
        while (error) {
                if (error == ERESTART || error == EINTR) {
                        fvdat->flag &= ~FN_FLUSHINPROG;
                        if (fvdat->flag & FN_FLUSHWANT) {
                                fvdat->flag &= ~FN_FLUSHWANT;
                                wakeup(&fvdat->flag);
                        }
                        return (EINTR);
                }
                error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
        }
        fvdat->flag &= ~FN_FLUSHINPROG;
        if (fvdat->flag & FN_FLUSHWANT) {
                fvdat->flag &= ~FN_FLUSHWANT;
                wakeup(&fvdat->flag);
        }

        return (error);
}