/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/filedesc.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include "fuse.h"
#include "fuse_file.h"
#include "fuse_node.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"
#include "fuse_io.h"
SDT_PROVIDER_DECLARE(fusefs);
/*
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , io, trace, "int", "char*");
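/*
 * Example (illustrative, not part of this file): the trace probe above can
 * be watched from userland with DTrace.  The exact probe spec below is an
 * assumption about how the SDT provider is exposed:
 *
 *	dtrace -n 'fusefs*:::trace { printf("%d %s", arg0, stringof(arg1)); }'
 */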
static void
fuse_io_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
	struct thread *td);
static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh);
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag,
	struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid);
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh, int ioflag);
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid);

/*
 * FreeBSD clears the SUID and SGID bits on any write by a non-root user.
 */
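/*
 * Example: with -o default_permissions (FSESS_DEFAULT_PERMISSIONS), a write
 * by a credential lacking PRIV_VFS_RETAINSUGID to a file whose mode is
 * 04755 leaves the file at 0755 afterwards.  Without that mount option the
 * daemon is presumably expected to enforce its own policy.
 */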
static void
fuse_io_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
	struct thread *td)
{
	struct fuse_data *data;

	mp = vnode_mount(vp);
	data = fuse_get_mpdata(mp);
	dataflags = data->dataflags;

	if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
			fuse_internal_getattr(vp, &va, cred, td);
			if (va.va_mode & (S_ISUID | S_ISGID)) {
				mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID);
				/* Clear all vattr fields except mode */
				VATTR_NULL(&va);
				va.va_mode = mode;

				/*
				 * Ignore fuse_internal_setattr's return value,
				 * because at this point the write operation has
				 * already succeeded and we don't want to return
				 * failing status for that.
				 */
				(void)fuse_internal_setattr(vp, &va, td, NULL);
			}
		}
	}
}
SDT_PROBE_DEFINE5(fusefs, , io, io_dispatch, "struct vnode*", "struct uio*",
	"int", "struct ucred*", "struct fuse_filehandle*");
int
fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag, bool pages,
	struct ucred *cred, pid_t pid)
{
	struct fuse_filehandle *fufh;

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);

	fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE;
	err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
	if (err) {
		printf("FUSE: io dispatch: filehandles are closed\n");
		return (err);
	}
	SDT_PROBE5(fusefs, , io, io_dispatch, vp, uio, ioflag, cred, fufh);

	/*
	 * Ideally, when the daemon asks for direct io at open time, the
	 * standard file flag should be set according to this, so that would
	 * just change the default mode, which later on could be changed via
	 * fcntl(2).
	 * But this doesn't work, the O_DIRECT flag gets cleared at some point
	 * (don't know where).  So to make any use of the Fuse direct_io
	 * option, we hardwire it into the file's private data (similarly to
	 * Linux, btw.).
	 */
	directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp));
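	/*
	 * At this point directio is set either because the caller passed
	 * IO_DIRECT (for instance, a descriptor opened with O_DIRECT) or
	 * because data caching is disabled for this mount; in both cases the
	 * switch below bypasses the buffer cache and issues FUSE_READ or
	 * FUSE_WRITE requests directly to the daemon.
	 */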
	switch (uio->uio_rw) {
	case UIO_READ:
		if (directio) {
			SDT_PROBE2(fusefs, , io, trace, 1,
				"direct read of vnode");
			err = fuse_read_directbackend(vp, uio, cred, fufh);
		} else {
			SDT_PROBE2(fusefs, , io, trace, 1,
				"buffered read of vnode");
			err = fuse_read_biobackend(vp, uio, ioflag, cred, fufh,
				pid);
		}
		break;
	case UIO_WRITE:
		/*
		 * Kludge: simulate write-through caching via write-around
		 * caching.  Same effect, as far as never caching dirty data,
		 * but slightly pessimal in that newly written data is not
		 * cached.
		 */
		if (directio || fuse_data_cache_mode == FUSE_CACHE_WT) {
			const int iosize = fuse_iosize(vp);

			SDT_PROBE2(fusefs, , io, trace, 1,
				"direct write of vnode");
			start = uio->uio_offset;
			end = start + uio->uio_resid;
			/*
			 * Invalidate the write cache unless we're coming from
			 * VOP_PUTPAGES, in which case we're writing _from_ the
			 * cache, which we can't invalidate.
			 */
			if (!pages)
				v_inval_buf_range(vp, start, end, iosize);
			err = fuse_write_directbackend(vp, uio, cred, fufh,
				ioflag);
		} else {
			SDT_PROBE2(fusefs, , io, trace, 1,
				"buffered write of vnode");
			err = fuse_write_biobackend(vp, uio, cred, fufh, ioflag,
				pid);
		}
		fuse_io_clear_suid_on_write(vp, cred, uio->uio_td);
		break;
	default:
		panic("uninterpreted mode passed to fuse_io_dispatch");
	}

	return (err);
}
SDT_PROBE_DEFINE3(fusefs, , io, read_bio_backend_start, "int", "int", "int");
SDT_PROBE_DEFINE2(fusefs, , io, read_bio_backend_feed, "int", "int");
SDT_PROBE_DEFINE3(fusefs, , io, read_bio_backend_end, "int", "ssize_t", "int");
static int
fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag,
	struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid)
{
	struct buf *bp;
	daddr_t lbn;
	off_t filesize;
	int err, n = 0, on = 0;

	const int biosize = fuse_iosize(vp);

	if (uio->uio_offset < 0)
		return (EINVAL);

	filesize = VTOFUD(vp)->filesize;

	for (err = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		if (filesize - uio->uio_offset <= 0)
			break;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		SDT_PROBE3(fusefs, , io, read_bio_backend_start,
			biosize, (int)lbn, on);
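		/*
		 * Illustrative numbers: with biosize 65536 and uio_offset
		 * 70000, lbn is 1 and on is 4464, i.e. the transfer starts
		 * 4464 bytes into the file's second 64k logical block.
		 */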
		if ((off_t)lbn * biosize >= filesize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > filesize) {
			bcount = filesize - (off_t)lbn * biosize;
		} else {
			bcount = biosize;
		}

		/* TODO: readahead.  See ext2_read for an example */
		err = bread(vp, lbn, bcount, NOCRED, &bp);
		if (err) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */
		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);

		SDT_PROBE2(fusefs, , io, read_bio_backend_feed,
			n, n + (int)bp->b_resid);
		err = uiomove(bp->b_data + on, n, uio);

		vfs_bio_brelse(bp, ioflag);
		SDT_PROBE3(fusefs, , io, read_bio_backend_end, err,
			uio->uio_resid, n);
	}

	return (err);
}

SDT_PROBE_DEFINE1(fusefs, , io, read_directbackend_start,
	"struct fuse_read_in*");
SDT_PROBE_DEFINE2(fusefs, , io, read_directbackend_complete,
	"struct fuse_dispatcher*", "struct uio*");
static int
fuse_read_directbackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh)
{
	struct fuse_dispatcher fdi;
	struct fuse_read_in *fri;
	int err = 0;

	if (uio->uio_resid == 0)
		return (0);

	fdisp_init(&fdi, 0);

	/*
	 * XXX In "normal" case we use an intermediate kernel buffer for
	 * transmitting data from daemon's context to ours. Eventually, we should
	 * get rid of this. Anyway, if the target uio lives in sysspace (we are
	 * called from pageops), and the input data doesn't need kernel-side
	 * processing (we are not called from readdir) we can already invoke
	 * an optimized, "peer-to-peer" I/O routine.
	 */
	while (uio->uio_resid > 0) {
		fdi.iosize = sizeof(*fri);
		fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
		fri = fdi.indata;
		fri->fh = fufh->fh_id;
		fri->offset = uio->uio_offset;
		fri->size = MIN(uio->uio_resid,
			fuse_get_mpdata(vp->v_mount)->max_read);

		SDT_PROBE1(fusefs, , io, read_directbackend_start, fri);

		if ((err = fdisp_wait_answ(&fdi)))
			break;

		SDT_PROBE2(fusefs, , io, read_directbackend_complete,
			&fdi, uio);

		if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
			break;
		if (fdi.iosize < fri->size)
			break;
	}

	fdisp_destroy(&fdi);

	return (err);
}
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_write_in *fwi;
	struct fuse_write_out *fwo;
	struct fuse_dispatcher fdi;
	off_t as_written_offset;
	bool direct_io = fufh->fuse_open_flags & FOPEN_DIRECT_IO;

	if (uio->uio_resid == 0)
		return (0);

	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);
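	/*
	 * With IO_APPEND the starting offset is pinned to the size cached in
	 * the fuse_vnode_data before any FUSE_WRITE requests are built, so an
	 * O_APPEND write lands at what the kernel currently believes is EOF
	 * (the daemon may of course have a different idea of the file size).
	 */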
	while (uio->uio_resid > 0) {
		chunksize = MIN(uio->uio_resid,
			fuse_get_mpdata(vp->v_mount)->max_write);

		fdi.iosize = sizeof(*fwi) + chunksize;
		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

		fwi = fdi.indata;
		fwi->fh = fufh->fh_id;
		fwi->offset = uio->uio_offset;
		fwi->size = chunksize;
		fwi_data = (char *)fdi.indata + sizeof(*fwi);
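		/*
		 * Each FUSE_WRITE request is laid out as a struct
		 * fuse_write_in header immediately followed by up to
		 * max_write bytes of payload, which is why iosize is
		 * sizeof(*fwi) + chunksize and the payload pointer is
		 * computed as indata + sizeof(*fwi).
		 */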
		if ((err = uiomove(fwi_data, chunksize, uio)))
			break;

		err = fdisp_wait_answ(&fdi);
		if (err == ERESTART || err == EINTR || err == EWOULDBLOCK) {
			/*
			 * Rewind the uio so dofilewrite will know it's
			 * incomplete.
			 */
			uio->uio_resid += fwi->size;
			uio->uio_offset -= fwi->size;
			/*
			 * Change ERESTART into EINTR because we can't rewind
			 * uio->uio_iov.  Basically, once uiomove(9) has been
			 * called, it's impossible to restart a syscall.
			 */
			if (err == ERESTART)
				err = EINTR;
			break;
		} else if (err) {
			break;
		}

		fwo = ((struct fuse_write_out *)fdi.answ);

		/* Adjust the uio in the case of short writes */
		diff = fwi->size - fwo->size;
		as_written_offset = uio->uio_offset - diff;
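		/*
		 * Illustrative numbers: if fwi->size was 8192 and the daemon
		 * reports fwo->size == 4096, then diff is 4096 and, since the
		 * uio has already been advanced past the whole chunk,
		 * as_written_offset points at the first byte the daemon did
		 * not accept.
		 */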
		if (as_written_offset - diff > fvdat->filesize &&
		    fuse_data_cache_mode != FUSE_CACHE_UC) {
			fuse_vnode_setsize(vp, cred, as_written_offset);
			fvdat->flag &= ~FN_SIZECHANGE;
		}

		if (diff < 0) {
			printf("WARNING: misbehaving FUSE filesystem "
				"wrote more data than we provided it\n");
		} else if (diff > 0) {
			/* Short write */
			if (!direct_io) {
				printf("WARNING: misbehaving FUSE filesystem: "
					"short writes are only allowed with "
					"direct_io\n");
			}
			if (ioflag & IO_DIRECT) {
				uio->uio_resid += diff;
				uio->uio_offset -= diff;
				break;
			} else {
				/* Resend the unwritten portion of data */
				fdi.iosize = sizeof(*fwi) + diff;
				/* Refresh fdi without clearing data buffer */
				fdisp_refresh_vp(&fdi, FUSE_WRITE, vp,
					uio->uio_td, cred);
				MPASS2(fwi == fdi.indata, "FUSE dispatcher "
					"reallocated despite no increase in "
					"size");
				void *src = (char *)fwi_data + fwo->size;
				memmove(fwi_data, src, diff);
				fwi->fh = fufh->fh_id;
				fwi->offset = as_written_offset;

SDT_PROBE_DEFINE6(fusefs, , io, write_biobackend_start, "int64_t", "int", "int",
	"struct uio*", "int", "bool");
SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_append_race, "long", "int");
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
	struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	const int biosize = fuse_iosize(vp);

	KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return err.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (fuse_isdeadfs(vp)) {
		return (ENXIO);
	}
	do {
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		if (uio->uio_offset == fvdat->filesize && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * fuse node after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			SDT_PROBE6(fusefs, , io, write_biobackend_start,
				lbn, on, n, uio, bcount, true);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (!bp) {
				err = EINTR;
				break;
			}
			err = fuse_vnode_setsize(vp, cred,
				uio->uio_offset + n);
			if (err) {
				brelse(bp);
				break;
			}
			save = bp->b_flags & B_CACHE;
			bcount += n;
			allocbuf(bp, bcount);
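			/*
			 * The buffer is grown in place to cover the appended
			 * bytes: bcount now includes n and allocbuf() resizes
			 * the buffer accordingly.  B_CACHE was sampled into
			 * "save" first, presumably because extending a buffer
			 * can clear that flag even though the old contents
			 * remain valid for the pre-append range.
			 */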
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
				if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
					bcount = biosize;
				else
					bcount = fvdat->filesize -
						(off_t)lbn * biosize;
			}
			SDT_PROBE6(fusefs, , io, write_biobackend_start,
				lbn, on, n, uio, bcount, false);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (bp && uio->uio_offset + n > fvdat->filesize) {
				err = fuse_vnode_setsize(vp, cred,
					uio->uio_offset + n);
			}
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			SDT_PROBE2(fusefs, , io, write_biobackend_append_race,
				(long)bp->b_blkno * biosize,
				bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
			 * Yes, we mean it.  Write out everything to "storage"
			 * immediately, without hesitation.  (Apart from other
			 * reasons: the only way to know if a write is valid
			 * is if it's actually written out.)
			 */
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}

		err = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
			bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		vfs_bio_set_valid(bp, on, n);
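		/*
		 * Example of the bookkeeping above: a buffer whose dirty
		 * range was [0, 4096) that now absorbs a write with on ==
		 * 4096 and n == 4096 ends up with the merged dirty range
		 * [0, 8192); a previously clean buffer simply records
		 * [on, on + n).
		 */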
	} while (uio->uio_resid > 0 && n > 0);

	if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
		fuse_vnode_savesize(vp, cred, pid);

	return (err);
}

int
fuse_io_strategy(struct vnode *vp, struct buf *bp)
{
	struct fuse_filehandle *fufh;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct ucred *cred;
	struct uio *uiop;
	struct uio uio;
	struct iovec io;
	int error = 0;
	int fflag;
	/* We don't know the true pid when we're dealing with the cache */
	pid_t pid = 0;

	const int biosize = fuse_iosize(vp);

	MPASS(vp->v_type == VREG || vp->v_type == VDIR);
	MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);

	fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE;
	cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;
	error = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
	if (bp->b_iocmd == BIO_READ && error == EBADF) {
		/*
		 * This may be a read-modify-write operation on a cached file
		 * opened O_WRONLY.  The FUSE protocol allows this.
		 */
		error = fuse_filehandle_get(vp, FWRITE, &fufh, cred, pid);
	}
	if (error) {
		printf("FUSE: strategy: filehandles are closed\n");
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
		bufdone(bp);
		return (error);
	}

	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = curthread;
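	/*
	 * The buffer's data area is wrapped in a single-segment kernel-space
	 * uio so that the same fuse_read_directbackend() and
	 * fuse_write_directbackend() helpers used for direct I/O can service
	 * buffer-cache I/O as well; uio_td is simply the current thread.
	 */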
	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE),
		("fuse_io_strategy: bp %p already marked done", bp));
	if (bp->b_iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		uiop->uio_offset = ((off_t)bp->b_blkno) * biosize;
		error = fuse_read_directbackend(vp, uiop, cred, fufh);

		/* XXXCEM: Potentially invalid access to cached_attrs here */
		if ((!error && uiop->uio_resid) ||
		    (fsess_opt_brokenio(vnode_mount(vp)) && error == EIO &&
		    uiop->uio_offset < fvdat->filesize && fvdat->filesize > 0 &&
		    uiop->uio_offset >= fvdat->cached_attrs.va_size)) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			int left = uiop->uio_resid;

			printf("FUSE: Fix broken io: offset %ju, "
			    " resid %zd, file size %ju/%ju\n",
			    (uintmax_t)uiop->uio_offset,
			    uiop->uio_resid, fvdat->filesize,
			    fvdat->cached_attrs.va_size);

			bzero((char *)bp->b_data + nread, left);
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			SDT_PROBE2(fusefs, , io, trace, 1,
				"write: B_NEEDCOMMIT flags set");
		}
		/*
		 * Setup for actual write
		 */
		if ((off_t)bp->b_blkno * biosize + bp->b_dirtyend >
		    fvdat->filesize)
			bp->b_dirtyend = fvdat->filesize -
				(off_t)bp->b_blkno * biosize;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
				- bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * biosize
				+ bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;

			error = fuse_write_directbackend(vp, uiop, cred, fufh, 0);

			if (error == EINTR || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = error;
				} else {
					fuse_io_clear_suid_on_write(vp, cred,
						curthread);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	bufdone(bp);
	return (error);
}

int
fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
{

	return (vn_fsync_buf(vp, waitfor));
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return (0);

	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

	while (fvdat->flag & FN_FLUSHINPROG) {
		struct proc *p = td->td_proc;

		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
			return (EIO);
		fvdat->flag |= FN_FLUSHWANT;
		tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
		error = 0;
		if (p != NULL) {
			PROC_LOCK(p);
			if (SIGNOTEMPTY(p->p_siglist) ||
			    SIGNOTEMPTY(td->td_siglist))
				error = EINTR;
			PROC_UNLOCK(p);
		}
		if (error == EINTR)
			return (EINTR);
	}

	fvdat->flag |= FN_FLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}
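	/*
	 * Dirty pages in the vnode's VM object (e.g. from mmap'ed writes)
	 * were just cleaned synchronously above, so they are written back
	 * before the buffer cache is purged by the vinvalbuf() below.
	 */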
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			fvdat->flag &= ~FN_FLUSHINPROG;
			if (fvdat->flag & FN_FLUSHWANT) {
				fvdat->flag &= ~FN_FLUSHWANT;
				wakeup(&fvdat->flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}

	fvdat->flag &= ~FN_FLUSHINPROG;
	if (fvdat->flag & FN_FLUSHWANT) {
		fvdat->flag &= ~FN_FLUSHWANT;
		wakeup(&fvdat->flag);