2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
17 * * Neither the name of Google Inc. nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (C) 2005 Csaba Henk.
34 * All rights reserved.
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
45 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
61 #include <sys/param.h>
62 #include <sys/counter.h>
63 #include <sys/module.h>
64 #include <sys/systm.h>
65 #include <sys/errno.h>
66 #include <sys/kernel.h>
69 #include <sys/malloc.h>
70 #include <sys/queue.h>
72 #include <sys/mutex.h>
76 #include <sys/mount.h>
77 #include <sys/vnode.h>
78 #include <sys/namei.h>
80 #include <sys/unistd.h>
81 #include <sys/filedesc.h>
83 #include <sys/fcntl.h>
84 #include <sys/dirent.h>
87 #include <sys/sysctl.h>
91 #include "fuse_file.h"
92 #include "fuse_internal.h"
95 #include "fuse_node.h"
96 #include "fuse_file.h"
/*
 * DTrace plumbing: declare the fusefs provider and a generic trace probe.
 * NOTE(review): lines appear elided in this excerpt (original numbering gaps);
 * the arg0/arg1 lines below are fragments of a larger comment block.
 */
98 SDT_PROVIDER_DECLARE(fusefs);
101 * arg0: verbosity. Higher numbers give more verbose messages
102 * arg1: Textual message
104 SDT_PROBE_DEFINE2(fusefs, , internal, trace, "int", "char*");
/*
 * Forward declaration for the optional zero-check helper (see isbzero below),
 * plus per-mount-independent lookup cache statistics exported via sysctl
 * under vfs.fusefs.stats.  Counters are allocated in fuse_internal_init().
 */
106 #ifdef ZERO_PAD_INCOMPLETE_BUFS
107 static int isbzero(void *buf, size_t len);
111 counter_u64_t fuse_lookup_cache_hits;
112 counter_u64_t fuse_lookup_cache_misses;
114 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
115 &fuse_lookup_cache_hits, "number of positive cache hits in lookup");
117 SYSCTL_COUNTER_U64(_vfs_fusefs_stats, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
118 &fuse_lookup_cache_misses, "number of cache misses in lookup");
/*
 * Look up a vnode for inode 'ino' in the VFS hash cache and validate its
 * fusefs entry-cache timeout, bumping the hit/miss counters accordingly.
 * NOTE(review): this excerpt is missing interior lines (return type,
 * bintime sampling of 'now', and the epilogue) — see original source.
 */
121 fuse_internal_get_cached_vnode(struct mount* mp, ino_t ino, int flags,
125 struct thread *td = curthread;
126 uint64_t nodeid = ino;
131 error = vfs_hash_get(mp, fuse_vnode_hash(nodeid), flags, td, vpp,
132 fuse_vnode_cmp, &nodeid);
136 * Check the entry cache timeout. We have to do this within fusefs
137 * instead of by using cache_enter_time/cache_lookup because those
138 * routines are only intended to work with pathnames, not inodes
142 if (bintime_cmp(&(VTOFUD(*vpp)->entry_cache_timeout), &now, >)){
143 counter_u64_add(fuse_lookup_cache_hits, 1);
146 /* Entry cache timeout */
147 counter_u64_add(fuse_lookup_cache_misses, 1);
156 /* Synchronously send a FUSE_ACCESS operation */
/*
 * Access check strategy visible in this excerpt:
 *  - deny VMODIFY_PERMS on read-only mounts (per-vtype switch, body elided);
 *  - unless FSESS_DAEMON_CAN_SPY, only the daemon owner's cred may proceed;
 *  - with FSESS_DEFAULT_PERMISSIONS, evaluate locally via getattr + vaccess();
 *  - otherwise translate 'mode' to a FUSE access mask and ask the daemon,
 *    caching ENOSYS via fsess_set_notimpl().
 * NOTE(review): interior lines are elided in this excerpt (mask |= cases,
 * fai setup between fdisp_make_vp and fdisp_wait_answ, error epilogue).
 */
158 fuse_internal_access(struct vnode *vp,
164 uint32_t mask = F_OK;
168 struct fuse_dispatcher fdi;
169 struct fuse_access_in *fai;
170 struct fuse_data *data;
172 mp = vnode_mount(vp);
173 vtype = vnode_vtype(vp);
175 data = fuse_get_mpdata(mp);
176 dataflags = data->dataflags;
181 if (mode & VMODIFY_PERMS && vfs_isrdonly(mp)) {
182 switch (vp->v_type) {
194 /* Unless explicitly permitted, deny everyone except the fs owner. */
195 if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
196 if (fuse_match_cred(data->daemoncred, cred))
200 if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
203 fuse_internal_getattr(vp, &va, cred, td);
204 return vaccess(vp->v_type, va.va_mode, va.va_uid,
205 va.va_gid, mode, cred, NULL);
208 if (!fsess_isimpl(mp, FUSE_ACCESS))
211 if ((mode & (VWRITE | VAPPEND | VADMIN)) != 0)
213 if ((mode & VREAD) != 0)
215 if ((mode & VEXEC) != 0)
218 fdisp_init(&fdi, sizeof(*fai));
219 fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);
224 err = fdisp_wait_answ(&fdi);
228 fsess_set_notimpl(mp, FUSE_ACCESS);
235 * Cache FUSE attributes from attr, in attribute cache associated with vnode
236 * 'vp'. Optionally, if argument 'vap' is not NULL, store a copy of the
237 * converted attributes there as well.
239 * If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do
240 * return the result to the caller).
/*
 * NOTE(review): excerpt elides the return type, 'fvdat' initialization,
 * the 'else' arms of the vp_cache_at selection, and the closing brace.
 * Requires the vnode exclusively locked (ASSERT_VOP_ELOCKED below).
 */
243 fuse_internal_cache_attrs(struct vnode *vp, struct fuse_attr *attr,
244 uint64_t attr_valid, uint32_t attr_valid_nsec, struct vattr *vap)
247 struct fuse_vnode_data *fvdat;
248 struct fuse_data *data;
249 struct vattr *vp_cache_at;
251 mp = vnode_mount(vp);
253 data = fuse_get_mpdata(mp);
255 ASSERT_VOP_ELOCKED(vp, "fuse_internal_cache_attrs");
/* Convert the daemon-supplied TTL into an absolute expiry bintime. */
257 fuse_validity_2_bintime(attr_valid, attr_valid_nsec,
258 &fvdat->attr_cache_timeout);
260 /* Fix our buffers if the filesize changed without us knowing */
261 if (vnode_isreg(vp) && attr->size != fvdat->cached_attrs.va_size) {
262 (void)fuse_vnode_setsize(vp, attr->size);
263 fvdat->cached_attrs.va_size = attr->size;
/* Positive TTL: fill the vnode's cache; zero TTL: fill caller's vap only. */
266 if (attr_valid > 0 || attr_valid_nsec > 0)
267 vp_cache_at = &(fvdat->cached_attrs);
268 else if (vap != NULL)
273 vattr_null(vp_cache_at);
274 vp_cache_at->va_fsid = mp->mnt_stat.f_fsid.val[0];
275 vp_cache_at->va_fileid = attr->ino;
276 vp_cache_at->va_mode = attr->mode & ~S_IFMT;
277 vp_cache_at->va_nlink = attr->nlink;
278 vp_cache_at->va_uid = attr->uid;
279 vp_cache_at->va_gid = attr->gid;
280 vp_cache_at->va_rdev = attr->rdev;
281 vp_cache_at->va_size = attr->size;
282 /* XXX on i386, seconds are truncated to 32 bits */
283 vp_cache_at->va_atime.tv_sec = attr->atime;
284 vp_cache_at->va_atime.tv_nsec = attr->atimensec;
285 vp_cache_at->va_mtime.tv_sec = attr->mtime;
286 vp_cache_at->va_mtime.tv_nsec = attr->mtimensec;
287 vp_cache_at->va_ctime.tv_sec = attr->ctime;
288 vp_cache_at->va_ctime.tv_nsec = attr->ctimensec;
/* blksize is only meaningful for protocol >= 7.9. */
289 if (fuse_libabi_geq(data, 7, 9) && attr->blksize > 0)
290 vp_cache_at->va_blocksize = attr->blksize;
292 vp_cache_at->va_blocksize = PAGE_SIZE;
293 vp_cache_at->va_type = IFTOVT(attr->mode);
294 vp_cache_at->va_bytes = attr->blocks * S_BLKSIZE;
295 vp_cache_at->va_flags = 0;
/* Give the caller a copy when it asked for one and didn't get the cache. */
297 if (vap != vp_cache_at && vap != NULL)
298 memcpy(vap, vp_cache_at, sizeof(*vap));
/*
 * Async-completion callback for FUSE_FSYNC(DIR): if the daemon answered
 * ENOSYS, remember the opcode is unimplemented so we stop sending it.
 */
305 fuse_internal_fsync_callback(struct fuse_ticket *tick, struct uio *uio)
307 if (tick->tk_aw_ohead.error == ENOSYS) {
308 fsess_set_notimpl(tick->tk_data->mp, fticket_opcode(tick));
/*
 * Send FUSE_FSYNC (or FUSE_FSYNCDIR for directories) for every open file
 * handle on the vnode, synchronously for MNT_WAIT, otherwise asynchronously
 * via the fsync callback above.  ENOSYS from the daemon is treated as
 * success-and-never-again (fsess_set_notimpl).
 * NOTE(review): interior lines elided (op selection, datasync flag branch,
 * 'ffsi' extraction from fdi, loop/epilogue braces).
 */
314 fuse_internal_fsync(struct vnode *vp,
319 struct fuse_fsync_in *ffsi = NULL;
320 struct fuse_dispatcher fdi;
321 struct fuse_filehandle *fufh;
322 struct fuse_vnode_data *fvdat = VTOFUD(vp);
323 struct mount *mp = vnode_mount(vp);
327 if (!fsess_isimpl(vnode_mount(vp),
328 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
334 if (!fsess_isimpl(mp, op))
337 fdisp_init(&fdi, sizeof(*ffsi));
339 * fsync every open file handle for this file, because we can't be sure
340 * which file handle the caller is really referring to.
342 LIST_FOREACH(fufh, &fvdat->handles, next) {
344 fdisp_make_vp(&fdi, op, vp, td, NULL);
346 fdisp_refresh_vp(&fdi, op, vp, td, NULL);
348 ffsi->fh = fufh->fh_id;
349 ffsi->fsync_flags = 0;
/* fsync_flags == 1 requests a datasync (metadata not forced). */
352 ffsi->fsync_flags = 1;
354 if (waitfor == MNT_WAIT) {
355 err = fdisp_wait_answ(&fdi);
357 fuse_insert_callback(fdi.tick,
358 fuse_internal_fsync_callback);
359 fuse_insert_message(fdi.tick, false);
362 /* ENOSYS means "success, and don't call again" */
363 fsess_set_notimpl(mp, op);
373 /* Asynchronous invalidation */
/* Probe fires when an entry-invalidation finds the entry in the namecache. */
374 SDT_PROBE_DEFINE2(fusefs, , internal, invalidate_cache_hit,
375 "struct vnode*", "struct vnode*");
/*
 * Handle a FUSE_NOTIFY_INVAL_ENTRY message from the daemon: read the
 * parent nodeid and name from 'uio', look up the parent vnode, and purge
 * the matching namecache entry plus the parent's attribute cache.
 * NOTE(review): interior lines elided ('name' buffer declaration, error
 * labels, vnode release/return paths).
 */
377 fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio)
379 struct fuse_notify_inval_entry_out fnieo;
380 struct componentname cn;
381 struct vnode *dvp, *vp;
385 if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0)
388 if ((err = uiomove(name, fnieo.namelen, uio)) != 0)
390 name[fnieo.namelen] = '\0';
391 /* fusefs does not cache "." or ".." entries */
392 if (strncmp(name, ".", sizeof(".")) == 0 ||
393 strncmp(name, "..", sizeof("..")) == 0)
396 if (fnieo.parent == FUSE_ROOT_ID)
397 err = VFS_ROOT(mp, LK_SHARED, &dvp);
399 err = fuse_internal_get_cached_vnode( mp, fnieo.parent,
402 * If dvp is not in the cache, then it must've been reclaimed. And
403 * since fuse_vnop_reclaim does a cache_purge, name's entry must've
404 * been invalidated already. So we can safely return if dvp == NULL
406 if (err != 0 || dvp == NULL)
409 * XXX we can't check dvp's generation because the FUSE invalidate
410 * entry message doesn't include it. Worse case is that we invalidate
411 * an entry that didn't need to be invalidated.
414 cn.cn_nameiop = LOOKUP;
415 cn.cn_flags = 0; /* !MAKEENTRY means free cached entry */
416 cn.cn_thread = curthread;
417 cn.cn_cred = curthread->td_ucred;
418 cn.cn_lkflags = LK_SHARED;
420 cn.cn_nameptr = name;
421 cn.cn_namelen = fnieo.namelen;
/* cache_lookup with !MAKEENTRY evicts the entry as a side effect. */
422 err = cache_lookup(dvp, &vp, &cn, NULL, NULL);
424 fuse_vnode_clear_attr_cache(dvp);
/*
 * Handle a FUSE_NOTIFY_INVAL_INODE message: read the inode number from
 * 'uio', find its vnode (root is special-cased), then flush/invalidate its
 * buffers and attribute cache.
 * NOTE(review): interior lines elided (off/len handling around the
 * fuse_io_invalbuf call, vput/return epilogue).
 */
430 fuse_internal_invalidate_inode(struct mount *mp, struct uio *uio)
432 struct fuse_notify_inval_inode_out fniio;
436 if ((err = uiomove(&fniio, sizeof(fniio), uio)) != 0)
439 if (fniio.ino == FUSE_ROOT_ID)
440 err = VFS_ROOT(mp, LK_EXCLUSIVE, &vp);
442 err = fuse_internal_get_cached_vnode(mp, fniio.ino, LK_SHARED,
444 if (err != 0 || vp == NULL)
447 * XXX we can't check vp's generation because the FUSE invalidate
448 * entry message doesn't include it. Worse case is that we invalidate
449 * an inode that didn't need to be invalidated.
453 * Flush and invalidate buffers if off >= 0. Technically we only need
454 * to flush and invalidate the range of offsets [off, off + len), but
455 * for simplicity's sake we do everything.
458 fuse_io_invalbuf(vp, curthread);
459 fuse_vnode_clear_attr_cache(vp);
/*
 * Build a FUSE_MKNOD request from 'vap' and dispatch it through the common
 * newentry path.  Protocol >= 7.12 adds a umask field to fuse_mknod_in;
 * older daemons get the compat-sized (smaller) structure.
 */
466 fuse_internal_mknod(struct vnode *dvp, struct vnode **vpp,
467 struct componentname *cnp, struct vattr *vap)
469 struct fuse_data *data;
470 struct fuse_mknod_in fmni;
473 data = fuse_get_mpdata(dvp->v_mount);
475 fmni.mode = MAKEIMODE(vap->va_type, vap->va_mode);
476 fmni.rdev = vap->va_rdev;
477 if (fuse_libabi_geq(data, 7, 12)) {
478 insize = sizeof(fmni);
479 fmni.umask = curthread->td_proc->p_fd->fd_cmask;
481 insize = FUSE_COMPAT_MKNOD_IN_SIZE;
483 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKNOD, &fmni,
484 insize, vap->va_type));
/*
 * Read directory entries by issuing FUSE_READDIR requests in a loop until
 * the caller's uio is satisfied, handing each reply to
 * fuse_internal_readdir_processdata() for conversion to struct dirent.
 * A processdata return of -1 means "finished" and is mapped to success.
 * NOTE(review): interior lines elided (fnd_start init, 'fri' extraction,
 * loop-exit conditions, fdisp_destroy).
 */
490 fuse_internal_readdir(struct vnode *vp,
493 struct fuse_filehandle *fufh,
494 struct fuse_iov *cookediov,
499 struct fuse_dispatcher fdi;
500 struct fuse_read_in *fri = NULL;
503 if (uio_resid(uio) == 0)
508 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
513 * fnd_start is set non-zero once the offset in the directory gets
514 * to the startoff. This is done because directories must be read
515 * from the beginning (offset == 0) when fuse_vnop_readdir() needs
516 * to do an open of the directory.
517 * If it is not set non-zero here, it will be set non-zero in
518 * fuse_internal_readdir_processdata() when uio_offset == startoff.
521 if (uio->uio_offset == startoff)
523 while (uio_resid(uio) > 0) {
524 fdi.iosize = sizeof(*fri);
526 fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
528 fdisp_refresh_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
531 fri->fh = fufh->fh_id;
532 fri->offset = uio_offset(uio);
/* Never ask the daemon for more than it advertised in max_read. */
533 fri->size = MIN(uio->uio_resid,
534 fuse_get_mpdata(vp->v_mount)->max_read);
536 if ((err = fdisp_wait_answ(&fdi)))
538 if ((err = fuse_internal_readdir_processdata(uio, startoff,
539 &fnd_start, fri->size, fdi.answ, fdi.iosize, cookediov,
540 ncookies, &cookies)))
545 return ((err == -1) ? 0 : err);
549 * Return -1 to indicate that this readdir is finished, 0 if it copied
550 * all the directory data read in and it may be possible to read more
551 * and greater than 0 for a failure.
/*
 * Walks the raw fuse_dirent records in 'buf', converts each to a struct
 * dirent in 'cookediov', copies it to the caller's uio, and records NFS
 * cookies when requested.  Copying only starts once *fnd_start is set
 * (i.e. the requested directory offset has been reached).
 * NOTE(review): many interior lines elided in this excerpt (parameter
 * list, loop header, error values, brace structure).
 */
554 fuse_internal_readdir_processdata(struct uio *uio,
560 struct fuse_iov *cookediov,
569 struct fuse_dirent *fudge;
573 if (bufsize < FUSE_NAME_OFFSET)
576 if (bufsize < FUSE_NAME_OFFSET) {
580 fudge = (struct fuse_dirent *)buf;
581 freclen = FUSE_DIRENT_SIZE(fudge);
583 if (bufsize < freclen) {
585 * This indicates a partial directory entry at the
586 * end of the directory data.
591 #ifdef ZERO_PAD_INCOMPLETE_BUFS
592 if (isbzero(buf, FUSE_NAME_OFFSET)) {
598 if (!fudge->namelen || fudge->namelen > MAXNAMLEN) {
602 bytesavail = GENERIC_DIRSIZ((struct pseudo_dirent *)
605 if (bytesavail > uio_resid(uio)) {
606 /* Out of space for the dir so we are done. */
611 * Don't start to copy the directory entries out until
612 * the requested offset in the directory is found.
614 if (*fnd_start != 0) {
615 fiov_adjust(cookediov, bytesavail);
616 bzero(cookediov->base, bytesavail);
618 de = (struct dirent *)cookediov->base;
619 de->d_fileno = fudge->ino;
620 de->d_reclen = bytesavail;
621 de->d_type = fudge->type;
622 de->d_namlen = fudge->namelen;
623 memcpy((char *)cookediov->base + sizeof(struct dirent) -
625 (char *)buf + FUSE_NAME_OFFSET, fudge->namelen);
626 dirent_terminate(de);
628 err = uiomove(cookediov->base, cookediov->len, uio);
631 if (cookies != NULL) {
632 if (*ncookies == 0) {
636 *cookies = fudge->off;
640 } else if (startoff == fudge->off)
642 buf = (char *)buf + freclen;
/* Advance the uio offset to the daemon-supplied directory cookie. */
644 uio_setoffset(uio, fudge->off);
/*
 * Send FUSE_UNLINK or FUSE_RMDIR ('op') for name 'cnp' in directory 'dvp',
 * then update local caches: decrement the cached nlink, purge the parent's
 * attribute cache, and revoke the vnode if it looks like the last link (or
 * is a directory).
 * NOTE(review): interior lines elided (vp parameter, fdisp_destroy,
 * error-path braces).
 */
654 fuse_internal_remove(struct vnode *dvp,
656 struct componentname *cnp,
659 struct fuse_dispatcher fdi;
663 fdisp_init(&fdi, cnp->cn_namelen + 1);
664 fdisp_make_vp(&fdi, op, dvp, cnp->cn_thread, cnp->cn_cred);
666 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
667 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
669 err = fdisp_wait_answ(&fdi);
676 * Access the cached nlink even if the attr cached has expired. If
677 * it's inaccurate, the worst that will happen is:
678 * 1) We'll recycle the vnode even though the file has another link we
679 * don't know about, costing a bit of cpu time, or
680 * 2) We won't recycle the vnode even though all of its links are gone.
681 * It will linger around until vnlru reclaims it, costing a bit of
/* Post-decrement: 'nlink' holds the pre-removal value for the test below. */
684 nlink = VTOFUD(vp)->cached_attrs.va_nlink--;
687 * Purge the parent's attribute cache because the daemon
688 * should've updated its mtime and ctime.
690 fuse_vnode_clear_attr_cache(dvp);
692 /* NB: nlink could be zero if it was never cached */
693 if (nlink <= 1 || vnode_vtype(vp) == VDIR) {
694 fuse_internal_vnode_disappear(vp);
697 fuse_vnode_update(vp, FN_CTIMECHANGE);
/*
 * Send FUSE_RENAME.  The request payload is a fuse_rename_in (carrying the
 * target directory nodeid) followed by the two NUL-terminated names,
 * packed back to back — hence the "+ 2" for the terminators.
 * NOTE(review): 'fri' extraction from fdi and the epilogue are elided.
 */
706 fuse_internal_rename(struct vnode *fdvp,
707 struct componentname *fcnp,
709 struct componentname *tcnp)
711 struct fuse_dispatcher fdi;
712 struct fuse_rename_in *fri;
715 fdisp_init(&fdi, sizeof(*fri) + fcnp->cn_namelen + tcnp->cn_namelen + 2);
716 fdisp_make_vp(&fdi, FUSE_RENAME, fdvp, tcnp->cn_thread, tcnp->cn_cred);
719 fri->newdir = VTOI(tdvp);
720 memcpy((char *)fdi.indata + sizeof(*fri), fcnp->cn_nameptr,
722 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen] = '\0';
723 memcpy((char *)fdi.indata + sizeof(*fri) + fcnp->cn_namelen + 1,
724 tcnp->cn_nameptr, tcnp->cn_namelen);
725 ((char *)fdi.indata)[sizeof(*fri) + fcnp->cn_namelen +
726 tcnp->cn_namelen + 1] = '\0';
728 err = fdisp_wait_answ(&fdi);
735 /* entity creation */
/*
 * Prepare an entity-creation request (MKNOD/MKDIR/SYMLINK/CREATE...):
 * opcode-specific struct 'buf' followed by the NUL-terminated entry name.
 */
738 fuse_internal_newentry_makerequest(struct mount *mp,
740 struct componentname *cnp,
744 struct fuse_dispatcher *fdip)
746 fdip->iosize = bufsize + cnp->cn_namelen + 1;
748 fdisp_make(fdip, op, mp, dnid, cnp->cn_thread, cnp->cn_cred);
749 memcpy(fdip->indata, buf, bufsize);
750 memcpy((char *)fdip->indata + bufsize, cnp->cn_nameptr, cnp->cn_namelen);
751 ((char *)fdip->indata)[bufsize + cnp->cn_namelen] = '\0';
/*
 * Wait for the daemon's reply to an entity-creation request, validate the
 * returned fuse_entry_out, materialize the new vnode, and prime its
 * attribute cache.  On vnode-get failure the daemon-side lookup count is
 * rolled back with a FORGET.
 * NOTE(review): 'feo' extraction and error-path braces are elided.
 */
755 fuse_internal_newentry_core(struct vnode *dvp,
757 struct componentname *cnp,
759 struct fuse_dispatcher *fdip)
762 struct fuse_entry_out *feo;
763 struct mount *mp = vnode_mount(dvp);
765 if ((err = fdisp_wait_answ(fdip))) {
770 if ((err = fuse_internal_checkentry(feo, vtyp))) {
773 err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp);
775 fuse_internal_forget_send(mp, cnp->cn_thread, cnp->cn_cred,
781 * Purge the parent's attribute cache because the daemon should've
782 * updated its mtime and ctime
784 fuse_vnode_clear_attr_cache(dvp);
786 fuse_internal_cache_attrs(*vpp, &feo->attr, feo->attr_valid,
787 feo->attr_valid_nsec, NULL);
/*
 * Convenience wrapper: build the creation request (makerequest) and
 * process the reply (newentry_core) in one call.
 */
793 fuse_internal_newentry(struct vnode *dvp,
795 struct componentname *cnp,
802 struct fuse_dispatcher fdi;
803 struct mount *mp = vnode_mount(dvp);
806 fuse_internal_newentry_makerequest(mp, VTOI(dvp), cnp, op, buf,
808 err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi);
814 /* entity destruction */
/*
 * Ticket callback: re-send a FORGET (nlookup == 1) for the nodeid taken
 * from the original message header.
 */
817 fuse_internal_forget_callback(struct fuse_ticket *ftick, struct uio *uio)
819 fuse_internal_forget_send(ftick->tk_data->mp, curthread, NULL,
820 ((struct fuse_in_header *)ftick->tk_ms_fiov.base)->nodeid, 1);
/*
 * Fire-and-forget FUSE_FORGET: tells the daemon to drop 'nlookup'
 * references to a nodeid.  No reply is expected, so the message is queued
 * asynchronously.
 * NOTE(review): 'ffi' extraction and fdisp_destroy are elided here.
 */
826 fuse_internal_forget_send(struct mount *mp,
833 struct fuse_dispatcher fdi;
834 struct fuse_forget_in *ffi;
837 * KASSERT(nlookup > 0, ("zero-times forget for vp #%llu",
838 * (long long unsigned) nodeid));
841 fdisp_init(&fdi, sizeof(*ffi));
842 fdisp_make(&fdi, FUSE_FORGET, mp, nodeid, td, cred);
845 ffi->nlookup = nlookup;
847 fuse_insert_message(fdi.tick, false);
851 /* Fetch the vnode's attributes from the daemon*/
/*
 * Issue FUSE_GETATTR and cache the result.  Locally-dirty size/ctime/mtime
 * (FN_SIZECHANGE / FN_CTIMECHANGE / FN_MTIMECHANGE) override the daemon's
 * values, since our unflushed changes are newer.  ENOENT, or a type change
 * reported by the daemon, revokes the vnode.
 * NOTE(review): 'fgai' extraction, the ENOENT test around
 * fuse_internal_vnode_disappear, and the epilogue are elided.
 */
853 fuse_internal_do_getattr(struct vnode *vp, struct vattr *vap,
854 struct ucred *cred, struct thread *td)
856 struct fuse_dispatcher fdi;
857 struct fuse_vnode_data *fvdat = VTOFUD(vp);
858 struct fuse_getattr_in *fgai;
859 struct fuse_attr_out *fao;
860 off_t old_filesize = fvdat->cached_attrs.va_size;
861 struct timespec old_ctime = fvdat->cached_attrs.va_ctime;
862 struct timespec old_mtime = fvdat->cached_attrs.va_mtime;
867 fdisp_make_vp(&fdi, FUSE_GETATTR, vp, td, cred);
870 * We could look up a file handle and set it in fgai->fh, but that
871 * involves extra runtime work and I'm unaware of any file systems that
874 fgai->getattr_flags = 0;
875 if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
877 fuse_internal_vnode_disappear(vp);
881 fao = (struct fuse_attr_out *)fdi.answ;
882 vtyp = IFTOVT(fao->attr.mode);
883 if (fvdat->flag & FN_SIZECHANGE)
884 fao->attr.size = old_filesize;
885 if (fvdat->flag & FN_CTIMECHANGE) {
886 fao->attr.ctime = old_ctime.tv_sec;
887 fao->attr.ctimensec = old_ctime.tv_nsec;
889 if (fvdat->flag & FN_MTIMECHANGE) {
890 fao->attr.mtime = old_mtime.tv_sec;
891 fao->attr.mtimensec = old_mtime.tv_nsec;
893 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
894 fao->attr_valid_nsec, vap);
895 if (vtyp != vnode_vtype(vp)) {
896 fuse_internal_vnode_disappear(vp);
905 /* Read a vnode's attributes from cache or fetch them from the fuse daemon */
/* Fast path: copy unexpired cached attrs; otherwise fall to do_getattr. */
907 fuse_internal_getattr(struct vnode *vp, struct vattr *vap, struct ucred *cred,
912 if ((attrs = VTOVA(vp)) != NULL) {
913 *vap = *attrs; /* struct copy */
917 return fuse_internal_do_getattr(vp, vap, cred, td);
/*
 * Mark a vnode revoked (FN_REVOKED).  Requires the vnode exclusively
 * locked.  NOTE(review): the cache_purge/attr-clear tail visible in the
 * full source is elided from this excerpt.
 */
921 fuse_internal_vnode_disappear(struct vnode *vp)
923 struct fuse_vnode_data *fvdat = VTOFUD(vp);
925 ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
926 fvdat->flag |= FN_REVOKED;
930 /* fuse start/stop */
/*
 * Completion callback for FUSE_INIT: negotiate the protocol ABI with the
 * daemon, record feature flags and limits in 'data', mark opcodes
 * unimplemented for old ABIs, pick the cache mode, then flag the session
 * INITED and wake waiters.  Any negotiation failure kills the session
 * (fdata_set_dead).
 * NOTE(review): error-label structure and several braces are elided.
 * NOTE(review): the probe message says "userpace" (sic, matches upstream);
 * fixing it would change a runtime string, so it is left as-is here.
 */
933 fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio)
936 struct fuse_data *data = tick->tk_data;
937 struct fuse_init_out *fiio;
939 if ((err = tick->tk_aw_ohead.error)) {
942 if ((err = fticket_pull(tick, uio))) {
945 fiio = fticket_resp(tick)->base;
947 data->fuse_libabi_major = fiio->major;
948 data->fuse_libabi_minor = fiio->minor;
949 if (!fuse_libabi_geq(data, 7, 4)) {
951 * With a little work we could support servers as old as 7.1.
952 * But there would be little payoff.
954 SDT_PROBE2(fusefs, , internal, trace, 1,
955 "userpace version too low");
956 err = EPROTONOSUPPORT;
960 if (fuse_libabi_geq(data, 7, 5)) {
961 if (fticket_resp(tick)->len == sizeof(struct fuse_init_out) ||
962 fticket_resp(tick)->len == FUSE_COMPAT_22_INIT_OUT_SIZE) {
963 data->max_write = fiio->max_write;
964 if (fiio->flags & FUSE_ASYNC_READ)
965 data->dataflags |= FSESS_ASYNC_READ;
966 if (fiio->flags & FUSE_POSIX_LOCKS)
967 data->dataflags |= FSESS_POSIX_LOCKS;
968 if (fiio->flags & FUSE_EXPORT_SUPPORT)
969 data->dataflags |= FSESS_EXPORT_SUPPORT;
971 * Don't bother to check FUSE_BIG_WRITES, because it's
972 * redundant with max_write
975 * max_background and congestion_threshold are not
982 /* Old fixed values */
983 data->max_write = 4096;
986 if (fuse_libabi_geq(data, 7, 6))
987 data->max_readahead_blocks = fiio->max_readahead / maxbcachebuf;
989 if (!fuse_libabi_geq(data, 7, 7))
990 fsess_set_notimpl(data->mp, FUSE_INTERRUPT);
992 if (!fuse_libabi_geq(data, 7, 8)) {
993 fsess_set_notimpl(data->mp, FUSE_BMAP);
994 fsess_set_notimpl(data->mp, FUSE_DESTROY);
997 if (fuse_libabi_geq(data, 7, 23) && fiio->time_gran >= 1 &&
998 fiio->time_gran <= 1000000000)
999 data->time_gran = fiio->time_gran;
1001 data->time_gran = 1;
1003 if (!fuse_libabi_geq(data, 7, 23))
1004 data->cache_mode = fuse_data_cache_mode;
1005 else if (fiio->flags & FUSE_WRITEBACK_CACHE)
1006 data->cache_mode = FUSE_CACHE_WB;
1008 data->cache_mode = FUSE_CACHE_WT;
1012 fdata_set_dead(data);
1015 data->dataflags |= FSESS_INITED;
1016 wakeup(&data->ticketer);
/*
 * Send the initial FUSE_INIT handshake: advertise our kernel protocol
 * version, readahead limit, and supported feature flags, then queue the
 * message asynchronously with fuse_internal_init_callback to process the
 * daemon's reply.  NOTE(review): 'fiii' extraction from fdi is elided.
 */
1023 fuse_internal_send_init(struct fuse_data *data, struct thread *td)
1025 struct fuse_init_in *fiii;
1026 struct fuse_dispatcher fdi;
1028 fdisp_init(&fdi, sizeof(*fiii));
1029 fdisp_make(&fdi, FUSE_INIT, data->mp, 0, td, NULL);
1031 fiii->major = FUSE_KERNEL_VERSION;
1032 fiii->minor = FUSE_KERNEL_MINOR_VERSION;
1034 * fusefs currently reads ahead no more than one cache block at a time.
1035 * See fuse_read_biobackend
1037 fiii->max_readahead = maxbcachebuf;
1039 * Unsupported features:
1040 * FUSE_FILE_OPS: No known FUSE server or client supports it
1041 * FUSE_ATOMIC_O_TRUNC: our VFS cannot support it
1042 * FUSE_DONT_MASK: unlike Linux, FreeBSD always applies the umask, even
1043 * when default ACLs are in use.
1044 * FUSE_SPLICE_WRITE, FUSE_SPLICE_MOVE, FUSE_SPLICE_READ: FreeBSD
1045 * doesn't have splice(2).
1046 * FUSE_FLOCK_LOCKS: not yet implemented
1047 * FUSE_HAS_IOCTL_DIR: not yet implemented
1048 * FUSE_AUTO_INVAL_DATA: not yet implemented
1049 * FUSE_DO_READDIRPLUS: not yet implemented
1050 * FUSE_READDIRPLUS_AUTO: not yet implemented
1051 * FUSE_ASYNC_DIO: not yet implemented
1052 * FUSE_NO_OPEN_SUPPORT: not yet implemented
1054 fiii->flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_EXPORT_SUPPORT
1055 | FUSE_BIG_WRITES | FUSE_WRITEBACK_CACHE;
1057 fuse_insert_callback(fdi.tick, fuse_internal_init_callback);
1058 fuse_insert_message(fdi.tick, false);
1059 fdisp_destroy(&fdi);
1063 * Send a FUSE_SETATTR operation with no permissions checks. If cred is NULL,
1064 * send the request with root credentials
/*
 * Translates the VNOVAL-marked fields of 'vap' into fuse_setattr_in valid
 * bits (uid/gid/size/atime/mtime/mode, plus dirty cached mtime/ctime for
 * write-back), sends FUSE_SETATTR, and on success verifies the returned
 * vnode type and refreshes the attribute cache.  A type change "behind our
 * back" revokes the vnode.
 * NOTE(review): interior lines elided ('fsai' extraction/zeroing, 'fvdat'
 * init, flag-handling branches, error labels).
 */
1066 int fuse_internal_setattr(struct vnode *vp, struct vattr *vap,
1067 struct thread *td, struct ucred *cred)
1069 struct fuse_vnode_data *fvdat;
1070 struct fuse_dispatcher fdi;
1071 struct fuse_setattr_in *fsai;
1073 pid_t pid = td->td_proc->p_pid;
1074 struct fuse_data *data;
1078 int sizechanged = -1;
1079 uint64_t newsize = 0;
1081 mp = vnode_mount(vp);
1083 data = fuse_get_mpdata(mp);
1084 dataflags = data->dataflags;
1086 fdisp_init(&fdi, sizeof(*fsai));
1087 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1095 if (vap->va_uid != (uid_t)VNOVAL) {
1096 fsai->uid = vap->va_uid;
1097 fsai->valid |= FATTR_UID;
1099 if (vap->va_gid != (gid_t)VNOVAL) {
1100 fsai->gid = vap->va_gid;
1101 fsai->valid |= FATTR_GID;
1103 if (vap->va_size != VNOVAL) {
1104 struct fuse_filehandle *fufh = NULL;
1106 /*Truncate to a new value. */
1107 fsai->size = vap->va_size;
1109 newsize = vap->va_size;
1110 fsai->valid |= FATTR_SIZE;
/* Attach a writable file handle to the truncate when one is open. */
1112 fuse_filehandle_getrw(vp, FWRITE, &fufh, cred, pid);
1114 fsai->fh = fufh->fh_id;
1115 fsai->valid |= FATTR_FH;
1117 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1119 if (vap->va_atime.tv_sec != VNOVAL) {
1120 fsai->atime = vap->va_atime.tv_sec;
1121 fsai->atimensec = vap->va_atime.tv_nsec;
1122 fsai->valid |= FATTR_ATIME;
1123 if (vap->va_vaflags & VA_UTIMES_NULL)
1124 fsai->valid |= FATTR_ATIME_NOW;
1126 if (vap->va_mtime.tv_sec != VNOVAL) {
1127 fsai->mtime = vap->va_mtime.tv_sec;
1128 fsai->mtimensec = vap->va_mtime.tv_nsec;
1129 fsai->valid |= FATTR_MTIME;
1130 if (vap->va_vaflags & VA_UTIMES_NULL)
1131 fsai->valid |= FATTR_MTIME_NOW;
1132 } else if (fvdat->flag & FN_MTIMECHANGE) {
1133 fsai->mtime = fvdat->cached_attrs.va_mtime.tv_sec;
1134 fsai->mtimensec = fvdat->cached_attrs.va_mtime.tv_nsec;
1135 fsai->valid |= FATTR_MTIME;
/* FATTR_CTIME only exists in protocol >= 7.23. */
1137 if (fuse_libabi_geq(data, 7, 23) && fvdat->flag & FN_CTIMECHANGE) {
1138 fsai->ctime = fvdat->cached_attrs.va_ctime.tv_sec;
1139 fsai->ctimensec = fvdat->cached_attrs.va_ctime.tv_nsec;
1140 fsai->valid |= FATTR_CTIME;
1142 if (vap->va_mode != (mode_t)VNOVAL) {
1143 fsai->mode = vap->va_mode & ALLPERMS;
1144 fsai->valid |= FATTR_MODE;
1150 if ((err = fdisp_wait_answ(&fdi)))
1152 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1154 if (vnode_vtype(vp) != vtyp) {
1155 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1156 SDT_PROBE2(fusefs, , internal, trace, 1, "FUSE: Dang! "
1157 "vnode_vtype is VNON and vtype isn't.");
1160 * STALE vnode, ditch
1162 * The vnode has changed its type "behind our back".
1163 * There's nothing really we can do, so let us just
1164 * force an internal revocation and tell the caller to
1165 * try again, if interested.
1167 fuse_internal_vnode_disappear(vp);
1172 struct fuse_attr_out *fao = (struct fuse_attr_out*)fdi.answ;
1173 fuse_vnode_undirty_cached_timestamps(vp);
1174 fuse_internal_cache_attrs(vp, &fao->attr, fao->attr_valid,
1175 fao->attr_valid_nsec, NULL);
1179 fdisp_destroy(&fdi);
1183 #ifdef ZERO_PAD_INCOMPLETE_BUFS
/*
 * Return whether the first 'len' bytes of 'buf' are all zero; used to
 * detect zero-padded tails of incomplete readdir buffers.
 */
1185 isbzero(void *buf, size_t len)
1189 for (i = 0; i < len; i++) {
1190 if (((char *)buf)[i])
/* Module-load hook: allocate and zero the lookup-cache statistics counters. */
1200 fuse_internal_init(void)
1202 fuse_lookup_cache_misses = counter_u64_alloc(M_WAITOK);
1203 counter_u64_zero(fuse_lookup_cache_misses);
1204 fuse_lookup_cache_hits = counter_u64_alloc(M_WAITOK);
1205 counter_u64_zero(fuse_lookup_cache_hits);
/* Module-unload hook: release the counters allocated in fuse_internal_init(). */
1209 fuse_internal_destroy(void)
1211 counter_u64_free(fuse_lookup_cache_hits);
1212 counter_u64_free(fuse_lookup_cache_misses);