/*-
 * Copyright (c) 2007-2009 Google Inc. and Amit Singh
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD$");
59 #include <sys/types.h>
60 #include <sys/module.h>
61 #include <sys/systm.h>
62 #include <sys/errno.h>
63 #include <sys/param.h>
64 #include <sys/kernel.h>
67 #include <sys/malloc.h>
68 #include <sys/queue.h>
70 #include <sys/rwlock.h>
73 #include <sys/mount.h>
74 #include <sys/vnode.h>
75 #include <sys/namei.h>
77 #include <sys/unistd.h>
78 #include <sys/filedesc.h>
80 #include <sys/fcntl.h>
81 #include <sys/dirent.h>
84 #include <sys/sysctl.h>
85 #include <sys/vmmeter.h>
88 #include <vm/vm_extern.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_page.h>
92 #include <vm/vm_param.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vnode_pager.h>
96 #include <vm/vm_object.h>
99 #include "fuse_file.h"
100 #include "fuse_internal.h"
101 #include "fuse_ipc.h"
102 #include "fuse_node.h"
103 #include "fuse_param.h"
106 #include <sys/priv.h>
108 #define FUSE_DEBUG_MODULE VNOPS
109 #include "fuse_debug.h"
112 static vop_access_t fuse_vnop_access;
113 static vop_close_t fuse_vnop_close;
114 static vop_create_t fuse_vnop_create;
115 static vop_fsync_t fuse_vnop_fsync;
116 static vop_getattr_t fuse_vnop_getattr;
117 static vop_inactive_t fuse_vnop_inactive;
118 static vop_link_t fuse_vnop_link;
119 static vop_lookup_t fuse_vnop_lookup;
120 static vop_mkdir_t fuse_vnop_mkdir;
121 static vop_mknod_t fuse_vnop_mknod;
122 static vop_open_t fuse_vnop_open;
123 static vop_read_t fuse_vnop_read;
124 static vop_readdir_t fuse_vnop_readdir;
125 static vop_readlink_t fuse_vnop_readlink;
126 static vop_reclaim_t fuse_vnop_reclaim;
127 static vop_remove_t fuse_vnop_remove;
128 static vop_rename_t fuse_vnop_rename;
129 static vop_rmdir_t fuse_vnop_rmdir;
130 static vop_setattr_t fuse_vnop_setattr;
131 static vop_strategy_t fuse_vnop_strategy;
132 static vop_symlink_t fuse_vnop_symlink;
133 static vop_write_t fuse_vnop_write;
134 static vop_getpages_t fuse_vnop_getpages;
135 static vop_putpages_t fuse_vnop_putpages;
136 static vop_print_t fuse_vnop_print;
138 struct vop_vector fuse_vnops = {
139 .vop_default = &default_vnodeops,
140 .vop_access = fuse_vnop_access,
141 .vop_close = fuse_vnop_close,
142 .vop_create = fuse_vnop_create,
143 .vop_fsync = fuse_vnop_fsync,
144 .vop_getattr = fuse_vnop_getattr,
145 .vop_inactive = fuse_vnop_inactive,
146 .vop_link = fuse_vnop_link,
147 .vop_lookup = fuse_vnop_lookup,
148 .vop_mkdir = fuse_vnop_mkdir,
149 .vop_mknod = fuse_vnop_mknod,
150 .vop_open = fuse_vnop_open,
151 .vop_pathconf = vop_stdpathconf,
152 .vop_read = fuse_vnop_read,
153 .vop_readdir = fuse_vnop_readdir,
154 .vop_readlink = fuse_vnop_readlink,
155 .vop_reclaim = fuse_vnop_reclaim,
156 .vop_remove = fuse_vnop_remove,
157 .vop_rename = fuse_vnop_rename,
158 .vop_rmdir = fuse_vnop_rmdir,
159 .vop_setattr = fuse_vnop_setattr,
160 .vop_strategy = fuse_vnop_strategy,
161 .vop_symlink = fuse_vnop_symlink,
162 .vop_write = fuse_vnop_write,
163 .vop_getpages = fuse_vnop_getpages,
164 .vop_putpages = fuse_vnop_putpages,
165 .vop_print = fuse_vnop_print,
168 static u_long fuse_lookup_cache_hits = 0;
170 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_hits, CTLFLAG_RD,
171 &fuse_lookup_cache_hits, 0, "");
173 static u_long fuse_lookup_cache_misses = 0;
175 SYSCTL_ULONG(_vfs_fuse, OID_AUTO, lookup_cache_misses, CTLFLAG_RD,
176 &fuse_lookup_cache_misses, 0, "");
178 int fuse_lookup_cache_enable = 1;
180 SYSCTL_INT(_vfs_fuse, OID_AUTO, lookup_cache_enable, CTLFLAG_RW,
181 &fuse_lookup_cache_enable, 0, "");
184 * XXX: This feature is highly experimental and can bring to instabilities,
185 * needs revisiting before to be enabled by default.
187 static int fuse_reclaim_revoked = 0;
189 SYSCTL_INT(_vfs_fuse, OID_AUTO, reclaim_revoked, CTLFLAG_RW,
190 &fuse_reclaim_revoked, 0, "");
192 int fuse_pbuf_freecnt = -1;
194 #define fuse_vm_page_lock(m) vm_page_lock((m));
195 #define fuse_vm_page_unlock(m) vm_page_unlock((m));
196 #define fuse_vm_page_lock_queues() ((void)0)
197 #define fuse_vm_page_unlock_queues() ((void)0)
200 struct vnop_access_args {
202 #if VOP_ACCESS_TAKES_ACCMODE_T
207 struct ucred *a_cred;
212 fuse_vnop_access(struct vop_access_args *ap)
214 struct vnode *vp = ap->a_vp;
215 int accmode = ap->a_accmode;
216 struct ucred *cred = ap->a_cred;
218 struct fuse_access_param facp;
219 struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
223 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
225 if (fuse_isdeadfs(vp)) {
226 if (vnode_isvroot(vp)) {
231 if (!(data->dataflags & FSESS_INITED)) {
232 if (vnode_isvroot(vp)) {
233 if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) ||
234 (fuse_match_cred(data->daemoncred, cred) == 0)) {
240 if (vnode_islnk(vp)) {
243 bzero(&facp, sizeof(facp));
245 err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred);
246 FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode);
251 struct vnop_close_args {
254 struct ucred *a_cred;
259 fuse_vnop_close(struct vop_close_args *ap)
261 struct vnode *vp = ap->a_vp;
262 struct ucred *cred = ap->a_cred;
263 int fflag = ap->a_fflag;
264 fufh_type_t fufh_type;
266 fuse_trace_printf_vnop();
268 if (fuse_isdeadfs(vp)) {
271 if (vnode_isdir(vp)) {
272 if (fuse_filehandle_valid(vp, FUFH_RDONLY)) {
273 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
277 if (fflag & IO_NDELAY) {
280 fufh_type = fuse_filehandle_xlate_from_fflags(fflag);
282 if (!fuse_filehandle_valid(vp, fufh_type)) {
285 for (i = 0; i < FUFH_MAXTYPE; i++)
286 if (fuse_filehandle_valid(vp, i))
288 if (i == FUFH_MAXTYPE)
289 panic("FUSE: fufh type %d found to be invalid in close"
293 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
294 fuse_vnode_savesize(vp, cred);
300 struct vnop_create_args {
302 struct vnode **a_vpp;
303 struct componentname *a_cnp;
308 fuse_vnop_create(struct vop_create_args *ap)
310 struct vnode *dvp = ap->a_dvp;
311 struct vnode **vpp = ap->a_vpp;
312 struct componentname *cnp = ap->a_cnp;
313 struct vattr *vap = ap->a_vap;
314 struct thread *td = cnp->cn_thread;
315 struct ucred *cred = cnp->cn_cred;
317 struct fuse_open_in *foi;
318 struct fuse_entry_out *feo;
319 struct fuse_dispatcher fdi;
320 struct fuse_dispatcher *fdip = &fdi;
324 struct mount *mp = vnode_mount(dvp);
325 uint64_t parentnid = VTOFUD(dvp)->nid;
326 mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode);
328 uint32_t x_open_flags;
330 fuse_trace_printf_vnop();
332 if (fuse_isdeadfs(dvp)) {
335 bzero(&fdi, sizeof(fdi));
337 /* XXX: Will we ever want devices ? */
338 if ((vap->va_type != VREG)) {
339 printf("fuse_vnop_create: unsupported va_type %d\n",
343 debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
346 fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
347 if (!fsess_isimpl(mp, FUSE_CREATE)) {
348 debug_printf("eh, daemon doesn't implement create?\n");
351 fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);
355 foi->flags = O_CREAT | O_RDWR;
357 memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
359 ((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';
361 err = fdisp_wait_answ(fdip);
365 fsess_set_notimpl(mp, FUSE_CREATE);
366 debug_printf("create: got err=%d from daemon\n", err);
372 if ((err = fuse_internal_checkentry(feo, VREG))) {
375 err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
377 struct fuse_release_in *fri;
378 uint64_t nodeid = feo->nodeid;
379 uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
381 fdisp_init(fdip, sizeof(*fri));
382 fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
385 fri->flags = OFLAGS(mode);
386 fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
387 fuse_insert_message(fdip->tick);
390 ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");
392 fdip->answ = feo + 1;
394 x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
395 x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
396 fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
397 fuse_vnode_open(*vpp, x_open_flags, td);
398 cache_purge_negative(dvp);
406 * Our vnop_fsync roughly corresponds to the FUSE_FSYNC method. The Linux
407 * version of FUSE also has a FUSE_FLUSH method.
409 * On Linux, fsync() synchronizes a file's complete in-core state with that
410 * on disk. The call is not supposed to return until the system has completed
411 * that action or until an error is detected.
413 * Linux also has an fdatasync() call that is similar to fsync() but is not
414 * required to update the metadata such as access time and modification time.
418 struct vnop_fsync_args {
419 struct vnodeop_desc *a_desc;
421 struct ucred * a_cred;
423 struct thread * a_td;
427 fuse_vnop_fsync(struct vop_fsync_args *ap)
429 struct vnode *vp = ap->a_vp;
430 struct thread *td = ap->a_td;
432 struct fuse_filehandle *fufh;
433 struct fuse_vnode_data *fvdat = VTOFUD(vp);
437 fuse_trace_printf_vnop();
439 if (fuse_isdeadfs(vp)) {
442 if ((err = vop_stdfsync(ap)))
445 if (!fsess_isimpl(vnode_mount(vp),
446 (vnode_vtype(vp) == VDIR ? FUSE_FSYNCDIR : FUSE_FSYNC))) {
449 for (type = 0; type < FUFH_MAXTYPE; type++) {
450 fufh = &(fvdat->fufh[type]);
451 if (FUFH_IS_VALID(fufh)) {
452 fuse_internal_fsync(vp, td, NULL, fufh);
461 struct vnop_getattr_args {
464 struct ucred *a_cred;
469 fuse_vnop_getattr(struct vop_getattr_args *ap)
471 struct vnode *vp = ap->a_vp;
472 struct vattr *vap = ap->a_vap;
473 struct ucred *cred = ap->a_cred;
474 struct thread *td = curthread;
475 struct fuse_vnode_data *fvdat = VTOFUD(vp);
479 struct fuse_dispatcher fdi;
481 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
483 dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;
485 /* Note that we are not bailing out on a dead file system just yet. */
487 if (!(dataflags & FSESS_INITED)) {
488 if (!vnode_isvroot(vp)) {
489 fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
491 debug_printf("fuse_getattr b: returning ENOTCONN\n");
498 if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
499 if ((err == ENOTCONN) && vnode_isvroot(vp)) {
500 /* see comment at similar place in fuse_statfs() */
505 fuse_internal_vnode_disappear(vp);
509 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
510 if (vap != VTOVA(vp)) {
511 memcpy(vap, VTOVA(vp), sizeof(*vap));
513 if (vap->va_type != vnode_vtype(vp)) {
514 fuse_internal_vnode_disappear(vp);
518 if ((fvdat->flag & FN_SIZECHANGE) != 0)
519 vap->va_size = fvdat->filesize;
521 if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
523 * This is for those cases when the file size changed without us
524 * knowing, and we want to catch up.
526 off_t new_filesize = ((struct fuse_attr_out *)
527 fdi.answ)->attr.size;
529 if (fvdat->filesize != new_filesize) {
530 fuse_vnode_setsize(vp, cred, new_filesize);
533 debug_printf("fuse_getattr e: returning 0\n");
540 bzero(vap, sizeof(*vap));
541 vap->va_type = vnode_vtype(vp);
547 struct vnop_inactive_args {
553 fuse_vnop_inactive(struct vop_inactive_args *ap)
555 struct vnode *vp = ap->a_vp;
556 struct thread *td = ap->a_td;
558 struct fuse_vnode_data *fvdat = VTOFUD(vp);
559 struct fuse_filehandle *fufh = NULL;
561 int type, need_flush = 1;
563 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
565 for (type = 0; type < FUFH_MAXTYPE; type++) {
566 fufh = &(fvdat->fufh[type]);
567 if (FUFH_IS_VALID(fufh)) {
568 if (need_flush && vp->v_type == VREG) {
569 if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
570 fuse_vnode_savesize(vp, NULL);
572 if (fuse_data_cache_invalidate ||
573 (fvdat->flag & FN_REVOKED) != 0)
574 fuse_io_invalbuf(vp, td);
576 fuse_io_flushbuf(vp, MNT_WAIT, td);
579 fuse_filehandle_close(vp, type, td, NULL);
583 if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
590 struct vnop_link_args {
591 struct vnode *a_tdvp;
593 struct componentname *a_cnp;
597 fuse_vnop_link(struct vop_link_args *ap)
599 struct vnode *vp = ap->a_vp;
600 struct vnode *tdvp = ap->a_tdvp;
601 struct componentname *cnp = ap->a_cnp;
603 struct vattr *vap = VTOVA(vp);
605 struct fuse_dispatcher fdi;
606 struct fuse_entry_out *feo;
607 struct fuse_link_in fli;
611 fuse_trace_printf_vnop();
613 if (fuse_isdeadfs(vp)) {
616 if (vnode_mount(tdvp) != vnode_mount(vp)) {
619 if (vap->va_nlink >= FUSE_LINK_MAX) {
622 fli.oldnodeid = VTOI(vp);
625 fuse_internal_newentry_makerequest(vnode_mount(tdvp), VTOI(tdvp), cnp,
626 FUSE_LINK, &fli, sizeof(fli), &fdi);
627 if ((err = fdisp_wait_answ(&fdi))) {
632 err = fuse_internal_checkentry(feo, vnode_vtype(vp));
639 struct vnop_lookup_args {
640 struct vnodeop_desc *a_desc;
642 struct vnode **a_vpp;
643 struct componentname *a_cnp;
647 fuse_vnop_lookup(struct vop_lookup_args *ap)
649 struct vnode *dvp = ap->a_dvp;
650 struct vnode **vpp = ap->a_vpp;
651 struct componentname *cnp = ap->a_cnp;
652 struct thread *td = cnp->cn_thread;
653 struct ucred *cred = cnp->cn_cred;
655 int nameiop = cnp->cn_nameiop;
656 int flags = cnp->cn_flags;
657 int wantparent = flags & (LOCKPARENT | WANTPARENT);
658 int islastcn = flags & ISLASTCN;
659 struct mount *mp = vnode_mount(dvp);
663 struct vnode *vp = NULL;
665 struct fuse_dispatcher fdi;
669 struct fuse_access_param facp;
671 FS_DEBUG2G("parent_inode=%ju - %*s\n",
672 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
674 if (fuse_isdeadfs(dvp)) {
678 if (!vnode_isdir(dvp)) {
681 if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
685 * We do access check prior to doing anything else only in the case
686 * when we are at fs root (we'd like to say, "we are at the first
687 * component", but that's not exactly the same... nevermind).
688 * See further comments at further access checks.
691 bzero(&facp, sizeof(facp));
692 if (vnode_isvroot(dvp)) { /* early permission check hack */
693 if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
697 if (flags & ISDOTDOT) {
698 nid = VTOFUD(dvp)->parent_nid;
705 } else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
710 } else if (fuse_lookup_cache_enable) {
711 err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
714 case -1: /* positive match */
715 atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
718 case 0: /* no match in cache */
719 atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
722 case ENOENT: /* negative match */
729 fdisp_init(&fdi, cnp->cn_namelen + 1);
733 fdisp_make(&fdi, op, mp, nid, td, cred);
735 if (op == FUSE_LOOKUP) {
736 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
737 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
739 lookup_err = fdisp_wait_answ(&fdi);
741 if ((op == FUSE_LOOKUP) && !lookup_err) { /* lookup call succeeded */
742 nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
745 * zero nodeid is the same as "not found",
746 * but it's also cacheable (which we keep
747 * keep on doing not as of writing this)
750 } else if (nid == FUSE_ROOT_ID) {
755 (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
759 /* lookup_err, if non-zero, must be ENOENT at this point */
763 if ((nameiop == CREATE || nameiop == RENAME) && islastcn
764 /* && directory dvp has not been removed */ ) {
766 if (vfs_isrdonly(mp)) {
770 #if 0 /* THINK_ABOUT_THIS */
771 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
777 * Possibly record the position of a slot in the
778 * directory large enough for the new component name.
779 * This can be recorded in the vnode private data for
780 * dvp. Set the SAVENAME flag to hold onto the
781 * pathname for use later in VOP_CREATE or VOP_RENAME.
783 cnp->cn_flags |= SAVENAME;
788 /* Consider inserting name into cache. */
791 * No we can't use negative caching, as the fs
792 * changes are out of our control.
793 * False positives' falseness turns out just as things
794 * go by, but false negatives' falseness doesn't.
795 * (and aiding the caching mechanism with extra control
796 * mechanisms comes quite close to beating the whole purpose
800 if ((cnp->cn_flags & MAKEENTRY) != 0) {
801 FS_DEBUG("inserting NULL into cache\n");
802 cache_enter(dvp, NULL, cnp);
812 struct fuse_entry_out *feo = NULL;
813 struct fuse_attr *fattr = NULL;
815 if (op == FUSE_GETATTR) {
816 fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
818 feo = (struct fuse_entry_out *)fdi.answ;
819 fattr = &(feo->attr);
823 * If deleting, and at end of pathname, return parameters
824 * which can be used to remove file. If the wantparent flag
825 * isn't set, we return only the directory, otherwise we go on
826 * and lock the inode, being careful with ".".
828 if (nameiop == DELETE && islastcn) {
830 * Check for write access on directory.
832 facp.xuid = fattr->uid;
833 facp.facc_flags |= FACCESS_STICKY;
834 err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
835 facp.facc_flags &= ~FACCESS_XQUERIES;
840 if (nid == VTOI(dvp)) {
844 err = fuse_vnode_get(dvp->v_mount, nid, dvp,
845 &vp, cnp, IFTOVT(fattr->mode));
852 * Save the name for use in VOP_RMDIR and VOP_REMOVE
855 cnp->cn_flags |= SAVENAME;
860 * If rewriting (RENAME), return the inode and the
861 * information required to rewrite the present directory
862 * Must get inode of directory entry to verify it's a
863 * regular file, or empty directory.
865 if (nameiop == RENAME && wantparent && islastcn) {
867 #if 0 /* THINK_ABOUT_THIS */
868 if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
876 if (nid == VTOI(dvp)) {
880 err = fuse_vnode_get(vnode_mount(dvp),
885 IFTOVT(fattr->mode));
891 * Save the name for use in VOP_RENAME later.
893 cnp->cn_flags |= SAVENAME;
897 if (flags & ISDOTDOT) {
902 * Expanded copy of vn_vget_ino() so that
903 * fuse_vnode_get() can be used.
906 ltype = VOP_ISLOCKED(dvp);
907 err = vfs_busy(mp, MBF_NOWAIT);
911 err = vfs_busy(mp, 0);
912 vn_lock(dvp, ltype | LK_RETRY);
916 if ((dvp->v_iflag & VI_DOOMED) != 0) {
923 err = fuse_vnode_get(vnode_mount(dvp),
928 IFTOVT(fattr->mode));
930 vn_lock(dvp, ltype | LK_RETRY);
931 if ((dvp->v_iflag & VI_DOOMED) != 0) {
939 } else if (nid == VTOI(dvp)) {
943 err = fuse_vnode_get(vnode_mount(dvp),
948 IFTOVT(fattr->mode));
952 fuse_vnode_setparent(vp, dvp);
956 if (op == FUSE_GETATTR) {
957 cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
959 cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
962 /* Insert name into cache if appropriate. */
965 * Nooo, caching is evil. With caching, we can't avoid stale
966 * information taking over the playground (cached info is not
967 * just positive/negative, it does have qualitative aspects,
968 * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
969 * walking down along cached path components, and that's not
970 * any cheaper than FUSE_LOOKUP. This might change with
971 * implementing kernel side attr caching, but... In Linux,
972 * lookup results are not cached, and the daemon is bombarded
973 * with FUSE_LOOKUPS on and on. This shows that by design, the
974 * daemon is expected to handle frequent lookup queries
975 * efficiently, do its caching in userspace, and so on.
977 * So just leave the name cache alone.
981 * Well, now I know, Linux caches lookups, but with a
982 * timeout... So it's the same thing as attribute caching:
983 * we can deal with it when implement timeouts.
986 if (cnp->cn_flags & MAKEENTRY) {
987 cache_enter(dvp, *vpp, cnp);
994 /* No lookup error; need to clean up. */
996 if (err) { /* Found inode; exit with no vnode. */
997 if (op == FUSE_LOOKUP) {
998 fuse_internal_forget_send(vnode_mount(dvp), td, cred,
1001 fdisp_destroy(&fdi);
1004 #ifndef NO_EARLY_PERM_CHECK_HACK
1007 * We have the attributes of the next item
1008 * *now*, and it's a fact, and we do not
1009 * have to do extra work for it (ie, beg the
1010 * daemon), and it neither depends on such
1011 * accidental things like attr caching. So
1012 * the big idea: check credentials *now*,
1013 * not at the beginning of the next call to
1016 * The first item of the lookup chain (fs root)
1017 * won't be checked then here, of course, as
1018 * its never "the next". But go and see that
1019 * the root is taken care about at the very
1020 * beginning of this function.
1022 * Now, given we want to do the access check
1023 * this way, one might ask: so then why not
1024 * do the access check just after fetching
1025 * the inode and its attributes from the
1026 * daemon? Why bother with producing the
1027 * corresponding vnode at all if something
1028 * is not OK? We know what's the deal as
1029 * soon as we get those attrs... There is
1030 * one bit of info though not given us by
1031 * the daemon: whether his response is
1032 * authoritative or not... His response should
1033 * be ignored if something is mounted over
1034 * the dir in question. But that can be
1035 * known only by having the vnode...
1037 int tmpvtype = vnode_vtype(*vpp);
1039 bzero(&facp, sizeof(facp));
1040 /*the early perm check hack */
1041 facp.facc_flags |= FACCESS_VA_VALID;
1043 if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
1046 if (!err && !vnode_mountedhere(*vpp)) {
1047 err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
1050 if (tmpvtype == VLNK)
1051 FS_DEBUG("weird, permission error with a symlink?\n");
1059 fdisp_destroy(&fdi);
1065 struct vnop_mkdir_args {
1066 struct vnode *a_dvp;
1067 struct vnode **a_vpp;
1068 struct componentname *a_cnp;
1069 struct vattr *a_vap;
1073 fuse_vnop_mkdir(struct vop_mkdir_args *ap)
1075 struct vnode *dvp = ap->a_dvp;
1076 struct vnode **vpp = ap->a_vpp;
1077 struct componentname *cnp = ap->a_cnp;
1078 struct vattr *vap = ap->a_vap;
1080 struct fuse_mkdir_in fmdi;
1082 fuse_trace_printf_vnop();
1084 if (fuse_isdeadfs(dvp)) {
1087 fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);
1089 return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
1090 sizeof(fmdi), VDIR));
1094 struct vnop_mknod_args {
1095 struct vnode *a_dvp;
1096 struct vnode **a_vpp;
1097 struct componentname *a_cnp;
1098 struct vattr *a_vap;
1102 fuse_vnop_mknod(struct vop_mknod_args *ap)
1110 struct vnop_open_args {
1113 struct ucred *a_cred;
1114 struct thread *a_td;
1115 int a_fdidx; / struct file *a_fp;
1119 fuse_vnop_open(struct vop_open_args *ap)
1121 struct vnode *vp = ap->a_vp;
1122 int mode = ap->a_mode;
1123 struct thread *td = ap->a_td;
1124 struct ucred *cred = ap->a_cred;
1126 fufh_type_t fufh_type;
1127 struct fuse_vnode_data *fvdat;
1129 int error, isdir = 0;
1130 int32_t fuse_open_flags;
1132 FS_DEBUG2G("inode=%ju mode=0x%x\n", (uintmax_t)VTOI(vp), mode);
1134 if (fuse_isdeadfs(vp)) {
1139 if (vnode_isdir(vp)) {
1142 fuse_open_flags = 0;
1144 fufh_type = FUFH_RDONLY;
1146 fufh_type = fuse_filehandle_xlate_from_fflags(mode);
1148 * For WRONLY opens, force DIRECT_IO. This is necessary
1149 * since writing a partial block through the buffer cache
1150 * will result in a read of the block and that read won't
1151 * be allowed by the WRONLY open.
1153 if (fufh_type == FUFH_WRONLY ||
1154 (fvdat->flag & FN_DIRECTIO) != 0)
1155 fuse_open_flags = FOPEN_DIRECT_IO;
1158 if (fuse_filehandle_validrw(vp, fufh_type) != FUFH_INVALID) {
1159 fuse_vnode_open(vp, fuse_open_flags, td);
1162 error = fuse_filehandle_open(vp, fufh_type, NULL, td, cred);
1168 struct vnop_read_args {
1172 struct ucred *a_cred;
1176 fuse_vnop_read(struct vop_read_args *ap)
1178 struct vnode *vp = ap->a_vp;
1179 struct uio *uio = ap->a_uio;
1180 int ioflag = ap->a_ioflag;
1181 struct ucred *cred = ap->a_cred;
1183 FS_DEBUG2G("inode=%ju offset=%jd resid=%zd\n",
1184 (uintmax_t)VTOI(vp), uio->uio_offset, uio->uio_resid);
1186 if (fuse_isdeadfs(vp)) {
1190 if (VTOFUD(vp)->flag & FN_DIRECTIO) {
1191 ioflag |= IO_DIRECT;
1194 return fuse_io_dispatch(vp, uio, ioflag, cred);
1198 struct vnop_readdir_args {
1201 struct ucred *a_cred;
1208 fuse_vnop_readdir(struct vop_readdir_args *ap)
1210 struct vnode *vp = ap->a_vp;
1211 struct uio *uio = ap->a_uio;
1212 struct ucred *cred = ap->a_cred;
1214 struct fuse_filehandle *fufh = NULL;
1215 struct fuse_vnode_data *fvdat;
1216 struct fuse_iov cookediov;
1221 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1223 if (fuse_isdeadfs(vp)) {
1226 if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
1227 (uio_resid(uio) < sizeof(struct dirent))) {
1232 if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
1233 FS_DEBUG("calling readdir() before open()");
1234 err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
1237 err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
1242 #define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
1243 fiov_init(&cookediov, DIRCOOKEDSIZE);
1245 err = fuse_internal_readdir(vp, uio, fufh, &cookediov);
1247 fiov_teardown(&cookediov);
1249 fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
1255 struct vnop_readlink_args {
1258 struct ucred *a_cred;
1262 fuse_vnop_readlink(struct vop_readlink_args *ap)
1264 struct vnode *vp = ap->a_vp;
1265 struct uio *uio = ap->a_uio;
1266 struct ucred *cred = ap->a_cred;
1268 struct fuse_dispatcher fdi;
1271 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1273 if (fuse_isdeadfs(vp)) {
1276 if (!vnode_islnk(vp)) {
1279 fdisp_init(&fdi, 0);
1280 err = fdisp_simple_putget_vp(&fdi, FUSE_READLINK, vp, curthread, cred);
1284 if (((char *)fdi.answ)[0] == '/' &&
1285 fuse_get_mpdata(vnode_mount(vp))->dataflags & FSESS_PUSH_SYMLINKS_IN) {
1286 char *mpth = vnode_mount(vp)->mnt_stat.f_mntonname;
1288 err = uiomove(mpth, strlen(mpth), uio);
1291 err = uiomove(fdi.answ, fdi.iosize, uio);
1294 fdisp_destroy(&fdi);
1299 struct vnop_reclaim_args {
1301 struct thread *a_td;
1305 fuse_vnop_reclaim(struct vop_reclaim_args *ap)
1307 struct vnode *vp = ap->a_vp;
1308 struct thread *td = ap->a_td;
1310 struct fuse_vnode_data *fvdat = VTOFUD(vp);
1311 struct fuse_filehandle *fufh = NULL;
1316 panic("FUSE: no vnode data during recycling");
1318 FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));
1320 for (type = 0; type < FUFH_MAXTYPE; type++) {
1321 fufh = &(fvdat->fufh[type]);
1322 if (FUFH_IS_VALID(fufh)) {
1323 printf("FUSE: vnode being reclaimed but fufh (type=%d) is valid",
1325 fuse_filehandle_close(vp, type, td, NULL);
1329 if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
1330 fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
1333 fuse_vnode_setparent(vp, NULL);
1335 vfs_hash_remove(vp);
1336 vnode_destroy_vobject(vp);
1337 fuse_vnode_destroy(vp);
1343 struct vnop_remove_args {
1344 struct vnode *a_dvp;
1346 struct componentname *a_cnp;
1350 fuse_vnop_remove(struct vop_remove_args *ap)
1352 struct vnode *dvp = ap->a_dvp;
1353 struct vnode *vp = ap->a_vp;
1354 struct componentname *cnp = ap->a_cnp;
1358 FS_DEBUG2G("inode=%ju name=%*s\n",
1359 (uintmax_t)VTOI(vp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1361 if (fuse_isdeadfs(vp)) {
1364 if (vnode_isdir(vp)) {
1369 err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);
1372 fuse_internal_vnode_disappear(vp);
1377 struct vnop_rename_args {
1378 struct vnode *a_fdvp;
1379 struct vnode *a_fvp;
1380 struct componentname *a_fcnp;
1381 struct vnode *a_tdvp;
1382 struct vnode *a_tvp;
1383 struct componentname *a_tcnp;
1387 fuse_vnop_rename(struct vop_rename_args *ap)
1389 struct vnode *fdvp = ap->a_fdvp;
1390 struct vnode *fvp = ap->a_fvp;
1391 struct componentname *fcnp = ap->a_fcnp;
1392 struct vnode *tdvp = ap->a_tdvp;
1393 struct vnode *tvp = ap->a_tvp;
1394 struct componentname *tcnp = ap->a_tcnp;
1395 struct fuse_data *data;
1399 FS_DEBUG2G("from: inode=%ju name=%*s -> to: inode=%ju name=%*s\n",
1400 (uintmax_t)VTOI(fvp), (int)fcnp->cn_namelen, fcnp->cn_nameptr,
1401 (uintmax_t)(tvp == NULL ? -1 : VTOI(tvp)),
1402 (int)tcnp->cn_namelen, tcnp->cn_nameptr);
1404 if (fuse_isdeadfs(fdvp)) {
1407 if (fvp->v_mount != tdvp->v_mount ||
1408 (tvp && fvp->v_mount != tvp->v_mount)) {
1409 FS_DEBUG("cross-device rename: %s -> %s\n",
1410 fcnp->cn_nameptr, (tcnp != NULL ? tcnp->cn_nameptr : "(NULL)"));
1417 * FUSE library is expected to check if target directory is not
1418 * under the source directory in the file system tree.
1419 * Linux performs this check at VFS level.
1421 data = fuse_get_mpdata(vnode_mount(tdvp));
1422 sx_xlock(&data->rename_lock);
1423 err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
1426 fuse_vnode_setparent(fvp, tdvp);
1428 fuse_vnode_setparent(tvp, NULL);
1430 sx_unlock(&data->rename_lock);
1432 if (tvp != NULL && tvp != fvp) {
1435 if (vnode_isdir(fvp)) {
1436 if ((tvp != NULL) && vnode_isdir(tvp)) {
1457 struct vnop_rmdir_args {
1458 struct vnode *a_dvp;
1460 struct componentname *a_cnp;
1464 fuse_vnop_rmdir(struct vop_rmdir_args *ap)
1466 struct vnode *dvp = ap->a_dvp;
1467 struct vnode *vp = ap->a_vp;
1471 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1473 if (fuse_isdeadfs(vp)) {
1476 if (VTOFUD(vp) == VTOFUD(dvp)) {
1479 err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);
1482 fuse_internal_vnode_disappear(vp);
1487 struct vnop_setattr_args {
1489 struct vattr *a_vap;
1490 struct ucred *a_cred;
1491 struct thread *a_td;
/*
 * fuse_vnop_setattr: translate the fields set in *a_vap into a
 * fuse_setattr_in message (FATTR_* valid bits) and dispatch FUSE_SETATTR
 * to the daemon.  Handles chown/chgrp, truncation, utimes and chmod;
 * detects a vnode whose type changed behind our back and revokes it.
 *
 * NOTE(review): this view of the file elides some lines; comments
 * describe only the visible code.
 */
1495 fuse_vnop_setattr(struct vop_setattr_args *ap)
1497 struct vnode *vp = ap->a_vp;
1498 struct vattr *vap = ap->a_vap;
1499 struct ucred *cred = ap->a_cred;
/* Note: a_td is ignored in favor of curthread. */
1500 struct thread *td = curthread;
1502 struct fuse_dispatcher fdi;
1503 struct fuse_setattr_in *fsai;
1504 struct fuse_access_param facp;
/* Truncation bookkeeping: set below if FATTR_SIZE is requested. */
1508 int sizechanged = 0;
1509 uint64_t newsize = 0;
1511 FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));
1513 if (fuse_isdeadfs(vp)) {
/* Build the FUSE_SETATTR request; fsai points into fdi's in-buffer. */
1516 fdisp_init(&fdi, sizeof(*fsai));
1517 fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
1521 bzero(&facp, sizeof(facp));
1523 facp.xuid = vap->va_uid;
1524 facp.xgid = vap->va_gid;
/* Ownership change requested (VNOVAL marks "field not set"). */
1526 if (vap->va_uid != (uid_t)VNOVAL) {
1527 facp.facc_flags |= FACCESS_CHOWN;
1528 fsai->uid = vap->va_uid;
1529 fsai->valid |= FATTR_UID;
/* Group change requested. */
1531 if (vap->va_gid != (gid_t)VNOVAL) {
1532 facp.facc_flags |= FACCESS_CHOWN;
1533 fsai->gid = vap->va_gid;
1534 fsai->valid |= FATTR_GID;
/* Size change (truncate/extend) requested. */
1536 if (vap->va_size != VNOVAL) {
1538 struct fuse_filehandle *fufh = NULL;
1540 /*Truncate to a new value. */
1541 fsai->size = vap->va_size;
1543 newsize = vap->va_size;
1544 fsai->valid |= FATTR_SIZE;
/* If a writable filehandle is cached, pass it so the daemon can use
 * ftruncate semantics (FATTR_FH). */
1546 fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
1548 fsai->fh = fufh->fh_id;
1549 fsai->valid |= FATTR_FH;
/* Access-time change requested. */
1552 if (vap->va_atime.tv_sec != VNOVAL) {
1553 fsai->atime = vap->va_atime.tv_sec;
1554 fsai->atimensec = vap->va_atime.tv_nsec;
1555 fsai->valid |= FATTR_ATIME;
/* Modification-time change requested. */
1557 if (vap->va_mtime.tv_sec != VNOVAL) {
1558 fsai->mtime = vap->va_mtime.tv_sec;
1559 fsai->mtimensec = vap->va_mtime.tv_nsec;
1560 fsai->valid |= FATTR_MTIME;
/* Mode change: only permission bits are forwarded. */
1562 if (vap->va_mode != (mode_t)VNOVAL) {
1563 fsai->mode = vap->va_mode & ALLPERMS;
1564 fsai->valid |= FATTR_MODE;
1569 vtyp = vnode_vtype(vp);
/* Directories cannot be truncated. */
1571 if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
/* Reject metadata changes on a read-only mount. */
1575 if (vfs_isrdonly(vnode_mount(vp)) && (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
/* Non-size attribute changes would normally need VADMIN; the check is
 * disabled here (left commented out) -- see the line below. */
1579 if (fsai->valid & ~FATTR_SIZE) {
1580 /*err = fuse_internal_access(vp, VADMIN, context, &facp); */
1584 facp.facc_flags &= ~FACCESS_XQUERIES;
/* utimes(path, NULL) fallback: owner check failed, but a pure
 * ATIME/MTIME "set to now" request is also allowed with write access. */
1586 if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
1587 vap->va_vaflags & VA_UTIMES_NULL) {
1588 err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
1592 if ((err = fdisp_wait_answ(&fdi)))
/* Daemon's reply carries the (possibly changed) attributes. */
1594 vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
1596 if (vnode_vtype(vp) != vtyp) {
1597 if (vnode_vtype(vp) == VNON && vtyp != VNON) {
1598 debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
1601 * STALE vnode, ditch
1603 * The vnode has changed its type "behind our back". There's
1604 * nothing really we can do, so let us just force an internal
1605 * revocation and tell the caller to try again, if interested.
1607 fuse_internal_vnode_disappear(vp);
/* Cache the returned attributes only when the size did not change;
 * a size change is propagated separately below. */
1611 if (!err && !sizechanged) {
1612 cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
1615 fdisp_destroy(&fdi);
/* Successful truncate: update the vnode's size and clear the pending
 * size-change flag. */
1616 if (!err && sizechanged) {
1617 fuse_vnode_setsize(vp, cred, newsize);
1618 VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
1624 struct vnop_strategy_args {
/*
 * fuse_vnop_strategy: buffer-cache strategy entry point.  Flags ENXIO on
 * the buf when the vnode is missing or the fs is dead, refreshes the
 * cached file size before a write, then hands the buf to
 * fuse_io_strategy().  Errors travel back through the buf itself.
 */
1630 fuse_vnop_strategy(struct vop_strategy_args *ap)
1632 struct vnode *vp = ap->a_vp;
1633 struct buf *bp = ap->a_bp;
1635 fuse_trace_printf_vnop();
/* NULL or dead vnode: fail the bio rather than touching the fs. */
1637 if (!vp || fuse_isdeadfs(vp)) {
1638 bp->b_ioflags |= BIO_ERROR;
1639 bp->b_error = ENXIO;
/* For writes, make sure our notion of the file size is current. */
1643 if (bp->b_iocmd == BIO_WRITE)
1644 fuse_vnode_refreshsize(vp, NOCRED);
/* Return value deliberately discarded; see the comment following this
 * function about error propagation via the buffer. */
1646 (void)fuse_io_strategy(vp, bp);
1649 * This is a dangerous function. If it returns an error, that might mean a
1650 * panic. We prefer pretty much anything over being forced to panic
1651 * by a malicious daemon (a demon?), so we just return 0 anyway. You
1652 * should never mind this: this function has its own error
1653 * propagation mechanism via the argument buffer, so the
1654 * less melodramatic residents of the call chain will still be
1655 * able to know what to do.
1662 struct vnop_symlink_args {
1663 struct vnode *a_dvp;
1664 struct vnode **a_vpp;
1665 struct componentname *a_cnp;
1666 struct vattr *a_vap;
/*
 * fuse_vnop_symlink: create a symbolic link named a_cnp in directory
 * a_dvp pointing at a_target, by sending FUSE_SYMLINK and materializing
 * the new vnode in *a_vpp via fuse_internal_newentry_core().
 */
1671 fuse_vnop_symlink(struct vop_symlink_args *ap)
1673 struct vnode *dvp = ap->a_dvp;
1674 struct vnode **vpp = ap->a_vpp;
1675 struct componentname *cnp = ap->a_cnp;
1676 char *target = ap->a_target;
1678 struct fuse_dispatcher fdi;
1683 FS_DEBUG2G("inode=%ju name=%*s\n",
1684 (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);
1686 if (fuse_isdeadfs(dvp)) {
1690 * Unlike the other creator type calls, here we have to create a message
1691 * where the name of the new entry comes first, and the data describing
1692 * the entry comes second.
1693 * Hence we can't rely on our handy fuse_internal_newentry() routine,
1694 * but put together the message manually and just call the core part.
/* Payload layout: "<name>\0<target>\0" -- len already counts the
 * target's terminating NUL. */
1697 len = strlen(target) + 1;
1698 fdisp_init(&fdi, len + cnp->cn_namelen + 1);
1699 fdisp_make_vp(&fdi, FUSE_SYMLINK, dvp, curthread, NULL);
1701 memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
/* cn_nameptr is not NUL-terminated at cn_namelen; terminate by hand. */
1702 ((char *)fdi.indata)[cnp->cn_namelen] = '\0';
1703 memcpy((char *)fdi.indata + cnp->cn_namelen + 1, target, len);
1705 err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
1706 fdisp_destroy(&fdi);
1711 struct vnop_write_args {
1715 struct ucred *a_cred;
/*
 * fuse_vnop_write: write a_uio to the vnode.  Refreshes the cached file
 * size first, forces IO_DIRECT when the node is marked FN_DIRECTIO, and
 * delegates the actual transfer to fuse_io_dispatch().
 */
1719 fuse_vnop_write(struct vop_write_args *ap)
1721 struct vnode *vp = ap->a_vp;
1722 struct uio *uio = ap->a_uio;
1723 int ioflag = ap->a_ioflag;
1724 struct ucred *cred = ap->a_cred;
1726 fuse_trace_printf_vnop();
1728 if (fuse_isdeadfs(vp)) {
/* Make sure the cached size is current before appending/extending. */
1731 fuse_vnode_refreshsize(vp, cred);
/* Daemon requested direct I/O on this node: bypass the buffer cache. */
1733 if (VTOFUD(vp)->flag & FN_DIRECTIO) {
1734 ioflag |= IO_DIRECT;
1737 return fuse_io_dispatch(vp, uio, ioflag, cred);
1741 struct vnop_getpages_args {
/*
 * fuse_vnop_getpages: VM pager read-in.  Maps the requested pages into a
 * pbuf's KVA, performs one direct read through fuse_io_dispatch(), then
 * marks each page fully or partially valid according to how many bytes
 * actually arrived.  Returns VM_PAGER_OK/VM_PAGER_ERROR.
 *
 * NOTE(review): this view of the file elides some lines; comments
 * describe only the visible code.
 */
1749 fuse_vnop_getpages(struct vop_getpages_args *ap)
1751 int i, error, nextoff, size, toff, count, npages;
1761 FS_DEBUG2G("heh\n");
1764 KASSERT(vp->v_object, ("objectless vp passed to getpages"));
1765 td = curthread; /* XXX */
1766 cred = curthread->td_ucred; /* XXX */
1768 npages = ap->a_count;
/* mmap/caching disabled for this mount: pager should not be here. */
1770 if (!fsess_opt_mmap(vnode_mount(vp))) {
1771 FS_DEBUG("called on non-cacheable vnode??\n");
1772 return (VM_PAGER_ERROR);
1776 * If the last page is partially valid, just return it and allow
1777 * the pager to zero-out the blanks. Partially valid pages can
1778 * only occur at the file EOF.
1780 * XXXGL: is that true for FUSE, which is a local filesystem,
1781 * but still somewhat disconnected from the kernel?
1783 VM_OBJECT_WLOCK(vp->v_object);
1784 if (pages[npages - 1]->valid != 0 && --npages == 0)
1786 VM_OBJECT_WUNLOCK(vp->v_object);
1789 * We use only the kva address for the buffer, but this is extremely
1790 * convenient and fast.
1792 bp = getpbuf(&fuse_pbuf_freecnt);
1794 kva = (vm_offset_t)bp->b_data;
/* Map the target pages contiguously into the pbuf's KVA window. */
1795 pmap_qenter(kva, pages, npages);
1796 VM_CNT_INC(v_vnodein);
1797 VM_CNT_ADD(v_vnodepgsin, npages);
/* Build a single SYSSPACE uio covering all npages pages. */
1799 count = npages << PAGE_SHIFT;
1800 iov.iov_base = (caddr_t)kva;
1801 iov.iov_len = count;
1804 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
1805 uio.uio_resid = count;
1806 uio.uio_segflg = UIO_SYSSPACE;
1807 uio.uio_rw = UIO_READ;
/* Direct read: bypass the buffer cache, we are filling VM pages. */
1810 error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
1811 pmap_qremove(kva, npages);
1813 relpbuf(bp, &fuse_pbuf_freecnt);
/* Error with nothing transferred at all: report pager failure. */
1815 if (error && (uio.uio_resid == count)) {
1816 FS_DEBUG("error %d\n", error);
1817 return VM_PAGER_ERROR;
1820 * Calculate the number of bytes read and validate only that number
1821 * of bytes. Note that due to pending writes, size may be 0. This
1822 * does not mean that the remaining data is invalid!
1825 size = count - uio.uio_resid;
1826 VM_OBJECT_WLOCK(vp->v_object);
1827 fuse_vm_page_lock_queues();
1828 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
1831 nextoff = toff + PAGE_SIZE;
1834 if (nextoff <= size) {
1836 * Read operation filled an entire page
1838 m->valid = VM_PAGE_BITS_ALL;
1839 KASSERT(m->dirty == 0,
1840 ("fuse_getpages: page %p is dirty", m));
1841 } else if (size > toff) {
1843 * Read operation filled a partial page.
1846 vm_page_set_valid_range(m, 0, size - toff);
1847 KASSERT(m->dirty == 0,
1848 ("fuse_getpages: page %p is dirty", m));
1851 * Read operation was short. If no error occurred
1852 * we may have hit a zero-fill section. We simply
1853 * leave valid set to 0.
1858 fuse_vm_page_unlock_queues();
1860 VM_OBJECT_WUNLOCK(vp->v_object);
1865 return (VM_PAGER_OK);
1869 struct vnop_putpages_args {
1875 vm_ooffset_t a_offset;
/*
 * fuse_vnop_putpages: VM pager write-out.  Maps the dirty pages into a
 * pbuf's KVA, clamps the write so the file is not extended past EOF,
 * performs one direct write through fuse_io_dispatch(), and marks the
 * successfully written pages clean (VM_PAGER_OK) in a_rtvals.
 *
 * NOTE(review): this view of the file elides some lines; comments
 * describe only the visible code.
 */
1879 fuse_vnop_putpages(struct vop_putpages_args *ap)
1885 int i, error, npages, count;
1894 FS_DEBUG2G("heh\n");
1897 KASSERT(vp->v_object, ("objectless vp passed to putpages"));
/* Current file size, used to avoid extending the file from the pager. */
1898 fsize = vp->v_object->un_pager.vnp.vnp_size;
1899 td = curthread; /* XXX */
1900 cred = curthread->td_ucred; /* XXX */
1902 count = ap->a_count;
1903 rtvals = ap->a_rtvals;
1904 npages = btoc(count);
1905 offset = IDX_TO_OFF(pages[0]->pindex);
/* mmap/caching disabled for this mount: ask the pager to retry later. */
1907 if (!fsess_opt_mmap(vnode_mount(vp))) {
1908 FS_DEBUG("called on non-cacheable vnode??\n");
1910 for (i = 0; i < npages; i++)
1911 rtvals[i] = VM_PAGER_AGAIN;
1914 * When putting pages, do not extend file past EOF.
1917 if (offset + count > fsize) {
1918 count = fsize - offset;
1923 * We use only the kva address for the buffer, but this is extremely
1924 * convenient and fast.
1926 bp = getpbuf(&fuse_pbuf_freecnt);
1928 kva = (vm_offset_t)bp->b_data;
/* Map the source pages contiguously into the pbuf's KVA window. */
1929 pmap_qenter(kva, pages, npages);
1930 VM_CNT_INC(v_vnodeout);
1931 VM_CNT_ADD(v_vnodepgsout, count);
/* Build a single SYSSPACE uio covering the (possibly clamped) count. */
1933 iov.iov_base = (caddr_t)kva;
1934 iov.iov_len = count;
1937 uio.uio_offset = offset;
1938 uio.uio_resid = count;
1939 uio.uio_segflg = UIO_SYSSPACE;
1940 uio.uio_rw = UIO_WRITE;
/* Direct write: bypass the buffer cache, data comes from VM pages. */
1943 error = fuse_io_dispatch(vp, &uio, IO_DIRECT, cred);
1945 pmap_qremove(kva, npages);
1946 relpbuf(bp, &fuse_pbuf_freecnt);
/* Mark every fully transferred page clean and report success for it. */
1949 int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
1951 for (i = 0; i < nwritten; i++) {
1952 rtvals[i] = VM_PAGER_OK;
1953 VM_OBJECT_WLOCK(pages[i]->object);
1954 vm_page_undirty(pages[i]);
1955 VM_OBJECT_WUNLOCK(pages[i]->object);
1962 struct vnop_print_args {
1967 fuse_vnop_print(struct vop_print_args *ap)
1969 struct fuse_vnode_data *fvdat = VTOFUD(ap->a_vp);
1971 printf("nodeid: %ju, parent nodeid: %ju, nlookup: %ju, flag: %#x\n",
1972 (uintmax_t)VTOILLU(ap->a_vp), (uintmax_t)fvdat->parent_nid,
1973 (uintmax_t)fvdat->nlookup,