1 /* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
6 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
36 * tmpfs vnode interface.
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
46 #include <sys/filio.h>
47 #include <sys/limits.h>
48 #include <sys/lockf.h>
50 #include <sys/mount.h>
51 #include <sys/namei.h>
54 #include <sys/rwlock.h>
55 #include <sys/sched.h>
58 #include <sys/sysctl.h>
59 #include <sys/unistd.h>
60 #include <sys/vnode.h>
61 #include <security/audit/audit.h>
62 #include <security/mac/mac_framework.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_pager.h>
69 #include <vm/swap_pager.h>
71 #include <fs/tmpfs/tmpfs_vnops.h>
72 #include <fs/tmpfs/tmpfs.h>
/*
 * vfs.tmpfs sysctl subtree; rename_restarts is a read-only counter of how
 * many times tmpfs_rename had to restart due to vnode lock contention
 * (incremented in tmpfs_rename_relock below).
 */
74 SYSCTL_DECL(_vfs_tmpfs);
77 static volatile int tmpfs_rename_restarts;
78 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
79 __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
80 "Times rename had to restart due to lock contention");
/*
 * Callback for vn_vget_ino_gen(): allocate/fetch the vnode for the tmpfs
 * node passed in 'arg', locked with 'lkflags'.  (Extract elides the return
 * type and the 'rvp' parameter line.)
 */
83 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
87 	return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
/*
 * Core lookup: resolve the component 'cnp' inside directory 'dvp' and
 * return the resulting locked vnode in '*vpp'.  Handles "..", ".",
 * whiteout entries, CREATE/RENAME/DELETE permission checks on the last
 * component, and the sticky-bit (S_ISTXT) delete restriction; enters the
 * result in the namecache when MAKEENTRY is set.  Caller is responsible
 * for VEXEC on 'dvp'.  (NOTE(review): this extract elides several lines
 * of the original body — error paths and braces are missing from view.)
 */
91 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
93 struct tmpfs_dirent *de;
94 struct tmpfs_node *dnode, *pnode;
95 struct tmpfs_mount *tm;
98 /* Caller assumes responsibility for ensuring access (VEXEC). */
99 dnode = VP_TO_TMPFS_DIR(dvp);
102 /* We cannot be requesting the parent directory of the root node. */
103 MPASS(IMPLIES(dnode->tn_type == VDIR &&
104 dnode->tn_dir.tn_parent == dnode,
105 !(cnp->cn_flags & ISDOTDOT)));
107 TMPFS_ASSERT_LOCKED(dnode);
108 if (dnode->tn_dir.tn_parent == NULL) {
112 if (cnp->cn_flags & ISDOTDOT) {
113 tm = VFS_TO_TMPFS(dvp->v_mount);
114 pnode = dnode->tn_dir.tn_parent;
115 tmpfs_ref_node(pnode);
116 error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
117 pnode, cnp->cn_lkflags, vpp);
118 tmpfs_free_node(tm, pnode);
121 } else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
126 de = tmpfs_dir_lookup(dnode, NULL, cnp);
127 if (de != NULL && de->td_node == NULL)
128 cnp->cn_flags |= ISWHITEOUT;
129 if (de == NULL || de->td_node == NULL) {
131 * The entry was not found in the directory.
132 * This is OK if we are creating or renaming an
133 * entry and are working on the last component of
136 if ((cnp->cn_flags & ISLASTCN) &&
137 (cnp->cn_nameiop == CREATE || \
138 cnp->cn_nameiop == RENAME ||
139 (cnp->cn_nameiop == DELETE &&
140 cnp->cn_flags & DOWHITEOUT &&
141 cnp->cn_flags & ISWHITEOUT))) {
142 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
151 struct tmpfs_node *tnode;
154 * The entry was found, so get its associated
160 * If we are not at the last path component and
161 * found a non-directory or non-link entry (which
162 * may itself be pointing to a directory), raise
165 if ((tnode->tn_type != VDIR &&
166 tnode->tn_type != VLNK) &&
167 !(cnp->cn_flags & ISLASTCN)) {
173 * If we are deleting or renaming the entry, keep
174 * track of its tmpfs_dirent so that it can be
175 * easily deleted later.
177 if ((cnp->cn_flags & ISLASTCN) &&
178 (cnp->cn_nameiop == DELETE ||
179 cnp->cn_nameiop == RENAME)) {
180 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
185 /* Allocate a new vnode on the matching entry. */
186 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
187 cnp->cn_lkflags, vpp);
191 if ((dnode->tn_mode & S_ISTXT) &&
192 VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
193 curthread) && VOP_ACCESS(*vpp, VADMIN,
194 cnp->cn_cred, curthread)) {
201 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
202 cnp->cn_lkflags, vpp);
210 * Store the result of this lookup in the cache. Avoid this if the
211 * request was for creation, as it does not improve timings on
214 if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
215 cache_enter(dvp, *vpp, cnp);
219 * If there were no errors, *vpp cannot be null and it must be
222 MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
/* VOP_CACHEDLOOKUP: namecache miss path; delegate to tmpfs_lookup1(). */
228 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
231 return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
/*
 * VOP_LOOKUP: check exec permission on the directory first
 * (vn_dir_check_exec), then resolve via tmpfs_lookup1().
 */
235 tmpfs_lookup(struct vop_lookup_args *v)
237 struct vnode *dvp = v->a_dvp;
238 struct vnode **vpp = v->a_vpp;
239 struct componentname *cnp = v->a_cnp;
242 /* Check accessibility of requested node as a first step. */
243 error = vn_dir_check_exec(dvp, cnp);
247 return (tmpfs_lookup1(dvp, vpp, cnp));
/*
 * VOP_CREATE: create a regular file or socket node via tmpfs_alloc_file()
 * and, on success with MAKEENTRY, enter the new vnode in the namecache.
 */
251 tmpfs_create(struct vop_create_args *v)
253 struct vnode *dvp = v->a_dvp;
254 struct vnode **vpp = v->a_vpp;
255 struct componentname *cnp = v->a_cnp;
256 struct vattr *vap = v->a_vap;
259 MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
261 error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
262 if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
263 cache_enter(dvp, *vpp, cnp);
/*
 * VOP_MKNOD: create a block/character/FIFO special node; other types are
 * rejected before reaching tmpfs_alloc_file().
 */
268 tmpfs_mknod(struct vop_mknod_args *v)
270 struct vnode *dvp = v->a_dvp;
271 struct vnode **vpp = v->a_vpp;
272 struct componentname *cnp = v->a_cnp;
273 struct vattr *vap = v->a_vap;
275 if (vap->va_type != VBLK && vap->va_type != VCHR &&
276 vap->va_type != VFIFO)
279 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
/* File operations vector installed on VREG opens (see tmpfs_open). */
282 struct fileops tmpfs_fnops;
/*
 * VOP_OPEN: refuse nodes with no remaining links (being deleted) and
 * write-opens of append-only files; create the vm object backing, and
 * for regular files with a struct file attach tmpfs_fnops with a node
 * reference.  (Extract elides some error-return lines.)
 */
285 tmpfs_open(struct vop_open_args *v)
288 struct tmpfs_node *node;
294 node = VP_TO_TMPFS_NODE(vp);
297 * The file is still active but all its names have been removed
298 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
299 * it is about to die.
301 if (node->tn_links < 1)
304 /* If the file is marked append-only, deny write requests. */
305 if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
309 /* For regular files, the call below is nop. */
310 KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
311 OBJ_DEAD) == 0, ("dead object"));
312 vnode_create_vobject(vp, node->tn_size, v->a_td);
316 MPASS(fp == NULL || fp->f_data == NULL);
317 if (error == 0 && fp != NULL && vp->v_type == VREG) {
318 tmpfs_ref_node(node);
319 finit_vnode(fp, mode, node, &tmpfs_fnops);
/* VOP_CLOSE: only updates the node timestamps. */
326 tmpfs_close(struct vop_close_args *v)
328 struct vnode *vp = v->a_vp;
330 /* Update node times. */
/*
 * fo_close for tmpfs_fnops: drop the node reference taken in tmpfs_open,
 * then chain to the generic vnode fileops close.
 */
337 tmpfs_fo_close(struct file *fp, struct thread *td)
339 struct tmpfs_node *node;
343 MPASS(node->tn_type == VREG);
344 tmpfs_free_node(node->tn_reg.tn_tmp, node);
346 return (vnops.fo_close(fp, td));
350 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
351 * the comment above cache_fplookup for details.
/*
 * Lockless (SMR-protected) exec-permission check for fast path lookup:
 * short-circuits when all three execute bits are set, otherwise falls
 * back to vaccess_vexec_smr().
 */
354 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
357 struct tmpfs_node *node;
362 node = VP_TO_TMPFS_NODE_SMR(vp);
363 if (__predict_false(node == NULL))
366 all_x = S_IXUSR | S_IXGRP | S_IXOTH;
367 mode = atomic_load_short(&node->tn_mode);
368 if (__predict_true((mode & all_x) == all_x))
372 return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
/*
 * VOP_ACCESS: fast-path VEXEC when world-executable, deny writes on
 * read-only mounts and IMMUTABLE nodes, then defer to vaccess().
 * (Extract elides the switch cases and some returns.)
 */
376 tmpfs_access(struct vop_access_args *v)
378 struct vnode *vp = v->a_vp;
379 accmode_t accmode = v->a_accmode;
380 struct ucred *cred = v->a_cred;
381 mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
383 struct tmpfs_node *node;
385 MPASS(VOP_ISLOCKED(vp));
387 node = VP_TO_TMPFS_NODE(vp);
390 * Common case path lookup.
392 if (__predict_true(accmode == VEXEC && (node->tn_mode & all_x) == all_x))
395 switch (vp->v_type) {
401 if (accmode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) {
421 if (accmode & VWRITE && node->tn_flags & IMMUTABLE) {
426 error = vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
430 MPASS(VOP_ISLOCKED(vp));
/*
 * VOP_STAT: fill struct stat directly from the tmpfs node.  For regular
 * files st_blocks comes from the backing vm object's page count, read
 * under the object lock to avoid a torn read; otherwise from tn_size.
 */
436 tmpfs_stat(struct vop_stat_args *v)
438 struct vnode *vp = v->a_vp;
439 struct stat *sb = v->a_sb;
440 struct tmpfs_node *node;
443 node = VP_TO_TMPFS_NODE(vp);
445 tmpfs_update_getattr(vp);
447 error = vop_stat_helper_pre(v);
448 if (__predict_false(error))
451 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
452 sb->st_ino = node->tn_id;
453 sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
454 sb->st_nlink = node->tn_links;
455 sb->st_uid = node->tn_uid;
456 sb->st_gid = node->tn_gid;
457 sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
458 node->tn_rdev : NODEV;
459 sb->st_size = node->tn_size;
460 sb->st_atim.tv_sec = node->tn_atime.tv_sec;
461 sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
462 sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
463 sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
464 sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
465 sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
466 sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
467 sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
468 sb->st_blksize = PAGE_SIZE;
469 sb->st_flags = node->tn_flags;
470 sb->st_gen = node->tn_gen;
471 if (vp->v_type == VREG) {
473 vm_object_t obj = node->tn_reg.tn_aobj;
475 /* Handle torn read */
476 VM_OBJECT_RLOCK(obj);
478 sb->st_blocks = ptoa(node->tn_reg.tn_pages);
480 VM_OBJECT_RUNLOCK(obj);
483 sb->st_blocks = node->tn_size;
485 sb->st_blocks /= S_BLKSIZE;
486 return (vop_stat_helper_post(v, error));
/*
 * VOP_GETATTR: fill struct vattr from the tmpfs node; va_bytes for
 * regular files comes from the backing object's page count (object
 * read-locked), otherwise from tn_size.
 */
490 tmpfs_getattr(struct vop_getattr_args *v)
492 struct vnode *vp = v->a_vp;
493 struct vattr *vap = v->a_vap;
494 struct tmpfs_node *node;
496 node = VP_TO_TMPFS_NODE(vp);
498 tmpfs_update_getattr(vp);
500 vap->va_type = vp->v_type;
501 vap->va_mode = node->tn_mode;
502 vap->va_nlink = node->tn_links;
503 vap->va_uid = node->tn_uid;
504 vap->va_gid = node->tn_gid;
505 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
506 vap->va_fileid = node->tn_id;
507 vap->va_size = node->tn_size;
508 vap->va_blocksize = PAGE_SIZE;
509 vap->va_atime = node->tn_atime;
510 vap->va_mtime = node->tn_mtime;
511 vap->va_ctime = node->tn_ctime;
512 vap->va_birthtime = node->tn_birthtime;
513 vap->va_gen = node->tn_gen;
514 vap->va_flags = node->tn_flags;
515 vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
516 node->tn_rdev : NODEV;
517 if (vp->v_type == VREG) {
519 vm_object_t obj = node->tn_reg.tn_aobj;
521 VM_OBJECT_RLOCK(obj);
523 vap->va_bytes = ptoa(node->tn_reg.tn_pages);
525 VM_OBJECT_RUNLOCK(obj);
528 vap->va_bytes = node->tn_size;
/*
 * VOP_SETATTR: reject attributes that can never be set, then apply the
 * requested changes one group at a time (flags, size, ownership, mode,
 * times) via the tmpfs_ch*() helpers, stopping at the first error.
 */
536 tmpfs_setattr(struct vop_setattr_args *v)
538 struct vnode *vp = v->a_vp;
539 struct vattr *vap = v->a_vap;
540 struct ucred *cred = v->a_cred;
541 struct thread *td = curthread;
545 MPASS(VOP_ISLOCKED(vp));
546 ASSERT_VOP_IN_SEQC(vp);
550 /* Abort if any unsettable attribute is given. */
551 if (vap->va_type != VNON ||
552 vap->va_nlink != VNOVAL ||
553 vap->va_fsid != VNOVAL ||
554 vap->va_fileid != VNOVAL ||
555 vap->va_blocksize != VNOVAL ||
556 vap->va_gen != VNOVAL ||
557 vap->va_rdev != VNOVAL ||
558 vap->va_bytes != VNOVAL)
561 if (error == 0 && (vap->va_flags != VNOVAL))
562 error = tmpfs_chflags(vp, vap->va_flags, cred, td);
564 if (error == 0 && (vap->va_size != VNOVAL))
565 error = tmpfs_chsize(vp, vap->va_size, cred, td);
567 if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
568 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
570 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
571 error = tmpfs_chmod(vp, vap->va_mode, cred, td);
573 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
574 vap->va_atime.tv_nsec != VNOVAL) ||
575 (vap->va_mtime.tv_sec != VNOVAL &&
576 vap->va_mtime.tv_nsec != VNOVAL) ||
577 (vap->va_birthtime.tv_sec != VNOVAL &&
578 vap->va_birthtime.tv_nsec != VNOVAL)))
579 error = tmpfs_chtimes(vp, vap, cred, td);
582 * Update the node times. We give preference to the error codes
583 * generated by this function rather than the ones that may arise
588 MPASS(VOP_ISLOCKED(vp));
/*
 * VOP_READ: regular files only; validate the offset, mark the node
 * accessed, and copy out of the backing anonymous object with
 * uiomove_object().
 */
594 tmpfs_read(struct vop_read_args *v)
598 struct tmpfs_node *node;
601 if (vp->v_type != VREG)
604 if (uio->uio_offset < 0)
606 node = VP_TO_TMPFS_NODE(vp);
607 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
608 return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
/*
 * VOP_READ_PGCACHE: lockless read served from the page cache under SMR.
 * Verifies the node and its backing object are still live, snapshots the
 * size (stable under the rangelock), then copies with uiomove_object().
 */
612 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
615 struct tmpfs_node *node;
621 VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
623 if (v->a_uio->uio_offset < 0)
629 node = VP_TO_TMPFS_NODE_SMR(vp);
632 MPASS(node->tn_type == VREG);
633 MPASS(node->tn_refcount >= 1);
634 object = node->tn_reg.tn_aobj;
638 MPASS(object->type == tmpfs_pager_type);
639 MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
641 if (!VN_IS_DOOMED(vp)) {
642 /* size cannot become shorter due to rangelock. */
643 size = node->tn_size;
644 tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
646 error = uiomove_object(object, size, v->a_uio);
/*
 * VOP_WRITE: handle IO_APPEND, enforce file-size resource limits
 * (vn_rlimit_fsizex), grow the file when writing past EOF, copy into the
 * backing object, clear setuid/setgid unless privileged, and roll the
 * size back to 'oldsize' on error.  (Extract elides some error paths.)
 */
655 tmpfs_write(struct vop_write_args *v)
659 struct tmpfs_node *node;
667 ioflag = v->a_ioflag;
669 node = VP_TO_TMPFS_NODE(vp);
670 oldsize = node->tn_size;
672 if (uio->uio_offset < 0 || vp->v_type != VREG)
674 if (uio->uio_resid == 0)
676 if (ioflag & IO_APPEND)
677 uio->uio_offset = node->tn_size;
678 error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
679 tm_maxfilesize, &r, uio->uio_td);
681 vn_rlimit_fsizex_res(uio, r);
685 if (uio->uio_offset + uio->uio_resid > node->tn_size) {
686 error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
692 error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
693 node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
694 node->tn_accessed = true;
695 if (node->tn_mode & (S_ISUID | S_ISGID)) {
696 if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
697 newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
698 vn_seqc_write_begin(vp);
699 atomic_store_short(&node->tn_mode, newmode);
700 vn_seqc_write_end(vp);
704 (void)tmpfs_reg_resize(vp, oldsize, TRUE);
707 MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
708 MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
710 vn_rlimit_fsizex_res(uio, r);
/* VOP_DEALLOCATE: punch a hole in the file's backing store. */
715 tmpfs_deallocate(struct vop_deallocate_args *v)
717 return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
/* VOP_FSYNC: nothing to flush for memory-backed files; sync mtime only. */
721 tmpfs_fsync(struct vop_fsync_args *v)
723 struct vnode *vp = v->a_vp;
725 MPASS(VOP_ISLOCKED(vp));
727 tmpfs_check_mtime(vp);
/*
 * VOP_REMOVE: unlink a non-directory entry.  Refuses IMMUTABLE / APPEND /
 * NOUNLINK nodes and append-only parents, detaches and frees the dirent
 * (optionally replacing it with a whiteout), and marks the node changed.
 * The node itself is freed later, when its vnode is reclaimed.
 */
734 tmpfs_remove(struct vop_remove_args *v)
736 struct vnode *dvp = v->a_dvp;
737 struct vnode *vp = v->a_vp;
740 struct tmpfs_dirent *de;
741 struct tmpfs_mount *tmp;
742 struct tmpfs_node *dnode;
743 struct tmpfs_node *node;
745 MPASS(VOP_ISLOCKED(dvp));
746 MPASS(VOP_ISLOCKED(vp));
748 if (vp->v_type == VDIR) {
753 dnode = VP_TO_TMPFS_DIR(dvp);
754 node = VP_TO_TMPFS_NODE(vp);
755 tmp = VFS_TO_TMPFS(vp->v_mount);
756 de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
759 /* Files marked as immutable or append-only cannot be deleted. */
760 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
761 (dnode->tn_flags & APPEND)) {
766 /* Remove the entry from the directory; as it is a file, we do not
767 * have to change the number of hard links of the directory. */
768 tmpfs_dir_detach(dvp, de);
769 if (v->a_cnp->cn_flags & DOWHITEOUT)
770 tmpfs_dir_whiteout_add(dvp, v->a_cnp);
772 /* Free the directory entry we just deleted. Note that the node
773 * referred by it will not be removed until the vnode is really
775 tmpfs_free_dirent(tmp, de);
777 node->tn_status |= TMPFS_NODE_CHANGED;
778 node->tn_accessed = true;
/*
 * VOP_LINK: create a hard link to 'vp' in directory 'tdvp'.  Enforces
 * TMPFS_LINK_MAX and the IMMUTABLE/APPEND restrictions, allocates a new
 * dirent for the node, removes any covering whiteout, and attaches it.
 */
786 tmpfs_link(struct vop_link_args *v)
788 struct vnode *dvp = v->a_tdvp;
789 struct vnode *vp = v->a_vp;
790 struct componentname *cnp = v->a_cnp;
793 struct tmpfs_dirent *de;
794 struct tmpfs_node *node;
796 MPASS(VOP_ISLOCKED(dvp));
797 MPASS(dvp != vp); /* XXX When can this be false? */
798 node = VP_TO_TMPFS_NODE(vp);
800 /* Ensure that we do not overflow the maximum number of links imposed
802 MPASS(node->tn_links <= TMPFS_LINK_MAX);
803 if (node->tn_links == TMPFS_LINK_MAX) {
808 /* We cannot create links of files marked immutable or append-only. */
809 if (node->tn_flags & (IMMUTABLE | APPEND)) {
814 /* Allocate a new directory entry to represent the node. */
815 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
816 cnp->cn_nameptr, cnp->cn_namelen, &de);
820 /* Insert the new directory entry into the appropriate directory. */
821 if (cnp->cn_flags & ISWHITEOUT)
822 tmpfs_dir_whiteout_remove(dvp, cnp);
823 tmpfs_dir_attach(dvp, de);
825 /* vp link count has changed, so update node times. */
826 node->tn_status |= TMPFS_NODE_CHANGED;
/*
 * Deadlock-avoiding lock acquisition helper for tmpfs_rename(): see the
 * original comment below.  Re-resolves fvp and tvp after relocking since
 * either may have been renamed or removed concurrently; bumps the
 * tmpfs_rename_restarts sysctl counter on exit.
 * (Extract elides the restart labels and several error paths.)
 */
836 * We acquire all but fdvp locks using non-blocking acquisitions. If we
837 * fail to acquire any lock in the path we will drop all held locks,
838 * acquire the new lock in a blocking fashion, and then release it and
839 * restart the rename. This acquire/release step ensures that we do not
840 * spin on a lock waiting for release. On error release all vnode locks
841 * and decrement references the way tmpfs_rename() would do.
844 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
845 struct vnode *tdvp, struct vnode **tvpp,
846 struct componentname *fcnp, struct componentname *tcnp)
850 struct tmpfs_dirent *de;
851 int error, restarts = 0;
854 if (*tvpp != NULL && *tvpp != tdvp)
860 error = vn_lock(fdvp, LK_EXCLUSIVE);
863 if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
865 error = vn_lock(tdvp, LK_EXCLUSIVE);
872 * Re-resolve fvp to be certain it still exists and fetch the
875 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
879 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
880 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
886 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
892 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
897 * Concurrent rename race.
912 * Re-resolve tvp and acquire the vnode lock if present.
914 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
916 * If tvp disappeared we just carry on.
918 if (de == NULL && *tvpp != NULL) {
923 * Get the tvp ino if the lookup succeeded. We may have to restart
924 * if the non-blocking acquire fails.
928 error = tmpfs_alloc_vp(mp, de->td_node,
929 LK_EXCLUSIVE | LK_NOWAIT, &nvp);
938 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
944 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
953 tmpfs_rename_restarts += restarts;
963 tmpfs_rename_restarts += restarts;
/*
 * VOP_RENAME: move/rename a directory entry.  Outline as visible here:
 * reject cross-mount renames; take all needed vnode locks (falling back
 * to tmpfs_rename_relock() on contention); start seqc writes on every
 * involved vnode; validate flags (IMMUTABLE/APPEND/NOUNLINK) and the
 * dir-over-dir / dir-over-file combinations; for a cross-directory move
 * of a directory, walk up from the target to make sure it is not a
 * descendant of the source (under TMPFS_LOCK, freeing 'newname' on the
 * bail-out paths) and re-point the child's tn_parent; detach the dirent
 * from the source, rename it if needed, remove an overwritten target
 * entry, attach to the destination, and fix up the namecache.
 * (Extract elides many error paths, labels, and unlock sequences.)
 */
969 tmpfs_rename(struct vop_rename_args *v)
971 struct vnode *fdvp = v->a_fdvp;
972 struct vnode *fvp = v->a_fvp;
973 struct componentname *fcnp = v->a_fcnp;
974 struct vnode *tdvp = v->a_tdvp;
975 struct vnode *tvp = v->a_tvp;
976 struct componentname *tcnp = v->a_tcnp;
978 struct tmpfs_dirent *de;
979 struct tmpfs_mount *tmp;
980 struct tmpfs_node *fdnode;
981 struct tmpfs_node *fnode;
982 struct tmpfs_node *tnode;
983 struct tmpfs_node *tdnode;
987 MPASS(VOP_ISLOCKED(tdvp));
988 MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
990 want_seqc_end = false;
993 * Disallow cross-device renames.
994 * XXX Why isn't this done by the caller?
996 if (fvp->v_mount != tdvp->v_mount ||
997 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
1002 /* If source and target are the same file, there is nothing to do. */
1009 * If we need to move the directory between entries, lock the
1010 * source so that we can safely operate on it.
1012 if (fdvp != tdvp && fdvp != tvp) {
1013 if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1014 error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1018 ASSERT_VOP_ELOCKED(fdvp,
1019 "tmpfs_rename: fdvp not locked");
1020 ASSERT_VOP_ELOCKED(tdvp,
1021 "tmpfs_rename: tdvp not locked");
1023 ASSERT_VOP_ELOCKED(tvp,
1024 "tmpfs_rename: tvp not locked");
1033 vn_seqc_write_begin(tvp);
1034 vn_seqc_write_begin(tdvp);
1035 vn_seqc_write_begin(fvp);
1036 vn_seqc_write_begin(fdvp);
1037 want_seqc_end = true;
1039 tmp = VFS_TO_TMPFS(tdvp->v_mount);
1040 tdnode = VP_TO_TMPFS_DIR(tdvp);
1041 tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1042 fdnode = VP_TO_TMPFS_DIR(fdvp);
1043 fnode = VP_TO_TMPFS_NODE(fvp);
1044 de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1047 * Entry can disappear before we lock fdvp,
1048 * also avoid manipulating '.' and '..' entries.
1051 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1052 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1058 MPASS(de->td_node == fnode);
1061 * If re-naming a directory to another preexisting directory
1062 * ensure that the target directory is empty so that its
1063 * removal causes no side effects.
1064 * Kern_rename guarantees the destination to be a directory
1065 * if the source is one.
1068 MPASS(tnode != NULL);
1070 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1071 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1076 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1077 if (tnode->tn_size > 0) {
1081 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1084 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1088 MPASS(fnode->tn_type != VDIR &&
1089 tnode->tn_type != VDIR);
1093 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1094 || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1100 * Ensure that we have enough memory to hold the new name, if it
1101 * has to be changed.
1103 if (fcnp->cn_namelen != tcnp->cn_namelen ||
1104 bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1105 newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1110 * If the node is being moved to another directory, we have to do
1113 if (fdnode != tdnode) {
1115 * In case we are moving a directory, we have to adjust its
1116 * parent to point to the new parent.
1118 if (de->td_node->tn_type == VDIR) {
1119 struct tmpfs_node *n;
1122 * Ensure the target directory is not a child of the
1123 * directory being moved. Otherwise, we'd end up
1128 * TMPFS_LOCK guaranties that no nodes are freed while
1129 * traversing the list. Nodes can only be marked as
1130 * removed: tn_parent == NULL.
1134 while (n != n->tn_dir.tn_parent) {
1135 struct tmpfs_node *parent;
1138 TMPFS_NODE_UNLOCK(n);
1141 if (newname != NULL)
1142 free(newname, M_TMPFSNAME);
1145 parent = n->tn_dir.tn_parent;
1146 TMPFS_NODE_UNLOCK(n);
1147 if (parent == NULL) {
1151 TMPFS_NODE_LOCK(parent);
1152 if (parent->tn_dir.tn_parent == NULL) {
1153 TMPFS_NODE_UNLOCK(parent);
1162 if (newname != NULL)
1163 free(newname, M_TMPFSNAME);
1166 TMPFS_NODE_UNLOCK(n);
1168 /* Adjust the parent pointer. */
1169 TMPFS_VALIDATE_DIR(fnode);
1170 TMPFS_NODE_LOCK(de->td_node);
1171 de->td_node->tn_dir.tn_parent = tdnode;
1172 TMPFS_NODE_UNLOCK(de->td_node);
1175 * As a result of changing the target of the '..'
1176 * entry, the link count of the source and target
1177 * directories has to be adjusted.
1179 TMPFS_NODE_LOCK(tdnode);
1180 TMPFS_ASSERT_LOCKED(tdnode);
1182 TMPFS_NODE_UNLOCK(tdnode);
1184 TMPFS_NODE_LOCK(fdnode);
1185 TMPFS_ASSERT_LOCKED(fdnode);
1187 TMPFS_NODE_UNLOCK(fdnode);
1192 * Do the move: just remove the entry from the source directory
1193 * and insert it into the target one.
1195 tmpfs_dir_detach(fdvp, de);
1197 if (fcnp->cn_flags & DOWHITEOUT)
1198 tmpfs_dir_whiteout_add(fdvp, fcnp);
1199 if (tcnp->cn_flags & ISWHITEOUT)
1200 tmpfs_dir_whiteout_remove(tdvp, tcnp);
1203 * If the name has changed, we need to make it effective by changing
1204 * it in the directory entry.
1206 if (newname != NULL) {
1207 MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1209 free(de->ud.td_name, M_TMPFSNAME);
1210 de->ud.td_name = newname;
1211 tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1213 fnode->tn_status |= TMPFS_NODE_CHANGED;
1214 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1218 * If we are overwriting an entry, we have to remove the old one
1219 * from the target directory.
1222 struct tmpfs_dirent *tde;
1224 /* Remove the old entry from the target directory. */
1225 tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1226 tmpfs_dir_detach(tdvp, tde);
1229 * Free the directory entry we just deleted. Note that the
1230 * node referred by it will not be removed until the vnode is
1233 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1236 tmpfs_dir_attach(tdvp, de);
1238 if (tmpfs_use_nc(fvp)) {
1239 cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1245 if (fdvp != tdvp && fdvp != tvp)
1249 if (want_seqc_end) {
1251 vn_seqc_write_end(tvp);
1252 vn_seqc_write_end(tdvp);
1253 vn_seqc_write_end(fvp);
1254 vn_seqc_write_end(fdvp);
1258 * Release target nodes.
1259 * XXX: I don't understand when tdvp can be the same as tvp, but
1260 * other code takes care of this...
1269 /* Release source nodes. */
/* VOP_MKDIR: create a directory node via tmpfs_alloc_file(). */
1277 tmpfs_mkdir(struct vop_mkdir_args *v)
1279 struct vnode *dvp = v->a_dvp;
1280 struct vnode **vpp = v->a_vpp;
1281 struct componentname *cnp = v->a_cnp;
1282 struct vattr *vap = v->a_vap;
1284 MPASS(vap->va_type == VDIR);
1286 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
/*
 * VOP_RMDIR: remove an empty directory.  Refuses non-empty directories
 * and flag-protected nodes, detaches the dirent (optionally adding a
 * whiteout), clears tn_parent so no new vnode can be allocated for the
 * node, updates both nodes' status, purges the namecache, and frees the
 * dirent.  (Extract elides the error labels and returns.)
 */
1290 tmpfs_rmdir(struct vop_rmdir_args *v)
1292 struct vnode *dvp = v->a_dvp;
1293 struct vnode *vp = v->a_vp;
1296 struct tmpfs_dirent *de;
1297 struct tmpfs_mount *tmp;
1298 struct tmpfs_node *dnode;
1299 struct tmpfs_node *node;
1301 MPASS(VOP_ISLOCKED(dvp));
1302 MPASS(VOP_ISLOCKED(vp));
1304 tmp = VFS_TO_TMPFS(dvp->v_mount);
1305 dnode = VP_TO_TMPFS_DIR(dvp);
1306 node = VP_TO_TMPFS_DIR(vp);
1308 /* Directories with more than two entries ('.' and '..') cannot be
1310 if (node->tn_size > 0) {
1315 if ((dnode->tn_flags & APPEND)
1316 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1321 /* This invariant holds only if we are not trying to remove "..".
1322 * We checked for that above so this is safe now. */
1323 MPASS(node->tn_dir.tn_parent == dnode);
1325 /* Get the directory entry associated with node (vp). This was
1326 * filled by tmpfs_lookup while looking up the entry. */
1327 de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
1328 MPASS(TMPFS_DIRENT_MATCHES(de,
1329 v->a_cnp->cn_nameptr,
1330 v->a_cnp->cn_namelen));
1332 /* Check flags to see if we are allowed to remove the directory. */
1333 if ((dnode->tn_flags & APPEND) != 0 ||
1334 (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
1339 /* Detach the directory entry from the directory (dnode). */
1340 tmpfs_dir_detach(dvp, de);
1341 if (v->a_cnp->cn_flags & DOWHITEOUT)
1342 tmpfs_dir_whiteout_add(dvp, v->a_cnp);
1344 /* No vnode should be allocated for this entry from this point */
1345 TMPFS_NODE_LOCK(node);
1347 node->tn_dir.tn_parent = NULL;
1348 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1349 node->tn_accessed = true;
1351 TMPFS_NODE_UNLOCK(node);
1353 TMPFS_NODE_LOCK(dnode);
1355 dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1356 dnode->tn_accessed = true;
1357 TMPFS_NODE_UNLOCK(dnode);
1359 if (tmpfs_use_nc(dvp)) {
1360 cache_vop_rmdir(dvp, vp);
1363 /* Free the directory entry we just deleted. Note that the node
1364 * referred by it will not be removed until the vnode is really
1366 tmpfs_free_dirent(tmp, de);
1368 /* Release the deleted vnode (will destroy the node, notify
1369 * interested parties and clean it from the cache). */
1371 dnode->tn_status |= TMPFS_NODE_CHANGED;
/*
 * VOP_SYMLINK: create a symbolic link with target 'a_target'; forces
 * va_type to VLNK because kern_symlink does not set it (see XXX below).
 */
1381 tmpfs_symlink(struct vop_symlink_args *v)
1383 struct vnode *dvp = v->a_dvp;
1384 struct vnode **vpp = v->a_vpp;
1385 struct componentname *cnp = v->a_cnp;
1386 struct vattr *vap = v->a_vap;
1387 const char *target = v->a_target;
1389 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1390 MPASS(vap->va_type == VLNK);
1392 vap->va_type = VLNK;
1395 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
/*
 * VOP_READDIR: emit directory entries via tmpfs_dir_getdents(), with
 * optional cookie allocation for NFS/compat consumers; EJUSTRETURN from
 * the helper means the buffer filled before EOF.  Sets *eofflag when the
 * offset reached TMPFS_DIRCOOKIE_EOF.
 */
1399 tmpfs_readdir(struct vop_readdir_args *va)
1403 struct tmpfs_mount *tm;
1404 struct tmpfs_node *node;
1406 int *eofflag, *ncookies;
1408 int error, maxcookies;
1412 eofflag = va->a_eofflag;
1413 cookies = va->a_cookies;
1414 ncookies = va->a_ncookies;
1416 /* This operation only makes sense on directory nodes. */
1417 if (vp->v_type != VDIR)
1421 node = VP_TO_TMPFS_DIR(vp);
1422 tm = VFS_TO_TMPFS(vp->v_mount);
1424 startresid = uio->uio_resid;
1426 /* Allocate cookies for NFS and compat modules. */
1427 if (cookies != NULL && ncookies != NULL) {
1428 maxcookies = howmany(node->tn_size,
1429 sizeof(struct tmpfs_dirent)) + 2;
1430 *cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1435 if (cookies == NULL)
1436 error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1438 error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1441 /* Buffer was filled without hitting EOF. */
1442 if (error == EJUSTRETURN)
1443 error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1445 if (error != 0 && cookies != NULL && ncookies != NULL) {
1446 free(*cookies, M_TEMP);
1451 if (eofflag != NULL)
1453 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
/*
 * VOP_READLINK: copy the stored link target (tn_link_target, tn_size
 * bytes) out to the caller and mark the node accessed.
 */
1459 tmpfs_readlink(struct vop_readlink_args *v)
1461 struct vnode *vp = v->a_vp;
1462 struct uio *uio = v->a_uio;
1465 struct tmpfs_node *node;
1467 MPASS(uio->uio_offset == 0);
1468 MPASS(vp->v_type == VLNK);
1470 node = VP_TO_TMPFS_NODE(vp);
1472 error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1474 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1480 * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1481 * the comment above cache_fplookup for details.
1483 * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
/*
 * Lockless symlink resolution for the fast lookup path: bail out unless
 * the node and its SMR-safe link target (tn_link_smr) are available,
 * then hand the target string to cache_symlink_resolve().
 */
1486 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1489 struct tmpfs_node *node;
1493 node = VP_TO_TMPFS_NODE_SMR(vp);
1494 if (__predict_false(node == NULL))
1496 if (!atomic_load_char(&node->tn_link_smr))
1498 symlink = atomic_load_ptr(&node->tn_link_target);
1499 if (symlink == NULL)
1502 return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
/*
 * VOP_INACTIVE: recycle the vnode immediately when the node has no links
 * left; otherwise just sync mtime from the backing object.
 */
1506 tmpfs_inactive(struct vop_inactive_args *v)
1509 struct tmpfs_node *node;
1512 node = VP_TO_TMPFS_NODE(vp);
1513 if (node->tn_links == 0)
1516 tmpfs_check_mtime(vp);
/*
 * VOP_NEED_INACTIVE: inactivation is needed when the node is unlinked, or
 * (for regular files) when the backing object has been dirtied since the
 * last clean generation.
 */
1521 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1524 struct tmpfs_node *node;
1525 struct vm_object *obj;
1528 node = VP_TO_TMPFS_NODE(vp);
1529 if (node->tn_links == 0)
1531 if (vp->v_type == VREG) {
1533 if (obj->generation != obj->cleangeneration)
/*
 * VOP_RECLAIM: disassociate the vnode from its tmpfs node; destroy the
 * vm object for regular files, and free the node outright when it has no
 * links and no concurrent allocation in flight (TMPFS_VNODE_ALLOCATING).
 */
1542 tmpfs_reclaim(struct vop_reclaim_args *v)
1545 struct tmpfs_mount *tmp;
1546 struct tmpfs_node *node;
1550 node = VP_TO_TMPFS_NODE(vp);
1551 tmp = VFS_TO_TMPFS(vp->v_mount);
1553 if (vp->v_type == VREG)
1554 tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1555 vp->v_object = NULL;
1558 TMPFS_NODE_LOCK(node);
1562 * If the node referenced by this vnode was deleted by the user,
1563 * we must free its associated data structures (now that the vnode
1564 * is being reclaimed).
1567 if (node->tn_links == 0 &&
1568 (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1569 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1570 unlock = !tmpfs_free_node_locked(tmp, node, true);
1574 TMPFS_NODE_UNLOCK(node);
1578 MPASS(vp->v_data == NULL);
/* VOP_PRINT: dump node identity, flags, mode, and size for ddb/diagnostics. */
1583 tmpfs_print(struct vop_print_args *v)
1585 struct vnode *vp = v->a_vp;
1587 struct tmpfs_node *node;
1589 node = VP_TO_TMPFS_NODE(vp);
1591 printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1592 node, node->tn_flags, (uintmax_t)node->tn_links);
1593 printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1594 node->tn_mode, node->tn_uid, node->tn_gid,
1595 (intmax_t)node->tn_size, node->tn_status);
1597 if (vp->v_type == VFIFO)
/*
 * VOP_PATHCONF: report file-system limits and capabilities.  Several
 * case labels and the switch scaffolding are elided in this chunk;
 * only the visible cases are annotated.
 */
1606 tmpfs_pathconf(struct vop_pathconf_args *v)
1608 struct vnode *vp = v->a_vp;
1609 int name = v->a_name;
1610 long *retval = v->a_retval;
/* Hard-link limit for tmpfs (case label elided -- presumably _PC_LINK_MAX). */
1618 *retval = TMPFS_LINK_MAX;
1621 case _PC_SYMLINK_MAX:
1622 *retval = MAXPATHLEN;
/* NOTE(review): this case appears restricted to dirs/FIFOs; the case
 * label and both branch bodies are elided here. */
1630 if (vp->v_type == VDIR || vp->v_type == VFIFO)
1636 case _PC_CHOWN_RESTRICTED:
1648 case _PC_FILESIZEBITS:
1652 case _PC_MIN_HOLE_SIZE:
/* Holes are tracked at page granularity (see the SEEK_HOLE code below). */
1653 *retval = PAGE_SIZE;
/* Anything unhandled defers to the generic implementation. */
1657 error = vop_stdpathconf(v);
/*
 * VOP_VPTOFH: encode an NFS-style file handle (node id + generation)
 * for this vnode.  The generation lets a stale handle be detected
 * after the node id is reused.
 */
1664 tmpfs_vptofh(struct vop_vptofh_args *ap)
1667 IN struct vnode *a_vp;
1668 IN struct fid *a_fhp;
1672 struct tmpfs_fid_data tfd;
1673 struct tmpfs_node *node;
1676 node = VP_TO_TMPFS_NODE(ap->a_vp);
1678 fhp->fid_len = sizeof(tfd);
1681 * Copy into fid_data from the stack to avoid unaligned pointer use.
1682 * See the comment in sys/mount.h on struct fid for details.
1684 tfd.tfd_id = node->tn_id;
1685 tfd.tfd_gen = node->tn_gen;
1686 memcpy(fhp->fid_data, &tfd, fhp->fid_len);
/*
 * VOP_WHITEOUT: manage whiteout directory entries (used by union
 * mounts).  Dispatches on ap->a_flags; the case labels themselves are
 * elided in this chunk -- presumably LOOKUP / CREATE / DELETE.
 */
1692 tmpfs_whiteout(struct vop_whiteout_args *ap)
1694 struct vnode *dvp = ap->a_dvp;
1695 struct componentname *cnp = ap->a_cnp;
1696 struct tmpfs_dirent *de;
1698 switch (ap->a_flags) {
/* Lookup: succeed only if the entry exists and is a whiteout. */
1702 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
/* A whiteout dirent has td_node == NULL; a real entry is EEXIST. */
1704 return (de->td_node == NULL ? 0 : EEXIST);
1705 return (tmpfs_dir_whiteout_add(dvp, cnp));
1707 tmpfs_dir_whiteout_remove(dvp, cnp);
/* Unknown operation is a caller bug, not a recoverable error. */
1710 panic("tmpfs_whiteout: unknown op");
/*
 * Scan directory tnp for the entry that points at node tn; on success
 * *pde receives the matching dirent.  Helper for VOP_VPTOCNP.
 * NOTE(review): the return statements are elided in this chunk.
 */
1715 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1716 struct tmpfs_dirent **pde)
1718 struct tmpfs_dir_cursor dc;
1719 struct tmpfs_dirent *de;
/* Linear walk over all entries using the directory cursor API. */
1721 for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1722 de = tmpfs_dir_next(tnp, &dc)) {
1723 if (de->td_node == tn) {
/*
 * Get a locked vnode for parent directory tnp and copy the component
 * name of tn backwards into buf (VOP_VPTOCNP fills the buffer from
 * the end).  *dvp/*buflen are the usual vptocnp out-parameters.
 */
1732 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1733 struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1735 struct tmpfs_dirent *de;
/* Instantiate/lock the parent vnode without deadlocking against vp. */
1738 error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
/* Find the dirent in the parent that names tn. */
1742 error = tmpfs_vptocnp_dir(tn, tnp, &de);
/* Prepend the component name at the tail of the caller's buffer. */
1745 i -= de->td_namelen;
1749 bcopy(de->ud.td_name, buf + i, de->td_namelen);
/*
 * VOP_VPTOCNP: translate a vnode back into (parent vnode, name).  For
 * directories the recorded parent pointer gives the answer directly;
 * for other node types every in-use directory on the mount is searched
 * for a dirent referencing the node.  Heavily elided in this chunk --
 * error paths and the restart labels are only partially visible.
 */
1766 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1768 struct vnode *vp, **dvp;
1769 struct tmpfs_node *tn, *tnp, *tnp1;
1770 struct tmpfs_dirent *de;
1771 struct tmpfs_mount *tm;
1779 buflen = ap->a_buflen;
1781 tm = VFS_TO_TMPFS(vp->v_mount);
1782 tn = VP_TO_TMPFS_NODE(vp);
/* Directory: the parent link is recorded in the node itself. */
1783 if (tn->tn_type == VDIR) {
1784 tnp = tn->tn_dir.tn_parent;
/* Hold a reference so the parent cannot be freed while we fill buf. */
1787 tmpfs_ref_node(tnp);
1788 error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1790 tmpfs_free_node(tm, tnp);
/* Non-directory: brute-force scan of all directories on the mount. */
1796 LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1797 if (tnp->tn_type != VDIR)
1799 TMPFS_NODE_LOCK(tnp);
1800 tmpfs_ref_node(tnp);
1803 * tn_vnode cannot be instantiated while we hold the
1804 * node lock, so the directory cannot be changed while
1805 * we iterate over it. Do this to avoid instantiating
1806 * vnode for directories which cannot point to our
/* Vnode-less dirs are scanned in place; others go the fill path. */
1809 error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1813 TMPFS_NODE_UNLOCK(tnp);
1815 error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1818 tmpfs_free_node(tm, tnp);
/* vp may have been doomed while we ran unlocked; give up if so. */
1821 if (VN_IS_DOOMED(vp)) {
1822 tmpfs_free_node(tm, tnp);
1826 TMPFS_NODE_LOCK(tnp);
/* Drop our reference; if that freed the node, restart the list walk. */
1828 if (tmpfs_free_node_locked(tm, tnp, false)) {
1831 KASSERT(tnp->tn_refcount > 0,
1832 ("node %p refcount zero", tnp));
/* Still on tm_nodes_used: safe to step to the next node from here. */
1833 if (tnp->tn_attached) {
1834 tnp1 = LIST_NEXT(tnp, tn_entries);
1835 TMPFS_NODE_UNLOCK(tnp);
1837 TMPFS_NODE_UNLOCK(tnp);
1838 goto restart_locked;
/*
 * SEEK_DATA helper: with the VM object locked, find the lowest offset
 * >= noff backed by data -- either a resident page or a swap block.
 * Returns the offset of the object's size when no data follows.
 */
1847 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
1850 vm_pindex_t p, p_m, p_swp;
1852 p = OFF_TO_IDX(noff);
1853 m = vm_page_find_least(obj, p);
1856 * Microoptimize the most common case for SEEK_DATA, where
1857 * there is no hole and the page is resident.
1859 if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
/* Otherwise take the nearer of the next resident page and swap block. */
1862 p_swp = swap_pager_find_least(obj, p);
1866 p_m = m == NULL ? obj->size : m->pindex;
1867 return (IDX_TO_OFF(MIN(p_m, p_swp)));
/* Round noff up to the start of the next page. */
1871 tmpfs_seek_next(off_t noff)
1873 return (noff + PAGE_SIZE - (noff & PAGE_MASK));
/*
 * Clamp *noff against the file size.  Offsets before EOF pass through;
 * otherwise *noff is pinned to tn_size (EOF is the implicit trailing
 * hole for SEEK_HOLE).  NOTE(review): the per-mode return values are
 * elided in this chunk -- presumably ENXIO for SEEK_DATA past EOF.
 */
1877 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
1879 if (*noff < tn->tn_size)
1883 *noff = tn->tn_size;
/*
 * SEEK_HOLE helper: with the VM object locked, find the lowest offset
 * >= noff that is backed by neither a valid resident page nor a swap
 * block.
 */
1888 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
1891 vm_pindex_t p, p_swp;
1893 for (;; noff = tmpfs_seek_next(noff)) {
1895 * Walk over the largest sequential run of the valid pages.
1897 for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
1898 m != NULL && vm_page_any_valid(m);
1899 m = vm_page_next(m), noff = tmpfs_seek_next(noff))
1903 * Found a hole in the object's page queue. Check if
1904 * there is a hole in the swap at the same place.
1906 p = OFF_TO_IDX(noff);
1907 p_swp = swap_pager_find_least(obj, p);
/* No swap block here either: a genuine hole; report its offset. */
1909 noff = IDX_TO_OFF(p);
/*
 * Common driver for FIOSEEKDATA/FIOSEEKHOLE: validate the vnode type,
 * clamp the requested offset, then search the backing VM object under
 * its read lock.  *off is updated in place on success.
 */
1917 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
1919 struct tmpfs_node *tn;
/* Only regular files have data/holes to seek over. */
1924 if (vp->v_type != VREG)
1926 tn = VP_TO_TMPFS_NODE(vp);
/* Reject/pin offsets at or beyond EOF before touching the object. */
1930 error = tmpfs_seek_clamp(tn, &noff, seekdata);
1933 obj = tn->tn_reg.tn_aobj;
1935 VM_OBJECT_RLOCK(obj);
1936 noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
1937 tmpfs_seek_hole_locked(obj, noff);
1938 VM_OBJECT_RUNLOCK(obj);
/* The object search may run past EOF; clamp the result again. */
1940 error = tmpfs_seek_clamp(tn, &noff, seekdata);
/*
 * VOP_IOCTL: the only commands handled here are the SEEK_DATA/SEEK_HOLE
 * ioctls (case labels partly elided).  The vnode arrives unlocked for
 * ioctl, so it is locked shared around the seek.
 */
1947 tmpfs_ioctl(struct vop_ioctl_args *ap)
1949 struct vnode *vp = ap->a_vp;
1952 switch (ap->a_command) {
1955 error = vn_lock(vp, LK_SHARED);
/* a_data carries the user's offset in/out for both seek ioctls. */
1960 error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
1961 ap->a_command == FIOSEEKDATA);
1972 * Vnode operations vector used for files stored in a tmpfs file system.
1974 struct vop_vector tmpfs_vnodeop_entries = {
/* Unimplemented VOPs fall through to the system defaults. */
1975 .vop_default = &default_vnodeops,
/* Namecache-fronted lookup; misses land in tmpfs_cached_lookup. */
1976 .vop_lookup = vfs_cache_lookup,
1977 .vop_cachedlookup = tmpfs_cached_lookup,
1978 .vop_create = tmpfs_create,
1979 .vop_mknod = tmpfs_mknod,
1980 .vop_open = tmpfs_open,
1981 .vop_close = tmpfs_close,
/* Lockless fast-path lookup hooks (see the SMR routines above). */
1982 .vop_fplookup_vexec = tmpfs_fplookup_vexec,
1983 .vop_fplookup_symlink = tmpfs_fplookup_symlink,
1984 .vop_access = tmpfs_access,
1985 .vop_stat = tmpfs_stat,
1986 .vop_getattr = tmpfs_getattr,
1987 .vop_setattr = tmpfs_setattr,
1988 .vop_read = tmpfs_read,
1989 .vop_read_pgcache = tmpfs_read_pgcache,
1990 .vop_write = tmpfs_write,
1991 .vop_deallocate = tmpfs_deallocate,
1992 .vop_fsync = tmpfs_fsync,
1993 .vop_remove = tmpfs_remove,
1994 .vop_link = tmpfs_link,
1995 .vop_rename = tmpfs_rename,
1996 .vop_mkdir = tmpfs_mkdir,
1997 .vop_rmdir = tmpfs_rmdir,
1998 .vop_symlink = tmpfs_symlink,
1999 .vop_readdir = tmpfs_readdir,
2000 .vop_readlink = tmpfs_readlink,
2001 .vop_inactive = tmpfs_inactive,
2002 .vop_need_inactive = tmpfs_need_inactive,
2003 .vop_reclaim = tmpfs_reclaim,
2004 .vop_print = tmpfs_print,
2005 .vop_pathconf = tmpfs_pathconf,
2006 .vop_vptofh = tmpfs_vptofh,
2007 .vop_whiteout = tmpfs_whiteout,
/* tmpfs has no block device behind it, so bmap cannot be supported. */
2008 .vop_bmap = VOP_EOPNOTSUPP,
2009 .vop_vptocnp = tmpfs_vptocnp,
2010 .vop_lock1 = vop_lock,
2011 .vop_unlock = vop_unlock,
2012 .vop_islocked = vop_islocked,
2013 .vop_add_writecount = vop_stdadd_writecount_nomsync,
2014 .vop_ioctl = tmpfs_ioctl,
2016 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2019 * Same vector for mounts which do not use namecache.
2021 struct vop_vector tmpfs_vnodeop_nonc_entries = {
/* Inherit everything from the main tmpfs vector... */
2022 .vop_default = &tmpfs_vnodeop_entries,
/* ...but resolve names directly, bypassing the namecache. */
2023 .vop_lookup = tmpfs_lookup,
2025 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);