1 /* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
6 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
36 * tmpfs vnode interface.
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/extattr.h>
45 #include <sys/fcntl.h>
47 #include <sys/filio.h>
48 #include <sys/limits.h>
49 #include <sys/lockf.h>
51 #include <sys/mount.h>
52 #include <sys/namei.h>
55 #include <sys/rwlock.h>
56 #include <sys/sched.h>
59 #include <sys/sysctl.h>
60 #include <sys/unistd.h>
61 #include <sys/vnode.h>
62 #include <security/audit/audit.h>
63 #include <security/mac/mac_framework.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_page.h>
69 #include <vm/vm_pager.h>
70 #include <vm/swap_pager.h>
72 #include <fs/tmpfs/tmpfs_vnops.h>
73 #include <fs/tmpfs/tmpfs.h>
/* Attach tmpfs counters under the vfs.tmpfs sysctl tree (declared elsewhere). */
75 SYSCTL_DECL(_vfs_tmpfs);
/*
 * Number of times tmpfs_rename() had to drop its locks and restart due to
 * lock contention; exported read-only as vfs.tmpfs.rename_restarts.
 */
78 static volatile int tmpfs_rename_restarts;
79 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
80 __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
81 "Times rename had to restart due to lock contention");
/* malloc(9) type used for tmpfs extended-attribute structures. */
83 MALLOC_DEFINE(M_TMPFSEA, "tmpfs extattr", "tmpfs extattr structure");
/*
 * Callback for vn_vget_ino_gen(): allocate/reacquire the vnode for the
 * tmpfs node passed in 'arg' with the given lock flags, returning it in
 * *rvp.  NOTE(review): interior lines of this extract are elided (original
 * numbering jumps 86 -> 90); signature tail and braces are not shown here.
 */
86 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
90 return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
/*
 * Common lookup implementation shared by tmpfs_lookup() and
 * tmpfs_cached_lookup().  Resolves the component name in 'cnp' within the
 * directory vnode 'dvp', returning a locked vnode in *vpp on success.
 * Handles the special ".." (via vn_vget_ino_gen to respect lock order),
 * "." and whiteout cases, performs the VWRITE permission checks required
 * for CREATE/RENAME/DELETE operations, and optionally enters the result
 * into the name cache.  NOTE(review): this extract is missing interior
 * lines (original line numbers jump); treat as incomplete.
 */
94 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
96 struct tmpfs_dirent *de;
97 struct tmpfs_node *dnode, *pnode;
98 struct tmpfs_mount *tm;
101 /* Caller assumes responsibility for ensuring access (VEXEC). */
102 dnode = VP_TO_TMPFS_DIR(dvp);
105 /* We cannot be requesting the parent directory of the root node. */
106 MPASS(IMPLIES(dnode->tn_type == VDIR &&
107 dnode->tn_dir.tn_parent == dnode,
108 !(cnp->cn_flags & ISDOTDOT)));
110 TMPFS_ASSERT_LOCKED(dnode);
/* A NULL parent means the directory node was already removed. */
111 if (dnode->tn_dir.tn_parent == NULL) {
/*
 * ".." lookup: take a reference on the parent node so it cannot go away
 * while vn_vget_ino_gen() temporarily drops the dvp lock.
 */
115 if (cnp->cn_flags & ISDOTDOT) {
116 tm = VFS_TO_TMPFS(dvp->v_mount);
117 pnode = dnode->tn_dir.tn_parent;
118 tmpfs_ref_node(pnode);
119 error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
120 pnode, cnp->cn_lkflags, vpp);
121 tmpfs_free_node(tm, pnode);
124 } else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
129 de = tmpfs_dir_lookup(dnode, NULL, cnp);
/* A dirent with no node attached represents a whiteout entry. */
130 if (de != NULL && de->td_node == NULL)
131 cnp->cn_flags |= ISWHITEOUT;
132 if (de == NULL || de->td_node == NULL) {
134 * The entry was not found in the directory.
135 * This is OK if we are creating or renaming an
136 * entry and are working on the last component of
139 if ((cnp->cn_flags & ISLASTCN) &&
140 (cnp->cn_nameiop == CREATE || \
141 cnp->cn_nameiop == RENAME ||
142 (cnp->cn_nameiop == DELETE &&
143 cnp->cn_flags & DOWHITEOUT &&
144 cnp->cn_flags & ISWHITEOUT))) {
/* Creating/renaming requires write access to the directory. */
145 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
154 struct tmpfs_node *tnode;
157 * The entry was found, so get its associated
163 * If we are not at the last path component and
164 * found a non-directory or non-link entry (which
165 * may itself be pointing to a directory), raise
168 if ((tnode->tn_type != VDIR &&
169 tnode->tn_type != VLNK) &&
170 !(cnp->cn_flags & ISLASTCN)) {
176 * If we are deleting or renaming the entry, keep
177 * track of its tmpfs_dirent so that it can be
178 * easily deleted later.
180 if ((cnp->cn_flags & ISLASTCN) &&
181 (cnp->cn_nameiop == DELETE ||
182 cnp->cn_nameiop == RENAME)) {
183 error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
188 /* Allocate a new vnode on the matching entry. */
189 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
190 cnp->cn_lkflags, vpp);
/*
 * Sticky directory: deletion additionally requires ownership of the
 * directory or of the target (VADMIN on either).
 */
194 if ((dnode->tn_mode & S_ISTXT) &&
195 VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
196 curthread) && VOP_ACCESS(*vpp, VADMIN,
197 cnp->cn_cred, curthread)) {
204 error = tmpfs_alloc_vp(dvp->v_mount, tnode,
205 cnp->cn_lkflags, vpp);
213 * Store the result of this lookup in the cache. Avoid this if the
214 * request was for creation, as it does not improve timings on
217 if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
218 cache_enter(dvp, *vpp, cnp);
222 * If there were no errors, *vpp cannot be null and it must be
225 MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
/*
 * VOP_CACHEDLOOKUP handler: called by vfs_cache_lookup() on a name-cache
 * miss; delegates to the shared tmpfs_lookup1().
 */
231 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
234 return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
/*
 * VOP_LOOKUP handler for the non-namecache path: verify execute/search
 * permission on the directory first, then perform the real lookup via
 * tmpfs_lookup1().
 */
238 tmpfs_lookup(struct vop_lookup_args *v)
240 struct vnode *dvp = v->a_dvp;
241 struct vnode **vpp = v->a_vpp;
242 struct componentname *cnp = v->a_cnp;
245 /* Check accessibility of requested node as a first step. */
246 error = vn_dir_check_exec(dvp, cnp);
250 return (tmpfs_lookup1(dvp, vpp, cnp));
/*
 * VOP_CREATE handler: allocate a new regular file or socket node in dvp
 * and, on success, enter the new vnode into the name cache when requested.
 */
254 tmpfs_create(struct vop_create_args *v)
256 struct vnode *dvp = v->a_dvp;
257 struct vnode **vpp = v->a_vpp;
258 struct componentname *cnp = v->a_cnp;
259 struct vattr *vap = v->a_vap;
262 MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
264 error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
265 if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
266 cache_enter(dvp, *vpp, cnp);
/*
 * VOP_MKNOD handler: create a device or FIFO node.  Only VBLK, VCHR and
 * VFIFO types are accepted; anything else is rejected (error path elided
 * in this extract).
 */
271 tmpfs_mknod(struct vop_mknod_args *v)
273 struct vnode *dvp = v->a_dvp;
274 struct vnode **vpp = v->a_vpp;
275 struct componentname *cnp = v->a_cnp;
276 struct vattr *vap = v->a_vap;
278 if (vap->va_type != VBLK && vap->va_type != VCHR &&
279 vap->va_type != VFIFO)
282 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
/* File operations vector used for tmpfs-backed struct file (defined below). */
285 struct fileops tmpfs_fnops;
/*
 * VOP_OPEN handler: refuse to open nodes whose last link is gone or
 * append-only files opened for plain writing; otherwise create the backing
 * VM object and, for regular files opened through a struct file, install
 * tmpfs_fnops with a node reference held.
 */
288 tmpfs_open(struct vop_open_args *v)
291 struct tmpfs_node *node;
297 node = VP_TO_TMPFS_NODE(vp);
300 * The file is still active but all its names have been removed
301 * (e.g. by a "rmdir $(pwd)"). It cannot be opened any more as
302 * it is about to die.
304 if (node->tn_links < 1)
307 /* If the file is marked append-only, deny write requests. */
308 if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
312 /* For regular files, the call below is nop. */
313 KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
314 OBJ_DEAD) == 0, ("dead object"));
315 vnode_create_vobject(vp, node->tn_size, v->a_td);
319 MPASS(fp == NULL || fp->f_data == NULL);
/* Hook the struct file to tmpfs_fnops; the reference pairs with
 * the tmpfs_free_node() in tmpfs_fo_close(). */
320 if (error == 0 && fp != NULL && vp->v_type == VREG) {
321 tmpfs_ref_node(node);
322 finit_vnode(fp, mode, node, &tmpfs_fnops);
/*
 * VOP_CLOSE handler: flush pending timestamp updates for the node.
 * (Body largely elided in this extract.)
 */
329 tmpfs_close(struct vop_close_args *v)
331 struct vnode *vp = v->a_vp;
333 /* Update node times. */
/*
 * fo_close handler for tmpfs_fnops: drop the node reference taken in
 * tmpfs_open() and then chain to the generic vnode fileops close.
 */
340 tmpfs_fo_close(struct file *fp, struct thread *td)
342 struct tmpfs_node *node;
346 MPASS(node->tn_type == VREG);
347 tmpfs_free_node(node->tn_reg.tn_tmp, node);
349 return (vnops.fo_close(fp, td));
353 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
354 * the comment above cache_fplookup for details.
/*
 * Lockless (SMR-protected) VEXEC check for fast-path lookup.  Fast-succeeds
 * when all three execute bits are set; otherwise defers to
 * vaccess_vexec_smr().  Returns EAGAIN-style fallback when the node is gone
 * (elided in this extract).
 */
357 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
360 struct tmpfs_node *node;
365 node = VP_TO_TMPFS_NODE_SMR(vp);
366 if (__predict_false(node == NULL))
369 all_x = S_IXUSR | S_IXGRP | S_IXOTH;
/* tn_mode may be updated concurrently (see tmpfs_write()); load atomically. */
370 mode = atomic_load_short(&node->tn_mode);
371 if (__predict_true((mode & all_x) == all_x))
375 return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
/*
 * VOP_ACCESS handler: permission check against the node's mode/uid/gid via
 * vaccess(), with fast path for the common VEXEC-on-all-execute case,
 * read-only mount rejection for writes, and IMMUTABLE flag enforcement.
 */
379 tmpfs_access(struct vop_access_args *v)
381 struct vnode *vp = v->a_vp;
382 accmode_t accmode = v->a_accmode;
383 struct ucred *cred = v->a_cred;
384 mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
386 struct tmpfs_node *node;
388 MPASS(VOP_ISLOCKED(vp));
390 node = VP_TO_TMPFS_NODE(vp);
393 * Common case path lookup.
395 if (__predict_true(accmode == VEXEC && (node->tn_mode & all_x) == all_x))
398 switch (vp->v_type) {
/* Writes are denied on read-only mounts (per-type handling elided). */
404 if (accmode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) {
424 if (accmode & VWRITE && node->tn_flags & IMMUTABLE) {
429 error = vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
433 MPASS(VOP_ISLOCKED(vp));
/*
 * VOP_STAT handler: fill in struct stat directly from the tmpfs node,
 * bypassing the generic VOP_GETATTR-based path.  For regular files the
 * block count comes from the page count of the backing VM object, read
 * under the object lock to avoid a torn value.
 */
439 tmpfs_stat(struct vop_stat_args *v)
441 struct vnode *vp = v->a_vp;
442 struct stat *sb = v->a_sb;
443 struct tmpfs_node *node;
446 node = VP_TO_TMPFS_NODE(vp);
448 tmpfs_update_getattr(vp);
450 error = vop_stat_helper_pre(v);
451 if (__predict_false(error))
454 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
455 sb->st_ino = node->tn_id;
456 sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
457 sb->st_nlink = node->tn_links;
458 sb->st_uid = node->tn_uid;
459 sb->st_gid = node->tn_gid;
460 sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
461 node->tn_rdev : NODEV;
462 sb->st_size = node->tn_size;
463 sb->st_atim.tv_sec = node->tn_atime.tv_sec;
464 sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
465 sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
466 sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
467 sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
468 sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
469 sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
470 sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
471 sb->st_blksize = PAGE_SIZE;
472 sb->st_flags = node->tn_flags;
473 sb->st_gen = node->tn_gen;
474 if (vp->v_type == VREG) {
476 vm_object_t obj = node->tn_reg.tn_aobj;
478 /* Handle torn read */
479 VM_OBJECT_RLOCK(obj);
481 sb->st_blocks = ptoa(node->tn_reg.tn_pages);
483 VM_OBJECT_RUNLOCK(obj);
/* Non-regular nodes: approximate blocks from the byte size. */
486 sb->st_blocks = node->tn_size;
488 sb->st_blocks /= S_BLKSIZE;
489 return (vop_stat_helper_post(v, error));
/*
 * VOP_GETATTR handler: copy the node's attributes into the vattr.  As in
 * tmpfs_stat(), va_bytes for regular files is derived from the backing VM
 * object's page count under the object read lock.
 */
493 tmpfs_getattr(struct vop_getattr_args *v)
495 struct vnode *vp = v->a_vp;
496 struct vattr *vap = v->a_vap;
497 struct tmpfs_node *node;
499 node = VP_TO_TMPFS_NODE(vp);
501 tmpfs_update_getattr(vp);
503 vap->va_type = vp->v_type;
504 vap->va_mode = node->tn_mode;
505 vap->va_nlink = node->tn_links;
506 vap->va_uid = node->tn_uid;
507 vap->va_gid = node->tn_gid;
508 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
509 vap->va_fileid = node->tn_id;
510 vap->va_size = node->tn_size;
511 vap->va_blocksize = PAGE_SIZE;
512 vap->va_atime = node->tn_atime;
513 vap->va_mtime = node->tn_mtime;
514 vap->va_ctime = node->tn_ctime;
515 vap->va_birthtime = node->tn_birthtime;
516 vap->va_gen = node->tn_gen;
517 vap->va_flags = node->tn_flags;
518 vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
519 node->tn_rdev : NODEV;
520 if (vp->v_type == VREG) {
522 vm_object_t obj = node->tn_reg.tn_aobj;
524 VM_OBJECT_RLOCK(obj);
526 vap->va_bytes = ptoa(node->tn_reg.tn_pages);
528 VM_OBJECT_RUNLOCK(obj);
531 vap->va_bytes = node->tn_size;
/*
 * VOP_SETATTR handler: reject attributes that can never be set, then apply
 * each settable attribute group (flags, size, ownership, mode, times) in
 * turn through the corresponding tmpfs_ch*() helper, stopping at the first
 * error.
 */
539 tmpfs_setattr(struct vop_setattr_args *v)
541 struct vnode *vp = v->a_vp;
542 struct vattr *vap = v->a_vap;
543 struct ucred *cred = v->a_cred;
544 struct thread *td = curthread;
548 MPASS(VOP_ISLOCKED(vp));
549 ASSERT_VOP_IN_SEQC(vp);
553 /* Abort if any unsettable attribute is given. */
554 if (vap->va_type != VNON ||
555 vap->va_nlink != VNOVAL ||
556 vap->va_fsid != VNOVAL ||
557 vap->va_fileid != VNOVAL ||
558 vap->va_blocksize != VNOVAL ||
559 vap->va_gen != VNOVAL ||
560 vap->va_rdev != VNOVAL ||
561 vap->va_bytes != VNOVAL)
564 if (error == 0 && (vap->va_flags != VNOVAL))
565 error = tmpfs_chflags(vp, vap->va_flags, cred, td);
567 if (error == 0 && (vap->va_size != VNOVAL))
568 error = tmpfs_chsize(vp, vap->va_size, cred, td);
570 if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
571 error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
573 if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
574 error = tmpfs_chmod(vp, vap->va_mode, cred, td);
/* A timestamp counts as "given" only if both of its fields are set. */
576 if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
577 vap->va_atime.tv_nsec != VNOVAL) ||
578 (vap->va_mtime.tv_sec != VNOVAL &&
579 vap->va_mtime.tv_nsec != VNOVAL) ||
580 (vap->va_birthtime.tv_sec != VNOVAL &&
581 vap->va_birthtime.tv_nsec != VNOVAL)))
582 error = tmpfs_chtimes(vp, vap, cred, td);
585 * Update the node times. We give preference to the error codes
586 * generated by this function rather than the ones that may arise
591 MPASS(VOP_ISLOCKED(vp));
/*
 * VOP_READ handler: regular files only.  Marks the node accessed and copies
 * data out of the backing anonymous VM object via uiomove_object().
 */
597 tmpfs_read(struct vop_read_args *v)
601 struct tmpfs_node *node;
604 if (vp->v_type != VREG)
607 if (uio->uio_offset < 0)
609 node = VP_TO_TMPFS_NODE(vp);
610 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
611 return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
/*
 * Unlocked (page-cache) read path, entered only when VIRF_PGREAD is set on
 * the vnode.  Resolves the node under SMR protection and reads from the
 * backing object; the size is sampled before the copy and, per the comment
 * below, cannot shrink concurrently thanks to the rangelock.
 */
615 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
618 struct tmpfs_node *node;
624 VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
626 if (v->a_uio->uio_offset < 0)
632 node = VP_TO_TMPFS_NODE_SMR(vp);
635 MPASS(node->tn_type == VREG);
636 MPASS(node->tn_refcount >= 1);
637 object = node->tn_reg.tn_aobj;
641 MPASS(object->type == tmpfs_pager_type);
642 MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
644 if (!VN_IS_DOOMED(vp)) {
645 /* size cannot become shorter due to rangelock. */
646 size = node->tn_size;
647 tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
649 error = uiomove_object(object, size, v->a_uio);
/*
 * VOP_WRITE handler: validates offset/type, applies IO_APPEND and the file
 * size resource limit, grows the file if the write extends past EOF, copies
 * the data in, updates MODIFIED/CHANGED status, strips setuid/setgid when
 * an unprivileged writer modifies the file, and rolls the size back to
 * 'oldsize' on error.
 */
658 tmpfs_write(struct vop_write_args *v)
662 struct tmpfs_node *node;
670 ioflag = v->a_ioflag;
672 node = VP_TO_TMPFS_NODE(vp);
673 oldsize = node->tn_size;
675 if (uio->uio_offset < 0 || vp->v_type != VREG)
677 if (uio->uio_resid == 0)
679 if (ioflag & IO_APPEND)
680 uio->uio_offset = node->tn_size;
681 error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
682 tm_maxfilesize, &r, uio->uio_td);
684 vn_rlimit_fsizex_res(uio, r);
/* Extend the file first if the write reaches past the current EOF. */
688 if (uio->uio_offset + uio->uio_resid > node->tn_size) {
689 error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
695 error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
696 node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
697 node->tn_accessed = true;
/* Clear setuid/setgid on write unless the writer holds
 * PRIV_VFS_RETAINSUGID; tn_mode is updated atomically inside a
 * seqc write section for the benefit of lockless readers. */
698 if (node->tn_mode & (S_ISUID | S_ISGID)) {
699 if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
700 newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
701 vn_seqc_write_begin(vp);
702 atomic_store_short(&node->tn_mode, newmode);
703 vn_seqc_write_end(vp);
/* On error undo any extension performed above. */
707 (void)tmpfs_reg_resize(vp, oldsize, TRUE);
710 MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
711 MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
713 vn_rlimit_fsizex_res(uio, r);
/* VOP_DEALLOCATE handler: punch a hole in the file's backing store. */
718 tmpfs_deallocate(struct vop_deallocate_args *v)
720 return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
/*
 * VOP_FSYNC handler: tmpfs has no stable storage, so only the cached
 * mtime is reconciled with the backing object.
 */
724 tmpfs_fsync(struct vop_fsync_args *v)
726 struct vnode *vp = v->a_vp;
728 MPASS(VOP_ISLOCKED(vp));
730 tmpfs_check_mtime(vp);
/*
 * VOP_REMOVE handler: unlink a non-directory entry from its directory.
 * Refuses directories (use VOP_RMDIR) and nodes marked immutable,
 * append-only or nounlink; detaches and frees the dirent, optionally
 * leaving a whiteout, and marks the node's ctime changed.
 */
737 tmpfs_remove(struct vop_remove_args *v)
739 struct vnode *dvp = v->a_dvp;
740 struct vnode *vp = v->a_vp;
743 struct tmpfs_dirent *de;
744 struct tmpfs_mount *tmp;
745 struct tmpfs_node *dnode;
746 struct tmpfs_node *node;
748 MPASS(VOP_ISLOCKED(dvp));
749 MPASS(VOP_ISLOCKED(vp));
751 if (vp->v_type == VDIR) {
756 dnode = VP_TO_TMPFS_DIR(dvp);
757 node = VP_TO_TMPFS_NODE(vp);
758 tmp = VFS_TO_TMPFS(vp->v_mount);
759 de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
762 /* Files marked as immutable or append-only cannot be deleted. */
763 if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
764 (dnode->tn_flags & APPEND)) {
769 /* Remove the entry from the directory; as it is a file, we do not
770 * have to change the number of hard links of the directory. */
771 tmpfs_dir_detach(dvp, de);
772 if (v->a_cnp->cn_flags & DOWHITEOUT)
773 tmpfs_dir_whiteout_add(dvp, v->a_cnp);
775 /* Free the directory entry we just deleted. Note that the node
776 * referred by it will not be removed until the vnode is really
778 tmpfs_free_dirent(tmp, de);
780 node->tn_status |= TMPFS_NODE_CHANGED;
781 node->tn_accessed = true;
/*
 * VOP_LINK handler: create a new hard link (directory entry) in tdvp for
 * the existing node vp.  Rejects nodes at the link-count limit or marked
 * immutable/append-only; removes any conflicting whiteout before attaching
 * the new dirent and updating the node's ctime.
 */
789 tmpfs_link(struct vop_link_args *v)
791 struct vnode *dvp = v->a_tdvp;
792 struct vnode *vp = v->a_vp;
793 struct componentname *cnp = v->a_cnp;
796 struct tmpfs_dirent *de;
797 struct tmpfs_node *node;
799 MPASS(VOP_ISLOCKED(dvp));
800 MPASS(dvp != vp); /* XXX When can this be false? */
801 node = VP_TO_TMPFS_NODE(vp);
803 /* Ensure that we do not overflow the maximum number of links imposed
805 MPASS(node->tn_links <= TMPFS_LINK_MAX);
806 if (node->tn_links == TMPFS_LINK_MAX) {
811 /* We cannot create links of files marked immutable or append-only. */
812 if (node->tn_flags & (IMMUTABLE | APPEND)) {
817 /* Allocate a new directory entry to represent the node. */
818 error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
819 cnp->cn_nameptr, cnp->cn_namelen, &de);
823 /* Insert the new directory entry into the appropriate directory. */
824 if (cnp->cn_flags & ISWHITEOUT)
825 tmpfs_dir_whiteout_remove(dvp, cnp);
826 tmpfs_dir_attach(dvp, de);
828 /* vp link count has changed, so update node times. */
829 node->tn_status |= TMPFS_NODE_CHANGED;
839 * We acquire all but fdvp locks using non-blocking acquisitions. If we
840 * fail to acquire any lock in the path we will drop all held locks,
841 * acquire the new lock in a blocking fashion, and then release it and
842 * restart the rename. This acquire/release step ensures that we do not
843 * spin on a lock waiting for release. On error release all vnode locks
844 * and decrement references the way tmpfs_rename() would do.
/*
 * Deadlock-avoidance helper for tmpfs_rename(): re-acquire all four vnode
 * locks in a safe order (see the block comment above).  Non-blocking
 * attempts are used everywhere except fdvp; on contention the held locks
 * are dropped, the contended lock is taken blocking once (to wait out the
 * holder), and the whole sequence restarts.  Restart counts feed the
 * vfs.tmpfs.rename_restarts sysctl.  NOTE(review): interior lines elided;
 * *fvpp/*tvpp re-resolution error paths are not fully visible here.
 */
847 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
848 struct vnode *tdvp, struct vnode **tvpp,
849 struct componentname *fcnp, struct componentname *tcnp)
853 struct tmpfs_dirent *de;
854 int error, restarts = 0;
857 if (*tvpp != NULL && *tvpp != tdvp)
863 error = vn_lock(fdvp, LK_EXCLUSIVE);
/* tdvp: try non-blocking first; on failure take it blocking and restart. */
866 if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
868 error = vn_lock(tdvp, LK_EXCLUSIVE);
875 * Re-resolve fvp to be certain it still exists and fetch the
878 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
/* Renaming "." or ".." is never allowed. */
882 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
883 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
889 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
895 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
900 * Concurrent rename race.
915 * Re-resolve tvp and acquire the vnode lock if present.
917 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
919 * If tvp disappeared we just carry on.
921 if (de == NULL && *tvpp != NULL) {
926 * Get the tvp ino if the lookup succeeded. We may have to restart
927 * if the non-blocking acquire fails.
931 error = tmpfs_alloc_vp(mp, de->td_node,
932 LK_EXCLUSIVE | LK_NOWAIT, &nvp);
941 error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
947 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
956 tmpfs_rename_restarts += restarts;
966 tmpfs_rename_restarts += restarts;
/*
 * VOP_RENAME handler.  High-level flow: reject cross-device renames,
 * acquire fdvp's lock (falling back to tmpfs_rename_relock() on
 * contention), refuse "."/".." manipulation, validate flag and type
 * constraints on source/target, detach the source dirent, adjust parent
 * pointers and link counts when moving a directory between parents (with a
 * loop-detection walk to prevent moving a directory under itself), rename
 * the dirent if the name changed, remove any overwritten target entry, and
 * finally attach the dirent in the target directory and update the name
 * cache.  All four vnodes are wrapped in vn_seqc write sections while
 * being modified.  NOTE(review): this extract is heavily elided (original
 * line numbers jump); error/unlock paths are not fully visible.
 */
972 tmpfs_rename(struct vop_rename_args *v)
974 struct vnode *fdvp = v->a_fdvp;
975 struct vnode *fvp = v->a_fvp;
976 struct componentname *fcnp = v->a_fcnp;
977 struct vnode *tdvp = v->a_tdvp;
978 struct vnode *tvp = v->a_tvp;
979 struct componentname *tcnp = v->a_tcnp;
981 struct tmpfs_dirent *de;
982 struct tmpfs_mount *tmp;
983 struct tmpfs_node *fdnode;
984 struct tmpfs_node *fnode;
985 struct tmpfs_node *tnode;
986 struct tmpfs_node *tdnode;
990 MPASS(VOP_ISLOCKED(tdvp));
991 MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
993 want_seqc_end = false;
996 * Disallow cross-device renames.
997 * XXX Why isn't this done by the caller?
999 if (fvp->v_mount != tdvp->v_mount ||
1000 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
1005 /* If source and target are the same file, there is nothing to do. */
1012 * If we need to move the directory between entries, lock the
1013 * source so that we can safely operate on it.
1015 if (fdvp != tdvp && fdvp != tvp) {
1016 if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1017 error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1021 ASSERT_VOP_ELOCKED(fdvp,
1022 "tmpfs_rename: fdvp not locked");
1023 ASSERT_VOP_ELOCKED(tdvp,
1024 "tmpfs_rename: tdvp not locked");
1026 ASSERT_VOP_ELOCKED(tvp,
1027 "tmpfs_rename: tvp not locked");
1036 * Avoid manipulating '.' and '..' entries.
1038 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1039 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')) {
1045 vn_seqc_write_begin(tvp);
1046 vn_seqc_write_begin(tdvp);
1047 vn_seqc_write_begin(fvp);
1048 vn_seqc_write_begin(fdvp);
1049 want_seqc_end = true;
1051 tmp = VFS_TO_TMPFS(tdvp->v_mount);
1052 tdnode = VP_TO_TMPFS_DIR(tdvp);
1053 tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1054 fdnode = VP_TO_TMPFS_DIR(fdvp);
1055 fnode = VP_TO_TMPFS_NODE(fvp);
1056 de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1059 * Entry can disappear before we lock fdvp.
1062 if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1063 (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1069 MPASS(de->td_node == fnode);
1072 * If re-naming a directory to another preexisting directory
1073 * ensure that the target directory is empty so that its
1074 * removal causes no side effects.
1075 * Kern_rename guarantees the destination to be a directory
1076 * if the source is one.
1079 MPASS(tnode != NULL);
1081 if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1082 (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1087 if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
/* Target directory must be empty to be overwritten. */
1088 if (tnode->tn_size > 0) {
1092 } else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1095 } else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1099 MPASS(fnode->tn_type != VDIR &&
1100 tnode->tn_type != VDIR);
1104 if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1105 || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1111 * Ensure that we have enough memory to hold the new name, if it
1112 * has to be changed.
1114 if (fcnp->cn_namelen != tcnp->cn_namelen ||
1115 bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1116 newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1121 * If the node is being moved to another directory, we have to do
1124 if (fdnode != tdnode) {
1126 * In case we are moving a directory, we have to adjust its
1127 * parent to point to the new parent.
1129 if (de->td_node->tn_type == VDIR) {
1130 struct tmpfs_node *n;
1133 * Ensure the target directory is not a child of the
1134 * directory being moved. Otherwise, we'd end up
1139 * TMPFS_LOCK guaranties that no nodes are freed while
1140 * traversing the list. Nodes can only be marked as
1141 * removed: tn_parent == NULL.
/* Walk up from tdnode toward the root; hitting fnode means a loop. */
1145 while (n != n->tn_dir.tn_parent) {
1146 struct tmpfs_node *parent;
1149 TMPFS_NODE_UNLOCK(n);
1152 if (newname != NULL)
1153 free(newname, M_TMPFSNAME);
1156 parent = n->tn_dir.tn_parent;
1157 TMPFS_NODE_UNLOCK(n);
1158 if (parent == NULL) {
1162 TMPFS_NODE_LOCK(parent);
1163 if (parent->tn_dir.tn_parent == NULL) {
1164 TMPFS_NODE_UNLOCK(parent);
1173 if (newname != NULL)
1174 free(newname, M_TMPFSNAME);
1177 TMPFS_NODE_UNLOCK(n);
1179 /* Adjust the parent pointer. */
1180 TMPFS_VALIDATE_DIR(fnode);
1181 TMPFS_NODE_LOCK(de->td_node);
1182 de->td_node->tn_dir.tn_parent = tdnode;
1183 TMPFS_NODE_UNLOCK(de->td_node);
1186 * As a result of changing the target of the '..'
1187 * entry, the link count of the source and target
1188 * directories has to be adjusted.
1190 TMPFS_NODE_LOCK(tdnode);
1191 TMPFS_ASSERT_LOCKED(tdnode);
1193 TMPFS_NODE_UNLOCK(tdnode);
1195 TMPFS_NODE_LOCK(fdnode);
1196 TMPFS_ASSERT_LOCKED(fdnode);
1198 TMPFS_NODE_UNLOCK(fdnode);
1203 * Do the move: just remove the entry from the source directory
1204 * and insert it into the target one.
1206 tmpfs_dir_detach(fdvp, de);
1208 if (fcnp->cn_flags & DOWHITEOUT)
1209 tmpfs_dir_whiteout_add(fdvp, fcnp);
1210 if (tcnp->cn_flags & ISWHITEOUT)
1211 tmpfs_dir_whiteout_remove(tdvp, tcnp);
1214 * If the name has changed, we need to make it effective by changing
1215 * it in the directory entry.
1217 if (newname != NULL) {
1218 MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1220 free(de->ud.td_name, M_TMPFSNAME);
1221 de->ud.td_name = newname;
1222 tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1224 fnode->tn_status |= TMPFS_NODE_CHANGED;
1225 tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1229 * If we are overwriting an entry, we have to remove the old one
1230 * from the target directory.
1233 struct tmpfs_dirent *tde;
1235 /* Remove the old entry from the target directory. */
1236 tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1237 tmpfs_dir_detach(tdvp, tde);
1239 /* Update node's ctime because of possible hardlinks. */
1240 tnode->tn_status |= TMPFS_NODE_CHANGED;
1244 * Free the directory entry we just deleted. Note that the
1245 * node referred by it will not be removed until the vnode is
1248 tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1251 tmpfs_dir_attach(tdvp, de);
1253 if (tmpfs_use_nc(fvp)) {
1254 cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1260 if (fdvp != tdvp && fdvp != tvp)
1264 if (want_seqc_end) {
1266 vn_seqc_write_end(tvp);
1267 vn_seqc_write_end(tdvp);
1268 vn_seqc_write_end(fvp);
1269 vn_seqc_write_end(fdvp);
1273 * Release target nodes.
1274 * XXX: I don't understand when tdvp can be the same as tvp, but
1275 * other code takes care of this...
1284 /* Release source nodes. */
/*
 * VOP_MKDIR handler: create a new directory node; all the work is done by
 * tmpfs_alloc_file().
 */
1292 tmpfs_mkdir(struct vop_mkdir_args *v)
1294 struct vnode *dvp = v->a_dvp;
1295 struct vnode **vpp = v->a_vpp;
1296 struct componentname *cnp = v->a_cnp;
1297 struct vattr *vap = v->a_vap;
1299 MPASS(vap->va_type == VDIR);
1301 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
/*
 * VOP_RMDIR handler: remove an empty directory.  Checks emptiness and the
 * APPEND/IMMUTABLE/NOUNLINK flags, detaches the dirent (optionally adding
 * a whiteout), clears the node's parent pointer so no new vnode can be
 * allocated for it, updates timestamps on both nodes, purges the name
 * cache, and frees the dirent.
 */
1305 tmpfs_rmdir(struct vop_rmdir_args *v)
1307 struct vnode *dvp = v->a_dvp;
1308 struct vnode *vp = v->a_vp;
1311 struct tmpfs_dirent *de;
1312 struct tmpfs_mount *tmp;
1313 struct tmpfs_node *dnode;
1314 struct tmpfs_node *node;
1316 MPASS(VOP_ISLOCKED(dvp));
1317 MPASS(VOP_ISLOCKED(vp));
1319 tmp = VFS_TO_TMPFS(dvp->v_mount);
1320 dnode = VP_TO_TMPFS_DIR(dvp);
1321 node = VP_TO_TMPFS_DIR(vp);
1323 /* Directories with more than two entries ('.' and '..') cannot be
1325 if (node->tn_size > 0) {
1330 if ((dnode->tn_flags & APPEND)
1331 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1336 /* This invariant holds only if we are not trying to remove "..".
1337 * We checked for that above so this is safe now. */
1338 MPASS(node->tn_dir.tn_parent == dnode)
1340 /* Get the directory entry associated with node (vp). This was
1341 * filled by tmpfs_lookup while looking up the entry. */
1342 de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
1343 MPASS(TMPFS_DIRENT_MATCHES(de,
1344 v->a_cnp->cn_nameptr,
1345 v->a_cnp->cn_namelen));
1347 /* Check flags to see if we are allowed to remove the directory. */
1348 if ((dnode->tn_flags & APPEND) != 0 ||
1349 (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
1354 /* Detach the directory entry from the directory (dnode). */
1355 tmpfs_dir_detach(dvp, de);
1356 if (v->a_cnp->cn_flags & DOWHITEOUT)
1357 tmpfs_dir_whiteout_add(dvp, v->a_cnp);
1359 /* No vnode should be allocated for this entry from this point */
1360 TMPFS_NODE_LOCK(node);
1362 node->tn_dir.tn_parent = NULL;
1363 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1364 node->tn_accessed = true;
1366 TMPFS_NODE_UNLOCK(node);
1368 TMPFS_NODE_LOCK(dnode);
1370 dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1371 dnode->tn_accessed = true;
1372 TMPFS_NODE_UNLOCK(dnode);
1374 if (tmpfs_use_nc(dvp)) {
1375 cache_vop_rmdir(dvp, vp);
1378 /* Free the directory entry we just deleted. Note that the node
1379 * referred by it will not be removed until the vnode is really
1381 tmpfs_free_dirent(tmp, de);
1383 /* Release the deleted vnode (will destroy the node, notify
1384 * interested parties and clean it from the cache). */
1386 dnode->tn_status |= TMPFS_NODE_CHANGED;
/*
 * VOP_SYMLINK handler: create a symbolic link whose target text is
 * 'a_target'.  Works around the upper layer not setting va_type to VLNK
 * (see the XXX below).
 */
1396 tmpfs_symlink(struct vop_symlink_args *v)
1398 struct vnode *dvp = v->a_dvp;
1399 struct vnode **vpp = v->a_vpp;
1400 struct componentname *cnp = v->a_cnp;
1401 struct vattr *vap = v->a_vap;
1402 const char *target = v->a_target;
1404 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1405 MPASS(vap->va_type == VLNK);
1407 vap->va_type = VLNK;
1410 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
/*
 * VOP_READDIR handler: emit directory entries into the uio via
 * tmpfs_dir_getdents().  When the caller (NFS/compat) wants cookies, a
 * worst-case cookie array is allocated up front and freed again on error.
 * EJUSTRETURN from the helper means the buffer filled before EOF; that is
 * success if at least one entry was copied out.
 */
1414 tmpfs_readdir(struct vop_readdir_args *va)
1418 struct tmpfs_mount *tm;
1419 struct tmpfs_node *node;
1421 int *eofflag, *ncookies;
1423 int error, maxcookies;
1427 eofflag = va->a_eofflag;
1428 cookies = va->a_cookies;
1429 ncookies = va->a_ncookies;
1431 /* This operation only makes sense on directory nodes. */
1432 if (vp->v_type != VDIR)
1436 node = VP_TO_TMPFS_DIR(vp);
1437 tm = VFS_TO_TMPFS(vp->v_mount);
1439 startresid = uio->uio_resid;
1441 /* Allocate cookies for NFS and compat modules. */
1442 if (cookies != NULL && ncookies != NULL) {
1443 maxcookies = howmany(node->tn_size,
1444 sizeof(struct tmpfs_dirent)) + 2;
1445 *cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1450 if (cookies == NULL)
1451 error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1453 error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1456 /* Buffer was filled without hitting EOF. */
1457 if (error == EJUSTRETURN)
1458 error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1460 if (error != 0 && cookies != NULL && ncookies != NULL) {
1461 free(*cookies, M_TEMP);
1466 if (eofflag != NULL)
1468 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
/*
 * VOP_READLINK handler: copy the symlink target text out of the node and
 * mark the node accessed.
 */
1474 tmpfs_readlink(struct vop_readlink_args *v)
1476 struct vnode *vp = v->a_vp;
1477 struct uio *uio = v->a_uio;
1480 struct tmpfs_node *node;
1482 MPASS(uio->uio_offset == 0);
1483 MPASS(vp->v_type == VLNK);
1485 node = VP_TO_TMPFS_NODE(vp);
1487 error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1489 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1495 * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1496 * the comment above cache_fplookup for details.
1498 * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
/*
 * Lockless symlink resolution for fast-path lookup.  Only proceeds when
 * tn_link_smr indicates the target buffer is stable under SMR; otherwise
 * falls back to the locked path (fallback returns elided in this extract).
 */
1501 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1504 struct tmpfs_node *node;
1508 node = VP_TO_TMPFS_NODE_SMR(vp);
1509 if (__predict_false(node == NULL))
1511 if (!atomic_load_char(&node->tn_link_smr))
1513 symlink = atomic_load_ptr(&node->tn_link_target);
1514 if (symlink == NULL)
1517 return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
/*
 * VOP_INACTIVE handler: if the node has no remaining links, arrange for
 * its reclamation (elided); otherwise reconcile the cached mtime.
 */
1521 tmpfs_inactive(struct vop_inactive_args *v)
1524 struct tmpfs_node *node;
1527 node = VP_TO_TMPFS_NODE(vp);
1528 if (node->tn_links == 0)
1531 tmpfs_check_mtime(vp);
/*
 * VOP_NEED_INACTIVE handler: inactivation is needed when the node is
 * unlinked or, for regular files, when the backing object has been dirtied
 * (generation differs from cleangeneration).
 */
1536 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1539 struct tmpfs_node *node;
1540 struct vm_object *obj;
1543 node = VP_TO_TMPFS_NODE(vp);
1544 if (node->tn_links == 0)
1546 if (vp->v_type == VREG) {
1548 if (obj->generation != obj->cleangeneration)
/*
 * VOP_RECLAIM handler: dissociate the vnode from its tmpfs node.  Destroys
 * the backing VM object for regular files and, when the node is unlinked
 * and no other thread is allocating a vnode for it, marks it DOOMED and
 * frees it.
 */
1557 tmpfs_reclaim(struct vop_reclaim_args *v)
1560 struct tmpfs_mount *tmp;
1561 struct tmpfs_node *node;
1565 node = VP_TO_TMPFS_NODE(vp);
1566 tmp = VFS_TO_TMPFS(vp->v_mount);
1568 if (vp->v_type == VREG)
1569 tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1570 vp->v_object = NULL;
1573 TMPFS_NODE_LOCK(node);
1577 * If the node referenced by this vnode was deleted by the user,
1578 * we must free its associated data structures (now that the vnode
1579 * is being reclaimed).
1582 if (node->tn_links == 0 &&
1583 (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1584 node->tn_vpstate = TMPFS_VNODE_DOOMED;
1585 unlock = !tmpfs_free_node_locked(tmp, node, true);
1589 TMPFS_NODE_UNLOCK(node);
1593 MPASS(vp->v_data == NULL);
/*
 * VOP_PRINT: dump node identity, flags, link count, mode, ownership,
 * size and status for debugging (e.g. from ddb's show vnode path).
 */
1598 tmpfs_print(struct vop_print_args *v)
1600 struct vnode *vp = v->a_vp;
1602 struct tmpfs_node *node;
1604 node = VP_TO_TMPFS_NODE(vp);
1606 printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1607 node, node->tn_flags, (uintmax_t)node->tn_links);
1608 printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1609 node->tn_mode, node->tn_uid, node->tn_gid,
1610 (intmax_t)node->tn_size, node->tn_status);
/* FIFOs additionally print their fifoinfo (call elided in excerpt). */
1612 if (vp->v_type == VFIFO)
/*
 * VOP_PATHCONF: report tmpfs-specific configuration limits; anything
 * not handled locally falls through to vop_stdpathconf().
 */
1621 tmpfs_pathconf(struct vop_pathconf_args *v)
1623 struct vnode *vp = v->a_vp;
1624 int name = v->a_name;
1625 long *retval = v->a_retval;
1633 *retval = TMPFS_LINK_MAX;
1636 case _PC_SYMLINK_MAX:
1637 *retval = MAXPATHLEN;
/* NOTE(review): condition apparently gates a dir/FIFO-only pathconf
 * name; the enclosing case label is elided in this excerpt. */
1645 if (vp->v_type == VDIR || vp->v_type == VFIFO)
1651 case _PC_CHOWN_RESTRICTED:
1663 case _PC_FILESIZEBITS:
1667 case _PC_MIN_HOLE_SIZE:
/* Holes are tracked at page granularity. */
1668 *retval = PAGE_SIZE;
1672 error = vop_stdpathconf(v);
/*
 * VOP_VPTOFH: build an NFS-style file handle from the node's id and
 * generation number.
 */
1679 tmpfs_vptofh(struct vop_vptofh_args *ap)
1682 IN struct vnode *a_vp;
1683 IN struct fid *a_fhp;
1687 struct tmpfs_fid_data tfd;
1688 struct tmpfs_node *node;
1691 node = VP_TO_TMPFS_NODE(ap->a_vp);
1693 fhp->fid_len = sizeof(tfd);
1696 * Copy into fid_data from the stack to avoid unaligned pointer use.
1697 * See the comment in sys/mount.h on struct fid for details.
1699 tfd.tfd_id = node->tn_id;
1700 tfd.tfd_gen = node->tn_gen;
1701 memcpy(fhp->fid_data, &tfd, fhp->fid_len);
/*
 * VOP_WHITEOUT: manage whiteout directory entries (used by union-type
 * mounts).  Dispatches on a_flags; the LOOKUP/CREATE/DELETE case labels
 * themselves are elided in this excerpt.
 */
1707 tmpfs_whiteout(struct vop_whiteout_args *ap)
1709 struct vnode *dvp = ap->a_dvp;
1710 struct componentname *cnp = ap->a_cnp;
1711 struct tmpfs_dirent *de;
1713 switch (ap->a_flags) {
1717 de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
/* A NULL td_node marks an existing whiteout entry. */
1719 return (de->td_node == NULL ? 0 : EEXIST);
1720 return (tmpfs_dir_whiteout_add(dvp, cnp));
1722 tmpfs_dir_whiteout_remove(dvp, cnp);
1725 panic("tmpfs_whiteout: unknown op");
/*
 * Scan directory tnp for the entry whose node is tn; on success the
 * matching dirent is returned via *pde (success/failure returns are
 * elided in this excerpt).  Helper for VOP_VPTOCNP.
 */
1730 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1731 struct tmpfs_dirent **pde)
1733 struct tmpfs_dir_cursor dc;
1734 struct tmpfs_dirent *de;
1736 for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1737 de = tmpfs_dir_next(tnp, &dc)) {
1738 if (de->td_node == tn) {
/*
 * Instantiate/lock the parent directory vnode (*dvp) for tnp, find the
 * entry naming tn within it, and copy the component name backwards into
 * buf (the name ends at *buflen and *buflen is moved down by its
 * length).  Helper for VOP_VPTOCNP.
 */
1747 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1748 struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1750 struct tmpfs_dirent *de;
1753 error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
1757 error = tmpfs_vptocnp_dir(tn, tnp, &de);
/* Names are filled from the end of the caller's buffer backwards. */
1760 i -= de->td_namelen;
1764 bcopy(de->ud.td_name, buf + i, de->td_namelen);
/*
 * VOP_VPTOCNP: resolve the name and parent of vp.  Directories know
 * their parent directly (tn_dir.tn_parent); for other node types every
 * in-use directory on the mount is scanned for an entry pointing at the
 * node.  Node references are taken around each candidate so the list
 * walk survives unlocking, and the walk restarts when the list link is
 * invalidated.  This fragment elides several branches; the locking
 * protocol below is intricate and order-sensitive.
 */
1781 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1783 struct vnode *vp, **dvp;
1784 struct tmpfs_node *tn, *tnp, *tnp1;
1785 struct tmpfs_dirent *de;
1786 struct tmpfs_mount *tm;
1794 buflen = ap->a_buflen;
1796 tm = VFS_TO_TMPFS(vp->v_mount);
1797 tn = VP_TO_TMPFS_NODE(vp);
/* Fast path: a directory records its parent. */
1798 if (tn->tn_type == VDIR) {
1799 tnp = tn->tn_dir.tn_parent;
1802 tmpfs_ref_node(tnp);
1803 error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1805 tmpfs_free_node(tm, tnp);
/* Slow path: search every used directory node for an entry -> tn. */
1811 LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1812 if (tnp->tn_type != VDIR)
1814 TMPFS_NODE_LOCK(tnp);
1815 tmpfs_ref_node(tnp);
1818 * tn_vnode cannot be instantiated while we hold the
1819 * node lock, so the directory cannot be changed while
1820 * we iterate over it. Do this to avoid instantiating
1821 * vnode for directories which cannot point to our
/* Only directories without a vnode are scanned under the node lock. */
1824 error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1828 TMPFS_NODE_UNLOCK(tnp);
1830 error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1833 tmpfs_free_node(tm, tnp);
/* vp may have been doomed while its lock was dropped in _fill(). */
1836 if (VN_IS_DOOMED(vp)) {
1837 tmpfs_free_node(tm, tnp);
1841 TMPFS_NODE_LOCK(tnp);
/* Drop our reference; a true return means the node was freed. */
1843 if (tmpfs_free_node_locked(tm, tnp, false)) {
1846 KASSERT(tnp->tn_refcount > 0,
1847 ("node %p refcount zero", tnp));
/* If still on the list, continue from the next node, else restart. */
1848 if (tnp->tn_attached) {
1849 tnp1 = LIST_NEXT(tnp, tn_entries);
1850 TMPFS_NODE_UNLOCK(tnp);
1852 TMPFS_NODE_UNLOCK(tnp);
1853 goto restart_locked;
/* Release an extended attribute record: its name, value, and the
 * structure itself (free(NULL) is a no-op for an absent value). */
1862 tmpfs_extattr_free(struct tmpfs_extattr *ea)
1864 free(ea->ea_name, M_TMPFSEA);
1865 free(ea->ea_value, M_TMPFSEA);
1866 free(ea, M_TMPFSEA);
/*
 * Account size bytes (may be negative on release) against the mount's
 * extattr memory budget; growth is refused when either the overall
 * pages-available check or the per-mount tm_ea_memory_max cap would be
 * exceeded.  Success/failure returns are elided in this excerpt.
 */
1870 tmpfs_extattr_update_mem(struct tmpfs_mount *tmp, ssize_t size)
1874 !tmpfs_pages_check_avail(tmp, howmany(size, PAGE_SIZE))) {
1878 if (tmp->tm_ea_memory_inuse + size > tmp->tm_ea_memory_max) {
1882 tmp->tm_ea_memory_inuse += size;
/*
 * VOP_DELETEEXTATTR: remove a named extended attribute after a VWRITE
 * credential check, returning its memory to the mount's extattr budget.
 * Not supported on device special files.
 */
1888 tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
1890 struct vnode *vp = ap->a_vp;
1891 struct tmpfs_mount *tmp;
1892 struct tmpfs_node *node;
1893 struct tmpfs_extattr *ea;
1898 node = VP_TO_TMPFS_NODE(vp);
1899 tmp = VFS_TO_TMPFS(vp->v_mount);
1900 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1901 return (EOPNOTSUPP);
1902 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1903 ap->a_cred, ap->a_td, VWRITE);
/* Reject empty and over-long attribute names. */
1906 if (ap->a_name == NULL || ap->a_name[0] == '\0')
1908 namelen = strlen(ap->a_name);
1909 if (namelen > EXTATTR_MAXNAMELEN)
/* Find the attribute by (namespace, name); ea_name is not NUL-terminated. */
1912 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1913 if (ea->ea_namespace == ap->a_attrnamespace &&
1914 namelen == ea->ea_namelen &&
1915 memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1921 LIST_REMOVE(ea, ea_extattrs);
/* Negative delta returns the record's footprint to the budget. */
1922 diff = -(sizeof(struct tmpfs_extattr) + namelen + ea->ea_size);
1923 tmpfs_extattr_update_mem(tmp, diff);
1924 tmpfs_extattr_free(ea);
/*
 * VOP_GETEXTATTR: return an attribute's size (a_size) and/or copy its
 * value into a_uio, after a VREAD credential check.  Not supported on
 * device special files.
 */
1929 tmpfs_getextattr(struct vop_getextattr_args *ap)
1931 struct vnode *vp = ap->a_vp;
1932 struct tmpfs_node *node;
1933 struct tmpfs_extattr *ea;
1937 node = VP_TO_TMPFS_NODE(vp);
1938 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1939 return (EOPNOTSUPP);
1940 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1941 ap->a_cred, ap->a_td, VREAD);
1944 if (ap->a_name == NULL || ap->a_name[0] == '\0')
1946 namelen = strlen(ap->a_name);
1947 if (namelen > EXTATTR_MAXNAMELEN)
/* Linear search by (namespace, name), same keying as delete/set. */
1950 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1951 if (ea->ea_namespace == ap->a_attrnamespace &&
1952 namelen == ea->ea_namelen &&
1953 memcmp(ap->a_name, ea->ea_name, namelen) == 0)
/* Caller may request just the size, just the data, or both. */
1959 if (ap->a_size != NULL)
1960 *ap->a_size = ea->ea_size;
1961 if (ap->a_uio != NULL && ea->ea_size != 0)
1962 error = uiomove(ea->ea_value, ea->ea_size, ap->a_uio);
/*
 * VOP_LISTEXTATTR: enumerate attribute names in one namespace.  Each
 * entry is emitted in the standard length-prefixed format: a one-byte
 * name length followed by the (unterminated) name.  a_size, when
 * supplied, accumulates the total bytes needed.
 */
1967 tmpfs_listextattr(struct vop_listextattr_args *ap)
1969 struct vnode *vp = ap->a_vp;
1970 struct tmpfs_node *node;
1971 struct tmpfs_extattr *ea;
1974 node = VP_TO_TMPFS_NODE(vp);
1975 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1976 return (EOPNOTSUPP);
1977 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1978 ap->a_cred, ap->a_td, VREAD);
1981 if (ap->a_size != NULL)
1984 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1985 if (ea->ea_namespace != ap->a_attrnamespace)
1987 if (ap->a_size != NULL)
1988 *ap->a_size += ea->ea_namelen + 1;
1989 if (ap->a_uio != NULL) {
/* One-byte length prefix, then the raw name bytes. */
1990 error = uiomove(&ea->ea_namelen, 1, ap->a_uio);
1993 error = uiomove(ea->ea_name, ea->ea_namelen, ap->a_uio);
/*
 * VOP_SETEXTATTR: create or replace a named attribute.  The net memory
 * delta (new record minus any replaced record) is charged against the
 * mount's extattr budget before allocation; on a later uiomove failure
 * the charge is rolled back and the new record freed.  The old record,
 * if any, is removed only after the new one is fully built.
 */
2003 tmpfs_setextattr(struct vop_setextattr_args *ap)
2005 struct vnode *vp = ap->a_vp;
2006 struct tmpfs_mount *tmp;
2007 struct tmpfs_node *node;
2008 struct tmpfs_extattr *ea;
2009 struct tmpfs_extattr *new_ea;
2015 node = VP_TO_TMPFS_NODE(vp);
2016 tmp = VFS_TO_TMPFS(vp->v_mount);
2017 attr_size = ap->a_uio->uio_resid;
2019 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2020 return (EOPNOTSUPP);
2021 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2022 ap->a_cred, ap->a_td, VWRITE);
2025 if (ap->a_name == NULL || ap->a_name[0] == '\0')
2027 namelen = strlen(ap->a_name);
2028 if (namelen > EXTATTR_MAXNAMELEN)
/* If the name already exists, credit the old record's footprint. */
2031 LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2032 if (ea->ea_namespace == ap->a_attrnamespace &&
2033 namelen == ea->ea_namelen &&
2034 memcmp(ap->a_name, ea->ea_name, namelen) == 0) {
2035 diff -= sizeof(struct tmpfs_extattr) + ea->ea_namelen +
2041 diff += sizeof(struct tmpfs_extattr) + namelen + attr_size;
2042 if (!tmpfs_extattr_update_mem(tmp, diff))
2044 new_ea = malloc(sizeof(struct tmpfs_extattr), M_TMPFSEA, M_WAITOK);
2045 new_ea->ea_namespace = ap->a_attrnamespace;
/* ea_name carries exactly ea_namelen bytes, no NUL terminator. */
2046 new_ea->ea_name = malloc(namelen, M_TMPFSEA, M_WAITOK);
2047 new_ea->ea_namelen = namelen;
2048 memcpy(new_ea->ea_name, ap->a_name, namelen);
2049 if (attr_size != 0) {
2050 new_ea->ea_value = malloc(attr_size, M_TMPFSEA, M_WAITOK);
2051 new_ea->ea_size = attr_size;
2052 error = uiomove(new_ea->ea_value, attr_size, ap->a_uio);
2054 new_ea->ea_value = NULL;
2055 new_ea->ea_size = 0;
/* Failure path (branch elided): undo the budget charge and free. */
2058 tmpfs_extattr_update_mem(tmp, -diff);
2059 tmpfs_extattr_free(new_ea);
2063 LIST_REMOVE(ea, ea_extattrs);
2064 tmpfs_extattr_free(ea);
2066 LIST_INSERT_HEAD(&node->tn_extattrs, new_ea, ea_extattrs);
/*
 * SEEK_DATA helper, called with the VM object lock held: return the
 * offset of the first data at or after noff, considering both resident
 * pages and pages swapped out to the swap pager.
 */
2071 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
2074 vm_pindex_t p, p_m, p_swp;
2076 p = OFF_TO_IDX(noff);
2077 m = vm_page_find_least(obj, p);
2080 * Microoptimize the most common case for SEEK_DATA, where
2081 * there is no hole and the page is resident.
2083 if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
2086 p_swp = swap_pager_find_least(obj, p);
/* Data begins at whichever source (resident page or swap) is nearer. */
2090 p_m = m == NULL ? obj->size : m->pindex;
2091 return (IDX_TO_OFF(MIN(p_m, p_swp)));
/* Round noff up to the start of the next page boundary. */
2095 tmpfs_seek_next(off_t noff)
2097 return (noff + PAGE_SIZE - (noff & PAGE_MASK));
/*
 * Clamp a computed seek offset to the file size.  Offsets inside the
 * file pass through; past-EOF handling differs for SEEK_DATA vs.
 * SEEK_HOLE (the distinguishing branch is elided in this excerpt).
 */
2101 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
2103 if (*noff < tn->tn_size)
2107 *noff = tn->tn_size;
/*
 * SEEK_HOLE helper, called with the VM object lock held: advance noff
 * past every run of valid resident pages, then confirm the candidate
 * hole is not backed by swap before returning it.
 */
2112 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
2115 vm_pindex_t p, p_swp;
2117 for (;; noff = tmpfs_seek_next(noff)) {
2119 * Walk over the largest sequential run of the valid pages.
2121 for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
2122 m != NULL && vm_page_any_valid(m);
2123 m = vm_page_next(m), noff = tmpfs_seek_next(noff))
2127 * Found a hole in the object's page queue. Check if
2128 * there is a hole in the swap at the same place.
2130 p = OFF_TO_IDX(noff);
2131 p_swp = swap_pager_find_least(obj, p);
2133 noff = IDX_TO_OFF(p);
/*
 * Common driver for FIOSEEKDATA/FIOSEEKHOLE on regular files: clamp the
 * requested offset, scan the backing VM object under its read lock for
 * the next data or hole, and clamp the result to the file size.
 */
2141 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
2143 struct tmpfs_node *tn;
/* Only regular files have a pageable backing object to scan. */
2148 if (vp->v_type != VREG)
2150 tn = VP_TO_TMPFS_NODE(vp)
2154 error = tmpfs_seek_clamp(tn, &noff, seekdata);
2157 obj = tn->tn_reg.tn_aobj;
2159 VM_OBJECT_RLOCK(obj);
2160 noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
2161 tmpfs_seek_hole_locked(obj, noff);
2162 VM_OBJECT_RUNLOCK(obj);
2164 error = tmpfs_seek_clamp(tn, &noff, seekdata);
/*
 * VOP_IOCTL: implement FIOSEEKDATA/FIOSEEKHOLE by locking the vnode
 * shared and delegating to tmpfs_seek_datahole().
 */
2171 tmpfs_ioctl(struct vop_ioctl_args *ap)
2173 struct vnode *vp = ap->a_vp;
2176 switch (ap->a_command) {
2179 error = vn_lock(vp, LK_SHARED);
2184 error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
2185 ap->a_command == FIOSEEKDATA);
2196 * Vnode operations vector used for files stored in a tmpfs file system.
/*
 * Default tmpfs vnode operations vector.  Lookup goes through the
 * namecache (vfs_cache_lookup + tmpfs_cached_lookup), the fplookup
 * entries enable lockless path resolution, locking uses the generic
 * vop_lock/vop_unlock/vop_islocked, and VOP_BMAP is unsupported since
 * tmpfs has no backing block device.
 */
2198 struct vop_vector tmpfs_vnodeop_entries = {
2199 .vop_default = &default_vnodeops,
2200 .vop_lookup = vfs_cache_lookup,
2201 .vop_cachedlookup = tmpfs_cached_lookup,
2202 .vop_create = tmpfs_create,
2203 .vop_mknod = tmpfs_mknod,
2204 .vop_open = tmpfs_open,
2205 .vop_close = tmpfs_close,
2206 .vop_fplookup_vexec = tmpfs_fplookup_vexec,
2207 .vop_fplookup_symlink = tmpfs_fplookup_symlink,
2208 .vop_access = tmpfs_access,
2209 .vop_stat = tmpfs_stat,
2210 .vop_getattr = tmpfs_getattr,
2211 .vop_setattr = tmpfs_setattr,
2212 .vop_read = tmpfs_read,
2213 .vop_read_pgcache = tmpfs_read_pgcache,
2214 .vop_write = tmpfs_write,
2215 .vop_deallocate = tmpfs_deallocate,
2216 .vop_fsync = tmpfs_fsync,
2217 .vop_remove = tmpfs_remove,
2218 .vop_link = tmpfs_link,
2219 .vop_rename = tmpfs_rename,
2220 .vop_mkdir = tmpfs_mkdir,
2221 .vop_rmdir = tmpfs_rmdir,
2222 .vop_symlink = tmpfs_symlink,
2223 .vop_readdir = tmpfs_readdir,
2224 .vop_readlink = tmpfs_readlink,
2225 .vop_inactive = tmpfs_inactive,
2226 .vop_need_inactive = tmpfs_need_inactive,
2227 .vop_reclaim = tmpfs_reclaim,
2228 .vop_print = tmpfs_print,
2229 .vop_pathconf = tmpfs_pathconf,
2230 .vop_vptofh = tmpfs_vptofh,
2231 .vop_whiteout = tmpfs_whiteout,
2232 .vop_bmap = VOP_EOPNOTSUPP,
2233 .vop_vptocnp = tmpfs_vptocnp,
2234 .vop_lock1 = vop_lock,
2235 .vop_unlock = vop_unlock,
2236 .vop_islocked = vop_islocked,
2237 .vop_deleteextattr = tmpfs_deleteextattr,
2238 .vop_getextattr = tmpfs_getextattr,
2239 .vop_listextattr = tmpfs_listextattr,
2240 .vop_setextattr = tmpfs_setextattr,
2241 .vop_add_writecount = vop_stdadd_writecount_nomsync,
2242 .vop_ioctl = tmpfs_ioctl,
2244 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2247 * Same vector for mounts which do not use namecache.
/*
 * Vector for non-namecached mounts: identical to the default vector
 * except lookup goes straight to tmpfs_lookup, bypassing the cache.
 */
2249 struct vop_vector tmpfs_vnodeop_nonc_entries = {
2250 .vop_default = &tmpfs_vnodeop_entries,
2251 .vop_lookup = tmpfs_lookup,
2253 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);