/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */
/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in the
 * future.  The elements are built using the GFS primitives, as the hierarchy
 * does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *
 * The 'snapshot' node contains just enough information to look up '..' and act
 * as a mountpoint for the snapshot.  Whenever we look up a specific snapshot,
 * we perform an automount of the underlying filesystem and return the
 * corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted-on file systems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */
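/*
 * Example (illustrative paths only): with a filesystem mounted at /tank/fs,
 * a traversal of /tank/fs/.zfs/snapshot/monday first creates the GFS vnode
 * for "monday" via zfsctl_snapdir_lookup(), which then automounts the
 * dataset "tank/fs@monday" on top of that vnode and returns the root vnode
 * of the snapshot mount.
 */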
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>

#include "zfs_namecheck.h"
/*
 * "Synthetic" filesystem implementation.
 */

/*
 * Assert that A implies B.
 */
#define	KASSERT_IMPLY(A, B, msg)	KASSERT(!(A) || (B), (msg))

static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");
typedef struct sfs_node {
	char		sn_name[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t	sn_parent_id;
	uint64_t	sn_id;
} sfs_node_t;
/*
 * Check the parent's ID as well as the node's to account for a chance
 * that IDs originating from different domains (snapshot IDs, artificial
 * IDs, znode IDs) may clash.
 */
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
	sfs_node_t *n1 = vp->v_data;
	sfs_node_t *n2 = arg;
	bool equal;

	equal = n1->sn_id == n2->sn_id &&
	    n1->sn_parent_id == n2->sn_parent_id;

	/* Zero means equality. */
	return (!equal);
}
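/*
 * The helpers below are thin wrappers around the FreeBSD vnode hash
 * (vfs_hash_get/insert/remove), keyed by the node id and disambiguated
 * on collision by sfs_compare_ids() above.
 */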
static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	sfs_node_t search;
	int err;

	search.sn_id = id;
	search.sn_parent_id = parent_id;
	err = vfs_hash_get(mp, (u_int)id, flags, curthread, vpp,
	    sfs_compare_ids, &search);
	return (err);
}
static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	int err;

	KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
	err = vfs_hash_insert(vp, (u_int)id, flags, curthread, vpp,
	    sfs_compare_ids, vp->v_data);
	return (err);
}
static void
sfs_vnode_remove(struct vnode *vp)
{

	vfs_hash_remove(vp);
}

typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);
static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
    const char *tag, struct vop_vector *vops,
    sfs_vnode_setup_fn setup, void *arg,
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	/* Allocate a new vnode/inode. */
	error = getnewvnode(tag, mp, vops, &vp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Exclusively lock the vnode while it's being constructed.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	setup(vp, arg);

	error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	*vpp = vp;
	return (0);
}
static void
sfs_print_node(sfs_node_t *node)
{

	printf("\tname = %s\n", node->sn_name);
	printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
	printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}
static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
	struct sfs_node *node;

	KASSERT(strlen(name) < sizeof(node->sn_name),
	    ("sfs node name is too long"));
	KASSERT(size >= sizeof(*node), ("sfs node size is too small"));
	node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);

	strlcpy(node->sn_name, name, sizeof(node->sn_name));
	node->sn_parent_id = parent_id;
	node->sn_id = id;

	return (node);
}
static void
sfs_destroy_node(sfs_node_t *node)
{

	free(node, M_SFSNODES);
}
static void *
sfs_reclaim_vnode(vnode_t *vp)
{
	sfs_node_t *data;

	sfs_vnode_remove(vp);
	data = vp->v_data;
	vp->v_data = NULL;
	return (data);
}
static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
    uio_t *uio, off_t *offp)
{
	struct dirent entry;
	int error;

	/* Reset ncookies for subsequent use of vfs_read_dirent. */
	if (ap->a_ncookies != NULL)
		*ap->a_ncookies = 0;

	if (uio->uio_resid < sizeof(entry))
		return (SET_ERROR(EINVAL));

	if (uio->uio_offset < 0)
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == 0) {
		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '\0';
		entry.d_namlen = 1;
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (uio->uio_offset < sizeof(entry))
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == sizeof(entry)) {
		entry.d_fileno = parent_id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '.';
		entry.d_name[2] = '\0';
		entry.d_namlen = 2;
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (offp != NULL)
		*offp = 2 * sizeof(entry);
	return (0);
}
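/*
 * Note: directory offsets in this synthetic fs are byte offsets in units
 * of sizeof(struct dirent): 0 is ".", sizeof(entry) is "..", and callers
 * continue their own entries starting at 2 * sizeof(entry).
 */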
/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			1
 *	.zfs/snapshot		2
 *	.zfs/snapshot/<snap>	objectid(snap)
 */
#define	ZFSCTL_INO_SNAP(id)	(id)
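/*
 * ZFSCTL_INO_ROOT and ZFSCTL_INO_SNAPDIR (entries 1 and 2 above) are
 * assumed to be provided by sys/zfs_ctldir.h; ZFSCTL_INO_SNAP() maps a
 * snapshot's object id straight to its inode number.
 */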
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
static struct vop_vector zfsctl_ops_shares_dir;
boolean_t
zfsctl_is_node(vnode_t *vp)
{

	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot) ||
	    vn_matchops(vp, zfsctl_ops_shares_dir));
}
typedef struct zfsctl_root {
	sfs_node_t	node;
	sfs_node_t	*snapdir;
	timestruc_t	cmtime;
} zfsctl_root_t;
/*
 * Create the '.zfs' directory.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	zfsctl_root_t *dot_zfs;
	sfs_node_t *snapdir;
	vnode_t *rvp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	snapdir = sfs_alloc_node(sizeof(*snapdir), "snapshot", ZFSCTL_INO_ROOT,
	    ZFSCTL_INO_SNAPDIR);
	dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof(*dot_zfs), ".zfs", 0,
	    ZFSCTL_INO_ROOT);
	dot_zfs->snapdir = snapdir;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof(crtime)));
	ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
	vput(rvp);

	zfsvfs->z_ctldir = dot_zfs;
}
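/*
 * zfsctl_create() is expected to be called once per mounted filesystem
 * (e.g. from the mount path in zfs_vfsops.c); it only allocates the two
 * sfs nodes, while the corresponding vnodes are created lazily by
 * sfs_vgetx() on first lookup.
 */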
/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * The nodes must not have any associated vnodes by now as they should be
 * reclaimed already.
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{

	sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
	sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}
static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{

	return (VFS_ROOT(mp, flags, vpp));
}
static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{

	ASSERT_VOP_ELOCKED(vp, __func__);

	/* We support shared locking. */
	VN_LOCK_ASHARE(vp);
	vp->v_type = VDIR;
	vp->v_data = arg;
}
static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
	err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
	    zfsctl_common_vnode_setup, node, vpp);
	return (err);
}
static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
	err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
	    &zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
	return (err);
}
/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
	int error;

	error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
	return (error);
}
/*
 * Common open routine.  Disallow any write access.
 */
static int
zfsctl_common_open(struct vop_open_args *ap)
{
	int flags = ap->a_mode;

	if (flags & FWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}
/*
 * Common close routine.  Nothing to do here.
 */
static int
zfsctl_common_close(struct vop_close_args *ap)
{

	return (0);
}
/*
 * Common access routine.  Disallow writes.
 */
static int
zfsctl_common_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	accmode_t accmode = ap->a_accmode;

	if (accmode & VWRITE)
		return (SET_ERROR(EACCES));
	return (0);
}
/*
 * Common getattr function.  Fill in basic information.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	sfs_node_t *node = vp->v_data;

	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&vap->va_atime);
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;

	vap->va_nodeid = node->sn_id;

	/* At least '.' and '..'. */
	vap->va_nlink = 2;
}
static int
zfsctl_common_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	fid_t *fidp = (void *)ap->a_fid;
	sfs_node_t *node = vp->v_data;
	uint64_t object = node->sn_id;
	zfid_short_t *zfid;
	int i;

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof(zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs nodes always have a generation number of 0 */
	for (i = 0; i < sizeof(zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	return (0);
}
static int
zfsctl_common_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	(void) sfs_reclaim_vnode(vp);
	return (0);
}
static int
zfsctl_common_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	sfs_print_node(ap->a_vp->v_data);
	return (0);
}
/*
 * Get root directory attributes.
 */
static int
zfsctl_root_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsctl_root_t *node = vp->v_data;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = node->cmtime;
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	vap->va_nlink += 1; /* snapdir */
	vap->va_size = vap->va_nlink;
	return (0);
}
/*
 * When we look up "." we can still be asked to lock it
 * differently, can't we?
 */
int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{

	vref(dvp);
	if (ltype != VOP_ISLOCKED(dvp)) {
		if (ltype == LK_EXCLUSIVE)
			vn_lock(dvp, LK_UPGRADE | LK_RETRY);
		else /* if (ltype == LK_SHARED) */
			vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);

		/* Relocking for "." may have left us with a reclaimed vnode. */
		if ((dvp->v_iflag & VI_DOOMED) != 0) {
			vrele(dvp);
			return (SET_ERROR(ENOENT));
		}
	}
	return (0);
}
/*
 * Special case the handling of "..".
 */
int
zfsctl_root_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	cred_t *cr = ap->a_cnp->cn_cred;
	int flags = ap->a_cnp->cn_flags;
	int lkflags = ap->a_cnp->cn_lkflags;
	int nameiop = ap->a_cnp->cn_nameiop;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
	} else if ((flags & ISDOTDOT) != 0) {
		err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
		    lkflags, vpp);
	} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
		err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
	} else {
		err = SET_ERROR(ENOENT);
	}
	if (err != 0)
		*vpp = NULL;
	return (err);
}
static int
zfsctl_root_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_root_t *node = vp->v_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}
	if (uio->uio_offset != dots_offset)
		return (SET_ERROR(EINVAL));

	CTASSERT(sizeof(node->snapdir->sn_name) <= sizeof(entry.d_name));
	entry.d_fileno = node->snapdir->sn_id;
	entry.d_type = DT_DIR;
	strcpy(entry.d_name, node->snapdir->sn_name);
	entry.d_namlen = strlen(entry.d_name);
	entry.d_reclen = sizeof(entry);
	error = vfs_read_dirent(ap, &entry, uio->uio_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG)
			error = 0;
		return (SET_ERROR(error));
	}
	if (eofp != NULL)
		*eofp = 1;
	uio->uio_offset += sizeof(entry);
	return (0);
}
static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
	static const char dotzfs_name[4] = ".zfs";
	vnode_t *dvp;
	int error;

	if (*ap->a_buflen < sizeof (dotzfs_name))
		return (SET_ERROR(ENOMEM));

	error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
	    LK_SHARED, &dvp);
	if (error != 0)
		return (SET_ERROR(error));

	VOP_UNLOCK(dvp, 0);
	*ap->a_vpp = dvp;
	*ap->a_buflen -= sizeof (dotzfs_name);
	bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
	return (0);
}
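/*
 * vop_vptocnp implementations supply the name component for reverse
 * lookups (e.g. vn_fullpath(9)); for the control directory the answer is
 * always ".zfs" and the parent is the head filesystem's root vnode.
 */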
static struct vop_vector zfsctl_ops_root = {
	.vop_default = &default_vnodeops,
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_ioctl = VOP_EINVAL,
	.vop_getattr = zfsctl_root_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_root_readdir,
	.vop_lookup = zfsctl_root_lookup,
	.vop_inactive = VOP_NULL,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
	.vop_vptocnp = zfsctl_root_vptocnp,
};
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}
static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
	int err;

	err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
	return (err);
}
/*
 * Given a vnode, get a root vnode of a filesystem mounted on top of
 * the vnode, if any.  The root vnode is referenced and locked.
 * If no filesystem is mounted then the original vnode remains referenced
 * and locked.  If any error happens the original vnode is unlocked and
 * released.
 */
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
	struct mount *mp;
	int err;

	ASSERT_VOP_LOCKED(*vpp, __func__);
	ASSERT3S((*vpp)->v_type, ==, VDIR);

	if ((mp = (*vpp)->v_mountedhere) != NULL) {
		err = vfs_busy(mp, 0);
		KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
		KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
		vput(*vpp);
		err = VFS_ROOT(mp, flags, vpp);
		vfs_unbusy(mp);
		return (err);
	}
	return (EJUSTRETURN);
}
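/*
 * Note: EJUSTRETURN is used here as an in-band signal meaning "nothing is
 * mounted on this vnode"; in that case the caller still owns the original
 * locked and referenced vnode.
 */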
typedef struct {
	const char *snap_name;
	uint64_t    snap_id;
} snapshot_setup_arg_t;
static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
	snapshot_setup_arg_t *ssa = arg;
	sfs_node_t *node;

	ASSERT_VOP_ELOCKED(vp, __func__);

	node = sfs_alloc_node(sizeof(sfs_node_t),
	    ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
	zfsctl_common_vnode_setup(vp, node);

	/* We have to support recursive locking. */
	VN_LOCK_AREC(vp);
}
/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 * There are four possibilities:
 * - the snapshot node and vnode do not exist
 * - the snapshot vnode is covered by the mounted snapshot
 * - the snapshot vnode is not covered yet, the mount operation is in progress
 * - the snapshot vnode is not covered, because the snapshot has been unmounted
 * The last two states are transient and should be relatively short-lived.
 */
int
zfsctl_snapdir_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char name[NAME_MAX + 1];
	char fullname[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	uint64_t snap_id;
	int nameiop = cnp->cn_nameiop;
	int lkflags = cnp->cn_lkflags;
	int flags = cnp->cn_flags;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
		return (err);
	}
	if (flags & ISDOTDOT) {
		err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
		    vpp);
		return (err);
	}

	if (cnp->cn_namelen >= sizeof(name))
		return (SET_ERROR(ENAMETOOLONG));

	strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
	if (err != 0)
		return (SET_ERROR(ENOENT));

	for (;;) {
		snapshot_setup_arg_t ssa;

		ssa.snap_name = name;
		ssa.snap_id = snap_id;
		err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
		    snap_id, "zfs", &zfsctl_ops_snapshot,
		    zfsctl_snapshot_vnode_setup, &ssa, vpp);
		if (err != 0)
			return (err);

		/* Check if a new vnode has just been created. */
		if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
			break;

		/*
		 * The vnode must be referenced at least by this thread and
		 * the mount point or the thread doing the mounting.
		 * There can be more references from concurrent lookups.
		 */
		KASSERT(vrefcnt(*vpp) > 1, ("found unreferenced mountpoint"));

		/*
		 * Check if a snapshot is already mounted on top of the vnode.
		 */
		err = zfsctl_mounted_here(vpp, lkflags);
		if (err != EJUSTRETURN)
			return (err);

		/*
		 * If the vnode is not covered, then either the mount operation
		 * is in progress or the snapshot has already been unmounted
		 * but the vnode hasn't been inactivated and reclaimed yet.
		 * We can try to re-use the vnode in the latter case.
		 */
		VI_LOCK(*vpp);
		if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
			/*
			 * Upgrade to exclusive lock in order to:
			 * - avoid race conditions
			 * - satisfy the contract of mount_snapshot()
			 */
			err = VOP_LOCK(*vpp, LK_TRYUPGRADE | LK_INTERLOCK);
			if (err == 0)
				break;
		} else {
			VI_UNLOCK(*vpp);
		}

		/*
		 * In this state we can loop on uncontested locks and starve
		 * the thread doing the lengthy, non-trivial mount operation.
		 * So, yield to prevent that from happening.
		 */
		vput(*vpp);
		kern_yield(PRI_USER);
	}

	VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof(fullname), fullname));

	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, name);

	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;

		/* Clear the root flag (set via VFS_ROOT) as well. */
		(*vpp)->v_vflag &= ~VV_ROOT;
	}
	return (err);
}
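/*
 * Note: the loop above terminates in one of two ways: with a newly created
 * vnode (still exclusively locked by sfs_vgetx()) or with an existing,
 * uncovered vnode whose shared lock was successfully upgraded.  Either way
 * the vnode is held exclusively, as mount_snapshot() requires.
 */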
static int
zfsctl_snapdir_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}

	for (;;) {
		uint64_t cookie;
		uint64_t id;

		cookie = uio->uio_offset - dots_offset;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof(snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT) {
				if (eofp != NULL)
					*eofp = 1;
				error = 0;
			}
			return (error);
		}

		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		strcpy(entry.d_name, snapname);
		entry.d_namlen = strlen(entry.d_name);
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0) {
			if (error == ENAMETOOLONG)
				error = 0;
			return (SET_ERROR(error));
		}
		uio->uio_offset = cookie + dots_offset;
	}
}
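/*
 * Note: offsets past the two dot entries encode the cookie used by
 * dmu_snapshot_list_next(), so a readdir can resume iteration from any
 * point without keeping per-open state.
 */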
static int
zfsctl_snapdir_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	dsl_dataset_t *ds = dmu_objset_ds(zfsvfs->z_os);
	sfs_node_t *node = vp->v_data;
	uint64_t snap_count;
	int err;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (err != 0)
			return (err);
		vap->va_nlink += snap_count;
	}
	vap->va_size = vap->va_nlink;

	return (0);
}
static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default = &default_vnodeops,
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_getattr = zfsctl_snapdir_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_snapdir_readdir,
	.vop_lookup = zfsctl_snapdir_lookup,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
};
static int
zfsctl_snapshot_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	VERIFY(vrecycle(vp) == 1);
	return (0);
}
static int
zfsctl_snapshot_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	void *data = vp->v_data;

	sfs_reclaim_vnode(vp);
	sfs_destroy_node(data);
	return (0);
}
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	struct mount *mp;
	vnode_t *dvp;
	vnode_t *vp;
	sfs_node_t *node;
	size_t len;
	int locked;
	int error;

	vp = ap->a_vp;
	node = vp->v_data;
	len = strlen(node->sn_name);
	if (*ap->a_buflen < len)
		return (SET_ERROR(ENOMEM));

	/*
	 * Prevent unmounting of the snapshot while the vnode lock
	 * is not held.  That is not strictly required, but allows
	 * us to assert that an uncovered snapshot vnode is never
	 * "leaked".
	 */
	mp = vp->v_mountedhere;
	if (mp == NULL)
		return (SET_ERROR(ENOENT));
	error = vfs_busy(mp, 0);
	KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));

	/*
	 * We can vput the vnode as we can now depend on the reference owned
	 * by the busied mp.  But we also need to hold the vnode, because
	 * the reference may go after vfs_unbusy() which has to be called
	 * before we can lock the vnode again.
	 */
	locked = VOP_ISLOCKED(vp);
	vhold(vp);
	vput(vp);

	/* Look up .zfs/snapshot, our parent. */
	error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
	if (error == 0) {
		VOP_UNLOCK(dvp, 0);
		*ap->a_vpp = dvp;
		*ap->a_buflen -= len;
		bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
	}
	vfs_unbusy(mp);
	vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
	return (error);
}
/*
 * These VP's should never see the light of day.  They should always
 * be covered.
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default = NULL, /* ensure very restricted access */
	.vop_inactive = zfsctl_snapshot_inactive,
	.vop_reclaim = zfsctl_snapshot_reclaim,
	.vop_vptocnp = zfsctl_snapshot_vptocnp,
	.vop_lock1 = vop_stdlock,
	.vop_unlock = vop_stdunlock,
	.vop_islocked = vop_stdislocked,
	.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
	.vop_print = zfsctl_common_print,
};
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *vp;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);
	*zfsvfsp = NULL;
	error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
	    ZFSCTL_INO_SNAPDIR, objsetid, &vp);
	if (error == 0 && vp != NULL) {
		/*
		 * XXX Probably need to at least reference, if not busy, the mp.
		 */
		if (vp->v_mountedhere != NULL)
			*zfsvfsp = vp->v_mountedhere->mnt_data;
		vput(vp);
	}
	if (*zfsvfsp == NULL)
		return (SET_ERROR(EINVAL));
	return (0);
}
/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	struct mount *mp;
	vnode_t *vp;
	uint64_t cookie;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);

	cookie = 0;
	for (;;) {
		uint64_t id;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof(snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		for (;;) {
			error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
			    ZFSCTL_INO_SNAPDIR, id, &vp);
			if (error != 0 || vp == NULL)
				break;

			mp = vp->v_mountedhere;

			/*
			 * v_mountedhere being NULL means that the
			 * (uncovered) vnode is in a transient state
			 * (mounting or unmounting), so loop until it
			 * stabilizes or disappears.
			 */
			if (mp != NULL)
				break;
			vput(vp);
		}
		if (error != 0)
			break;
		if (vp == NULL)
			continue;	/* no mountpoint, nothing to do */

		/*
		 * The mount-point vnode is kept locked to avoid spurious EBUSY
		 * from a concurrent umount.
		 * The vnode lock must have recursive locking enabled.
		 */
		vfs_ref(mp);
		error = dounmount(mp, fflags, curthread);
		KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
		    ("extra references after unmount"));
		vput(vp);
		if (error != 0)
			break;
	}
	KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
	    ("force unmounting failed"));
	return (error);
}