/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
 */

/*
 * ZFS control directory (a.k.a. ".zfs")
 *
 * This directory provides a common location for all ZFS meta-objects.
 * Currently, this is only the 'snapshot' directory, but this may expand in the
 * future.  The elements are built using the GFS primitives, as the hierarchy
 * does not actually exist on disk.
 *
 * For 'snapshot', we don't want to have all snapshots always mounted, because
 * this would take up a huge amount of space in /etc/mnttab.  We have three
 * types of objects:
 *
 *	ctldir ------> snapshotdir -------> snapshot
 *
 * The 'snapshot' node contains just enough information to lookup '..' and act
 * as a mountpoint for the snapshot.  Whenever we lookup a specific snapshot, we
 * perform an automount of the underlying filesystem and return the
 * corresponding vnode.
 *
 * All mounts are handled automatically by the kernel, but unmounts are
 * (currently) handled from user land.  The main reason is that there is no
 * reliable way to auto-unmount the filesystem when it's "no longer in use".
 * When the user unmounts a filesystem, we call zfsctl_unmount(), which
 * unmounts any snapshots within the snapshot directory.
 *
 * The '.zfs', '.zfs/snapshot', and all directories created under
 * '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
 * share the same vfs_t as the head filesystem (what '.zfs' lives under).
 *
 * File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
 * (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
 * However, vnodes within these mounted file systems have their v_vfsp
 * fields set to the head filesystem to make NFS happy (see
 * zfsctl_snapdir_lookup()).  We VFS_HOLD the head filesystem's vfs_t
 * so that it cannot be freed until all snapshots have been unmounted.
 */
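
/*
 * Illustrative example (hypothetical pool/dataset names): with a
 * filesystem "tank/home" mounted on /tank/home, a plain
 *
 *	ls /tank/home/.zfs/snapshot/monday
 *
 * walks ctldir -> snapshotdir -> snapshot; the final lookup automounts
 * the dataset tank/home@monday on the '.zfs/snapshot/monday' GFS vnode
 * and returns the root vnode of the newly mounted snapshot.
 */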

#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>

#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>

#include "zfs_namecheck.h"

/*
 * "Synthetic" filesystem implementation.
 */

/*
 * Assert that A implies B.
 */
#define	KASSERT_IMPLY(A, B, msg)	KASSERT(!(A) || (B), (msg))

static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");

typedef struct sfs_node {
	char		sn_name[ZFS_MAX_DATASET_NAME_LEN];
	uint64_t	sn_parent_id;
	uint64_t	sn_id;
} sfs_node_t;

/*
 * Check the parent's ID as well as the node's to account for a chance
 * that IDs originating from different domains (snapshot IDs, artificial
 * IDs, znode IDs) may clash.
 */
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
	sfs_node_t *n1 = vp->v_data;
	sfs_node_t *n2 = arg;
	bool equal;

	equal = n1->sn_id == n2->sn_id &&
	    n1->sn_parent_id == n2->sn_parent_id;

	/* Zero means equality. */
	return (!equal);
}
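
/*
 * For example (hypothetical IDs): a snapshot whose objset ID happened to
 * equal the artificial ID of '.zfs/snapshot' would collide with it in the
 * vfs_hash table, since both hash by sn_id alone; the comparison of
 * sn_parent_id (ZFSCTL_INO_SNAPDIR for the snapshot node vs.
 * ZFSCTL_INO_ROOT for the snapdir node) is what keeps them distinct.
 */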

static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	sfs_node_t search;
	int err;

	search.sn_id = id;
	search.sn_parent_id = parent_id;
	err = vfs_hash_get(mp, (u_int)id, flags, curthread, vpp,
	    sfs_compare_ids, &search);
	return (err);
}

static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
    uint64_t id, struct vnode **vpp)
{
	int err;

	KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
	err = vfs_hash_insert(vp, (u_int)id, flags, curthread, vpp,
	    sfs_compare_ids, vp->v_data);
	return (err);
}

static void
sfs_vnode_remove(struct vnode *vp)
{
	vfs_hash_remove(vp);
}

typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);

static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
    const char *tag, struct vop_vector *vops,
    sfs_vnode_setup_fn setup, void *arg,
    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	/* Allocate a new vnode/inode. */
	error = getnewvnode(tag, mp, vops, &vp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * Exclusively lock the vnode while it's being constructed.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}

	setup(vp, arg);

	error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
	if (error != 0 || *vpp != NULL) {
		KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
		    "sfs vnode with no data");
		return (error);
	}

	*vpp = vp;
	return (0);
}

static void
sfs_print_node(sfs_node_t *node)
{
	printf("\tname = %s\n", node->sn_name);
	printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
	printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}

static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
	struct sfs_node *node;

	KASSERT(strlen(name) < sizeof(node->sn_name),
	    ("sfs node name is too long"));
	KASSERT(size >= sizeof(*node), ("sfs node size is too small"));
	node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
	strlcpy(node->sn_name, name, sizeof(node->sn_name));
	node->sn_parent_id = parent_id;
	node->sn_id = id;

	return (node);
}

static void
sfs_destroy_node(sfs_node_t *node)
{
	free(node, M_SFSNODES);
}

static int
sfs_reclaim_vnode(vnode_t *vp)
{
	sfs_vnode_remove(vp);
	vp->v_data = NULL;
	return (0);
}

static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
    uio_t *uio, off_t *offp)
{
	struct dirent entry;
	int error;

	/* Reset ncookies for subsequent use of vfs_read_dirent. */
	if (ap->a_ncookies != NULL)
		*ap->a_ncookies = 0;

	if (uio->uio_resid < sizeof(entry))
		return (SET_ERROR(EINVAL));

	if (uio->uio_offset < 0)
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == 0) {
		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '\0';
		entry.d_namlen = 1;
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (uio->uio_offset < sizeof(entry))
		return (SET_ERROR(EINVAL));
	if (uio->uio_offset == sizeof(entry)) {
		entry.d_fileno = parent_id;
		entry.d_type = DT_DIR;
		entry.d_name[0] = '.';
		entry.d_name[1] = '.';
		entry.d_name[2] = '\0';
		entry.d_namlen = 2;
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0)
			return (SET_ERROR(error));
	}

	if (offp != NULL)
		*offp = 2 * sizeof(entry);
	return (0);
}
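
/*
 * Resulting offset layout: "." is emitted at offset 0, ".." at offset
 * sizeof(entry), and *offp is set to 2 * sizeof(entry), the offset at
 * which the caller should start emitting its own entries.
 */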

/*
 * .zfs inode namespace
 *
 * We need to generate unique inode numbers for all files and directories
 * within the .zfs pseudo-filesystem.  We use the following scheme:
 *
 *	ENTRY			ZFSCTL_INODE
 *	.zfs			ZFSCTL_INO_ROOT
 *	.zfs/snapshot		ZFSCTL_INO_SNAPDIR
 *	.zfs/snapshot/<snap>	objectid(snap)
 */
#define	ZFSCTL_INO_SNAP(id)	(id)
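
/*
 * So, for example, a snapshot whose dataset object ID is 0x45
 * (hypothetical) shows up with inode number 0x45 under '.zfs/snapshot'.
 */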

static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
static struct vop_vector zfsctl_ops_shares_dir;

boolean_t
zfsctl_is_node(vnode_t *vp)
{
	return (vn_matchops(vp, zfsctl_ops_root) ||
	    vn_matchops(vp, zfsctl_ops_snapdir) ||
	    vn_matchops(vp, zfsctl_ops_snapshot) ||
	    vn_matchops(vp, zfsctl_ops_shares_dir));
}

typedef struct zfsctl_root {
	sfs_node_t	node;
	sfs_node_t	*snapdir;
	timestruc_t	cmtime;
} zfsctl_root_t;

/*
 * Create the '.zfs' directory.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	zfsctl_root_t *dot_zfs;
	sfs_node_t *snapdir;
	vnode_t *rvp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	snapdir = sfs_alloc_node(sizeof(*snapdir), "snapshot", ZFSCTL_INO_ROOT,
	    ZFSCTL_INO_SNAPDIR);
	dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof(*dot_zfs), ".zfs", 0,
	    ZFSCTL_INO_ROOT);
	dot_zfs->snapdir = snapdir;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof(crtime)));
	ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
	vput(rvp);

	zfsvfs->z_ctldir = dot_zfs;
}

/*
 * Destroy the '.zfs' directory.  Only called when the filesystem is unmounted.
 * The nodes must not have any associated vnodes by now as they should be
 * vnode-reclaimed already.
 */
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
	sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
	sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
	zfsvfs->z_ctldir = NULL;
}

static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	return (VFS_ROOT(mp, flags, vpp));
}

static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
	ASSERT_VOP_ELOCKED(vp, __func__);

	/* We support shared locking. */
	VN_LOCK_ASHARE(vp);
	vp->v_type = VDIR;
	vp->v_data = arg;
}

static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
	err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
	    zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
    struct vnode **vpp)
{
	void *node;
	int err;

	node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
	err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
	    &zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
	return (err);
}

/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
	int error;

	error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
	return (error);
}

/*
 * Common open routine.  Disallow any write access.
 */
static int
zfsctl_common_open(struct vop_open_args *ap)
{
	int flags = ap->a_mode;

	if (flags & FWRITE)
		return (SET_ERROR(EACCES));

	return (0);
}

/*
 * Common close routine.  Nothing to do here.
 */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Common access routine.  Disallow writes.
 */
static int
zfsctl_common_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	accmode_t accmode = ap->a_accmode;

	if (accmode & VWRITE)
		return (SET_ERROR(EACCES));
	return (0);
}

/*
 * Common getattr function.  Fill in basic information.
 */
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
	timestruc_t now;
	sfs_node_t *node;

	node = vp->v_data;

	/*
	 * We are a purely virtual object, so we have no
	 * blocksize or allocated blocks.
	 */
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
	    S_IROTH | S_IXOTH;
	vap->va_type = VDIR;
	/*
	 * We live in the now (for atime).
	 */
	gethrestime(&now);
	vap->va_atime = now;
	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_flags = 0;

	vap->va_nodeid = node->sn_id;

	/* At least '.' and '..'. */
	vap->va_nlink = 2;
}

static int
zfsctl_common_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	fid_t *fidp = (void *)ap->a_fid;
	sfs_node_t *node = vp->v_data;
	uint64_t object = node->sn_id;
	zfid_short_t *zfid;
	int i;

	zfid = (zfid_short_t *)fidp;
	zfid->zf_len = SHORT_FID_LEN;

	for (i = 0; i < sizeof(zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* .zfs nodes always have a generation number of 0 */
	for (i = 0; i < sizeof(zfid->zf_gen); i++)
		zfid->zf_gen[i] = 0;

	return (0);
}
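
/*
 * Worked example (hypothetical ID): for sn_id 0x0102 the loops above
 * produce zf_object[0] = 0x02 and zf_object[1] = 0x01 with the remaining
 * object bytes zeroed (a little-endian, byte-by-byte encoding), and an
 * all-zero zf_gen.
 */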

static int
zfsctl_common_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	(void) sfs_reclaim_vnode(vp);

	return (0);
}

static int
zfsctl_common_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	sfs_print_node(ap->a_vp->v_data);

	return (0);
}

/*
 * Get root directory attributes.
 */
static int
zfsctl_root_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	zfsctl_root_t *node = vp->v_data;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = node->cmtime;
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	vap->va_nlink += 1; /* snapdir */
	vap->va_size = vap->va_nlink;

	return (0);
}

/*
 * When we lookup "." we still can be asked to lock it
 * differently, can't we?
 */
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
	vref(dvp);
	if (ltype != VOP_ISLOCKED(dvp)) {
		if (ltype == LK_EXCLUSIVE)
			vn_lock(dvp, LK_UPGRADE | LK_RETRY);
		else /* if (ltype == LK_SHARED) */
			vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);

		/* Relocking "." may have left us with a reclaimed vnode. */
		if ((dvp->v_iflag & VI_DOOMED) != 0) {
			vrele(dvp);
			return (SET_ERROR(ENOENT));
		}
	}

	return (0);
}

/*
 * Special case the handling of "..".
 */
static int
zfsctl_root_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	cred_t *cr = ap->a_cnp->cn_cred;
	int flags = ap->a_cnp->cn_flags;
	int lkflags = ap->a_cnp->cn_lkflags;
	int nameiop = ap->a_cnp->cn_nameiop;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
	} else if ((flags & ISDOTDOT) != 0) {
		err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
		    lkflags, vpp);
	} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
		err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
	} else {
		err = SET_ERROR(ENOENT);
	}
	if (err != 0)
		*vpp = NULL;
	return (err);
}

static int
zfsctl_root_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	zfsctl_root_t *node = vp->v_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}
	if (uio->uio_offset != dots_offset)
		return (SET_ERROR(EINVAL));

	CTASSERT(sizeof(node->snapdir->sn_name) <= sizeof(entry.d_name));
	entry.d_fileno = node->snapdir->sn_id;
	entry.d_type = DT_DIR;
	strcpy(entry.d_name, node->snapdir->sn_name);
	entry.d_namlen = strlen(entry.d_name);
	entry.d_reclen = sizeof(entry);
	error = vfs_read_dirent(ap, &entry, uio->uio_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG)
			error = 0;
		return (SET_ERROR(error));
	}
	if (eofp != NULL)
		*eofp = 1;
	uio->uio_offset += sizeof(entry);
	return (0);
}

static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
	static const char dotzfs_name[4] = ".zfs";
	vnode_t *dvp;
	int error;

	if (*ap->a_buflen < sizeof (dotzfs_name))
		return (SET_ERROR(ENOMEM));

	error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
	    LK_SHARED, &dvp);
	if (error != 0)
		return (SET_ERROR(error));

	VOP_UNLOCK(dvp, 0);
	*ap->a_vpp = dvp;
	*ap->a_buflen -= sizeof (dotzfs_name);
	bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
	return (0);
}

static struct vop_vector zfsctl_ops_root = {
	.vop_default = &default_vnodeops,
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_ioctl = VOP_EINVAL,
	.vop_getattr = zfsctl_root_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_root_readdir,
	.vop_lookup = zfsctl_root_lookup,
	.vop_inactive = VOP_NULL,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
	.vop_vptocnp = zfsctl_root_vptocnp,
};

static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;

	dmu_objset_name(os, zname);
	if (strlen(zname) + 1 + strlen(name) >= len)
		return (SET_ERROR(ENAMETOOLONG));
	(void) strcat(zname, "@");
	(void) strcat(zname, name);
	return (0);
}
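
/*
 * For instance (hypothetical names): with the objset named "tank/home"
 * and name "monday", zname becomes "tank/home@monday"; ENAMETOOLONG is
 * returned if the combined name would not fit in len bytes.
 */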

static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
	objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
	int err;

	err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
	return (err);
}

/*
 * Given a vnode, get the root vnode of a filesystem mounted on top of
 * the vnode, if any.  The root vnode is referenced and locked.
 * If no filesystem is mounted then the original vnode remains referenced
 * and locked.  If any error happens the original vnode is unlocked and
 * released.
 */
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
	struct mount *mp;
	int err;

	ASSERT_VOP_LOCKED(*vpp, __func__);
	ASSERT3S((*vpp)->v_type, ==, VDIR);

	if ((mp = (*vpp)->v_mountedhere) != NULL) {
		err = vfs_busy(mp, 0);
		KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
		KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
		vput(*vpp);
		err = VFS_ROOT(mp, flags, vpp);
		vfs_unbusy(mp);
		return (err);
	}
	return (EJUSTRETURN);
}
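
/*
 * Note for callers: EJUSTRETURN is not a failure; it means nothing is
 * mounted here and the original vnode, still locked and referenced,
 * remains the one to use.
 */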

typedef struct {
	const char *snap_name;
	uint64_t snap_id;
} snapshot_setup_arg_t;

static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
	snapshot_setup_arg_t *ssa = arg;
	sfs_node_t *node;

	ASSERT_VOP_ELOCKED(vp, __func__);

	node = sfs_alloc_node(sizeof(sfs_node_t),
	    ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
	zfsctl_common_vnode_setup(vp, node);

	/* We have to support recursive locking. */
	VN_LOCK_AREC(vp);
}

/*
 * Lookup entry point for the 'snapshot' directory.  Try to open the
 * snapshot if it exists, creating the pseudo filesystem vnode as necessary.
 * Perform a mount of the associated dataset on top of the vnode.
 */
static int
zfsctl_snapdir_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	vnode_t *dvp = ap->a_dvp;
	vnode_t **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	char name[NAME_MAX + 1];
	char fullname[ZFS_MAX_DATASET_NAME_LEN];
	char *mountpoint;
	size_t mountpoint_len;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	uint64_t snap_id;
	int nameiop = cnp->cn_nameiop;
	int lkflags = cnp->cn_lkflags;
	int flags = cnp->cn_flags;
	int err;

	ASSERT(dvp->v_type == VDIR);

	if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
		return (SET_ERROR(ENOTSUP));

	if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
		err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
		if (err == 0)
			*vpp = dvp;
		return (err);
	}
	if (flags & ISDOTDOT) {
		err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
		    vpp);
		return (err);
	}

	if (cnp->cn_namelen >= sizeof(name))
		return (SET_ERROR(ENAMETOOLONG));

	strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
	err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
	if (err != 0)
		return (SET_ERROR(ENOENT));

	for (;;) {
		snapshot_setup_arg_t ssa;

		ssa.snap_name = name;
		ssa.snap_id = snap_id;
		err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
		    snap_id, "zfs", &zfsctl_ops_snapshot,
		    zfsctl_snapshot_vnode_setup, &ssa, vpp);
		if (err != 0)
			return (err);

		/* Check if a new vnode has just been created. */
		if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
			break;

		/*
		 * The vnode must be referenced at least by this thread and
		 * the mounted snapshot or the thread doing the mounting.
		 * There can be more references from concurrent lookups.
		 */
		KASSERT(vrefcnt(*vpp) > 1, ("found unreferenced mountpoint"));

		/*
		 * Check if a snapshot is already mounted on top of the vnode.
		 */
		err = zfsctl_mounted_here(vpp, lkflags);
		if (err != EJUSTRETURN)
			return (err);

		/*
		 * If the vnode is not covered yet, then the mount operation
		 * must be in progress.
		 */
		KASSERT(((*vpp)->v_iflag & VI_MOUNT) != 0,
		    ("snapshot vnode not covered"));
		vput(*vpp);

		/*
		 * In this situation we can loop on uncontested locks and starve
		 * the thread doing the lengthy, non-trivial mount operation.
		 */
		kern_yield(PRI_USER);
	}

	VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof(fullname), fullname));

	mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
	    strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len,
	    "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
	    dvp->v_vfsp->mnt_stat.f_mntonname, name);
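
	/*
	 * E.g. (hypothetical names): for a head filesystem mounted on
	 * /tank/home and the name "monday", the constructed mountpoint is
	 * "/tank/home/.zfs/snapshot/monday".
	 */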

	err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
	kmem_free(mountpoint, mountpoint_len);
	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;

		/* Clear the root flag (set via VFS_ROOT) as well. */
		(*vpp)->v_vflag &= ~VV_ROOT;
	}

	if (err != 0)
		*vpp = NULL;
	return (err);
}

static int
zfsctl_snapdir_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	struct dirent entry;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	uio_t *uio = ap->a_uio;
	int *eofp = ap->a_eofflag;
	off_t dots_offset;
	int error;

	ASSERT(vp->v_type == VDIR);

	error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap, uio,
	    &dots_offset);
	if (error != 0) {
		if (error == ENAMETOOLONG) /* ran out of destination space */
			error = 0;
		return (error);
	}

	for (;;) {
		uint64_t cookie;
		uint64_t id;

		cookie = uio->uio_offset - dots_offset;

		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof(snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT) {
				if (eofp != NULL)
					*eofp = 1;
				error = 0;
			}
			return (error);
		}

		entry.d_fileno = id;
		entry.d_type = DT_DIR;
		strcpy(entry.d_name, snapname);
		entry.d_namlen = strlen(entry.d_name);
		entry.d_reclen = sizeof(entry);
		error = vfs_read_dirent(ap, &entry, uio->uio_offset);
		if (error != 0) {
			if (error == ENAMETOOLONG)
				error = 0;
			return (SET_ERROR(error));
		}
		uio->uio_offset = cookie + dots_offset;
	}
}
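
/*
 * In the readdir loop above, the directory offset and the snapshot list
 * cookie advance in lockstep: each snapshot entry is reported at
 * (cookie + dots_offset), so a resumed readdir recovers its cookie as
 * uio_offset - dots_offset.
 */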

static int
zfsctl_snapdir_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	dsl_dataset_t *ds = dmu_objset_ds(zfsvfs->z_os);
	sfs_node_t *node = vp->v_data;
	uint64_t snap_count;
	int err;

	zfsctl_common_getattr(vp, vap);
	vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
	vap->va_mtime = vap->va_ctime;
	vap->va_birthtime = vap->va_ctime;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (err != 0)
			return (err);
		vap->va_nlink += snap_count;
	}
	vap->va_size = vap->va_nlink;

	return (0);
}

static struct vop_vector zfsctl_ops_snapdir = {
	.vop_default = &default_vnodeops,
	.vop_open = zfsctl_common_open,
	.vop_close = zfsctl_common_close,
	.vop_getattr = zfsctl_snapdir_getattr,
	.vop_access = zfsctl_common_access,
	.vop_readdir = zfsctl_snapdir_readdir,
	.vop_lookup = zfsctl_snapdir_lookup,
	.vop_reclaim = zfsctl_common_reclaim,
	.vop_fid = zfsctl_common_fid,
	.vop_print = zfsctl_common_print,
};

static int
zfsctl_snapshot_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	VERIFY(vrecycle(vp) == 1);
	return (0);
}

static int
zfsctl_snapshot_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	void *data = vp->v_data;

	sfs_reclaim_vnode(vp);
	sfs_destroy_node(data);
	return (0);
}

static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
	struct mount *mp;
	vnode_t *dvp;
	vnode_t *vp;
	sfs_node_t *node;
	size_t len;
	int locked;
	int error;

	vp = ap->a_vp;
	node = vp->v_data;
	len = strlen(node->sn_name);
	if (*ap->a_buflen < len)
		return (SET_ERROR(ENOMEM));

	/*
	 * Prevent unmounting of the snapshot while the vnode lock
	 * is not held.  That is not strictly required, but allows
	 * us to assert that an uncovered snapshot vnode is never
	 * used.
	 */
	mp = vp->v_mountedhere;
	if (mp == NULL)
		return (SET_ERROR(ENOENT));
	error = vfs_busy(mp, 0);
	KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));

	/*
	 * We can vput the vnode as we can now depend on the reference owned
	 * by the busied mp.  But we also need to hold the vnode, because
	 * the reference may go away after vfs_unbusy() which has to be called
	 * before we can lock the vnode again.
	 */
	locked = VOP_ISLOCKED(vp);
	vhold(vp);
	vput(vp);

	/* Look up .zfs/snapshot, our parent. */
	error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
	if (error == 0) {
		VOP_UNLOCK(dvp, 0);
		*ap->a_vpp = dvp;
		*ap->a_buflen -= len;
		bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
	}
	vfs_unbusy(mp);
	vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
	return (error);
}

/*
 * These VP's should never see the light of day.  They should always
 * be covered.
 */
static struct vop_vector zfsctl_ops_snapshot = {
	.vop_default = NULL, /* ensure very restricted access */
	.vop_inactive = zfsctl_snapshot_inactive,
	.vop_reclaim = zfsctl_snapshot_reclaim,
	.vop_vptocnp = zfsctl_snapshot_vptocnp,
	.vop_lock1 = vop_stdlock,
	.vop_unlock = vop_stdunlock,
	.vop_islocked = vop_stdislocked,
	.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
	.vop_print = zfsctl_common_print,
};

int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	vnode_t *vp;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);

	*zfsvfsp = NULL;
	error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
	    ZFSCTL_INO_SNAPDIR, objsetid, &vp);
	if (error == 0 && vp != NULL) {
		/*
		 * XXX Probably need to at least reference, if not busy, the mp.
		 */
		if (vp->v_mountedhere != NULL)
			*zfsvfsp = vp->v_mountedhere->mnt_data;
		vput(vp);
	}
	if (*zfsvfsp == NULL)
		return (SET_ERROR(EINVAL));
	return (0);
}

/*
 * Unmount any snapshots for the given filesystem.  This is called from
 * zfs_umount() - if we have a ctldir, then go through and unmount all the
 * snapshots.
 */
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	struct mount *mp;
	vnode_t *vp;
	uint64_t cookie;
	uint64_t id;
	int error;

	ASSERT(zfsvfs->z_ctldir != NULL);

	cookie = 0;
	for (;;) {
		dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
		error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof(snapname),
		    snapname, &id, &cookie, NULL);
		dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
		if (error != 0) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		for (;;) {
			error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
			    ZFSCTL_INO_SNAPDIR, id, &vp);
			if (error != 0 || vp == NULL)
				break;

			mp = vp->v_mountedhere;

			/*
			 * v_mountedhere being NULL means that the
			 * (uncovered) vnode is in a transient state
			 * (mounting or unmounting), so loop until it
			 * settles down.
			 */
			if (mp != NULL)
				break;
			vput(vp);
			kern_yield(PRI_USER);
		}
		if (error != 0)
			break;
		if (vp == NULL)
			continue;	/* no mountpoint, nothing to do */

		/*
		 * The mount-point vnode is kept locked to avoid spurious EBUSY
		 * from a concurrent umount.
		 * The vnode lock must have recursive locking enabled.
		 */
		vfs_ref(mp);
		error = dounmount(mp, fflags, curthread);
		KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
		    ("extra references after unmount"));
		vput(vp);
		if (error != 0)
			break;
	}
	KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
	    ("force unmounting failed"));
	return (error);
}