 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.

/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/spa_boot.h>
#include <sys/objlist.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
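
/*
 * Map each supported mount option string to its TOKEN_* value for
 * match_token(). TOKEN_MNTPOINT carries a "%s" argument; TOKEN_LAST
 * terminates the table.
 */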
static const match_table_t zpl_tokens = {
	{ TOKEN_RO, MNTOPT_RO },
	{ TOKEN_RW, MNTOPT_RW },
	{ TOKEN_SETUID, MNTOPT_SETUID },
	{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
	{ TOKEN_EXEC, MNTOPT_EXEC },
	{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
	{ TOKEN_DEVICES, MNTOPT_DEVICES },
	{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
	{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
	{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
	{ TOKEN_XATTR, MNTOPT_XATTR },
	{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
	{ TOKEN_ATIME, MNTOPT_ATIME },
	{ TOKEN_NOATIME, MNTOPT_NOATIME },
	{ TOKEN_RELATIME, MNTOPT_RELATIME },
	{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
	{ TOKEN_NBMAND, MNTOPT_NBMAND },
	{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
	{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
	{ TOKEN_LAST, NULL },
zfsvfs_vfs_free(vfs_t *vfsp)
	if (vfsp->vfs_mntpoint != NULL)
		kmem_strfree(vfsp->vfs_mntpoint);

	kmem_free(vfsp, sizeof (vfs_t));
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
		vfsp->vfs_readonly = B_TRUE;
		vfsp->vfs_do_readonly = B_TRUE;
		vfsp->vfs_readonly = B_FALSE;
		vfsp->vfs_do_readonly = B_TRUE;
		vfsp->vfs_setuid = B_TRUE;
		vfsp->vfs_do_setuid = B_TRUE;
		vfsp->vfs_setuid = B_FALSE;
		vfsp->vfs_do_setuid = B_TRUE;
		vfsp->vfs_exec = B_TRUE;
		vfsp->vfs_do_exec = B_TRUE;
		vfsp->vfs_exec = B_FALSE;
		vfsp->vfs_do_exec = B_TRUE;
		vfsp->vfs_devices = B_TRUE;
		vfsp->vfs_do_devices = B_TRUE;
	case TOKEN_NODEVICES:
		vfsp->vfs_devices = B_FALSE;
		vfsp->vfs_do_devices = B_TRUE;
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		vfsp->vfs_xattr = ZFS_XATTR_SA;
		vfsp->vfs_do_xattr = B_TRUE;
		vfsp->vfs_xattr = ZFS_XATTR_DIR;
		vfsp->vfs_do_xattr = B_TRUE;
		vfsp->vfs_xattr = ZFS_XATTR_OFF;
		vfsp->vfs_do_xattr = B_TRUE;
		vfsp->vfs_atime = B_TRUE;
		vfsp->vfs_do_atime = B_TRUE;
		vfsp->vfs_atime = B_FALSE;
		vfsp->vfs_do_atime = B_TRUE;
		vfsp->vfs_relatime = B_TRUE;
		vfsp->vfs_do_relatime = B_TRUE;
	case TOKEN_NORELATIME:
		vfsp->vfs_relatime = B_FALSE;
		vfsp->vfs_do_relatime = B_TRUE;
		vfsp->vfs_nbmand = B_TRUE;
		vfsp->vfs_do_nbmand = B_TRUE;
		vfsp->vfs_nbmand = B_FALSE;
		vfsp->vfs_do_nbmand = B_TRUE;
		vfsp->vfs_mntpoint = match_strdup(&args[0]);
		if (vfsp->vfs_mntpoint == NULL)
			return (SET_ERROR(ENOMEM));
 * Parse the raw mntopts and return a vfs_t describing the options.
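 *
 * For example, mntopts "ro,noatime" yields a vfs_t with vfs_readonly
 * and vfs_do_readonly set and vfs_atime cleared with vfs_do_atime set,
 * one zfsvfs_parse_option() call per comma-separated option.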
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
	tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);

	if (mntopts != NULL) {
		substring_t args[MAX_OPT_ARGS];
		char *tmp_mntopts, *p, *t;

		tmp_mntopts = t = kmem_strdup(mntopts);
		if (tmp_mntopts == NULL) {
			/* Free the half-built vfs_t rather than leaking it. */
			zfsvfs_vfs_free(tmp_vfsp);
			return (SET_ERROR(ENOMEM));
		}

		while ((p = strsep(&t, ",")) != NULL) {
			args[0].to = args[0].from = NULL;
			token = match_token(p, zpl_tokens, args);
			error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
				kmem_strfree(tmp_mntopts);
				zfsvfs_vfs_free(tmp_vfsp);
		kmem_strfree(tmp_mntopts);
zfs_is_readonly(zfsvfs_t *zfsvfs)
	return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	 * Semantically, the only requirement is that the sync be initiated.
	 * The DMU syncs out txgs frequently, so there's nothing to do.
	if (zfsvfs != NULL) {
		 * Sync a specific filesystem.
		dp = dmu_objset_pool(zfsvfs->z_os);

		 * If the system is shutting down, then skip any
		 * filesystems which may exist on a suspended pool.
		if (spa_suspended(dp->dp_spa)) {
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, 0);

		 * Sync all ZFS filesystems. This is what happens when you
		 * run sync(1M). Unlike other filesystems, ZFS honors the
		 * request by waiting for all pools to commit all dirty data.
atime_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

	 * Update SB_NOATIME bit in VFS super block. Since atime update is
	 * determined by atime_needs_update(), atime_needs_update() needs to
	 * return false if atime is turned off, and not unconditionally return
	 * false if atime is turned on.
		sb->s_flags &= ~SB_NOATIME;
		sb->s_flags |= SB_NOATIME;
relatime_changed_cb(void *arg, uint64_t newval)
	((zfsvfs_t *)arg)->z_relatime = newval;

xattr_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;

	if (newval == ZFS_XATTR_OFF) {
		zfsvfs->z_flags &= ~ZSB_XATTR;
		zfsvfs->z_flags |= ZSB_XATTR;

		if (newval == ZFS_XATTR_SA)
			zfsvfs->z_xattr_sa = B_TRUE;
			zfsvfs->z_xattr_sa = B_FALSE;
acltype_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;

	case ZFS_ACLTYPE_OFF:
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
	case ZFS_ACLTYPE_POSIXACL:
#ifdef CONFIG_FS_POSIX_ACL
		zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIXACL;
		zfsvfs->z_sb->s_flags |= SB_POSIXACL;
		zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
		zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
blksz_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;
	ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
	ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
	ASSERT(ISP2(newval));

	zfsvfs->z_max_blksz = newval;

readonly_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

		sb->s_flags |= SB_RDONLY;
		sb->s_flags &= ~SB_RDONLY;

devices_changed_cb(void *arg, uint64_t newval)

setuid_changed_cb(void *arg, uint64_t newval)

exec_changed_cb(void *arg, uint64_t newval)

nbmand_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;
	struct super_block *sb = zfsvfs->z_sb;

		sb->s_flags |= SB_MANDLOCK;
		sb->s_flags &= ~SB_MANDLOCK;

snapdir_changed_cb(void *arg, uint64_t newval)
	((zfsvfs_t *)arg)->z_show_ctldir = newval;

vscan_changed_cb(void *arg, uint64_t newval)
	((zfsvfs_t *)arg)->z_vscan = newval;

acl_mode_changed_cb(void *arg, uint64_t newval)
	zfsvfs_t *zfsvfs = arg;

	zfsvfs->z_acl_mode = newval;

acl_inherit_changed_cb(void *arg, uint64_t newval)
	((zfsvfs_t *)arg)->z_acl_inherit = newval;
zfs_register_callbacks(vfs_t *vfsp)
	struct dsl_dataset *ds = NULL;
	zfsvfs_t *zfsvfs = NULL;

	zfsvfs = vfsp->vfs_data;

	 * The act of registering our callbacks will destroy any mount
	 * options we may have. In order to enable temporary overrides
	 * of mount options, we stash away the current values and
	 * restore them after we register the callbacks.
	if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
		vfsp->vfs_do_readonly = B_TRUE;
		vfsp->vfs_readonly = B_TRUE;

	 * Register property callbacks.
	 *
	 * It would probably be fine to just check for i/o error from
	 * the first prop_register(), but I guess I like to go
	 * overboard...
	ds = dmu_objset_ds(os);
	dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
	error = dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
	    zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
	error = error ? error : dsl_prop_register(ds,
	    zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
	dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
	 * Invoke our callbacks to restore temporary mount options.
	if (vfsp->vfs_do_readonly)
		readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
	if (vfsp->vfs_do_setuid)
		setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
	if (vfsp->vfs_do_exec)
		exec_changed_cb(zfsvfs, vfsp->vfs_exec);
	if (vfsp->vfs_do_devices)
		devices_changed_cb(zfsvfs, vfsp->vfs_devices);
	if (vfsp->vfs_do_xattr)
		xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
	if (vfsp->vfs_do_atime)
		atime_changed_cb(zfsvfs, vfsp->vfs_atime);
	if (vfsp->vfs_do_relatime)
		relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
	if (vfsp->vfs_do_nbmand)
		nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);

	dsl_prop_unregister_all(ds, zfsvfs);
 * Takes a dataset, a property, a value and that value's setpoint as
 * found in the ZAP. Checks if the property has been changed in the vfs.
 * If so, val and setpoint will be overwritten with updated content.
 * Otherwise, they are left unchanged.
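 *
 * For example, after a remount with -o ro, ZFS_PROP_READONLY reports
 * the temporary vfs_readonly value and setpoint reads "temporary"
 * rather than the on-disk source.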
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
	error = dmu_objset_from_ds(ds, &os);

	if (dmu_objset_type(os) != DMU_OST_ZFS)

	mutex_enter(&os->os_user_ptr_lock);
	zfvp = dmu_objset_get_user(os);
	mutex_exit(&os->os_user_ptr_lock);

		if (vfsp->vfs_do_atime)
			tmp = vfsp->vfs_atime;
	case ZFS_PROP_RELATIME:
		if (vfsp->vfs_do_relatime)
			tmp = vfsp->vfs_relatime;
	case ZFS_PROP_DEVICES:
		if (vfsp->vfs_do_devices)
			tmp = vfsp->vfs_devices;
		if (vfsp->vfs_do_exec)
			tmp = vfsp->vfs_exec;
	case ZFS_PROP_SETUID:
		if (vfsp->vfs_do_setuid)
			tmp = vfsp->vfs_setuid;
	case ZFS_PROP_READONLY:
		if (vfsp->vfs_do_readonly)
			tmp = vfsp->vfs_readonly;
		if (vfsp->vfs_do_xattr)
			tmp = vfsp->vfs_xattr;
	case ZFS_PROP_NBMAND:
		if (vfsp->vfs_do_nbmand)
			tmp = vfsp->vfs_nbmand;
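
	/*
	 * "temporary" matches the property source that zfs get reports
	 * for mount-option overrides.
	 */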
	(void) strcpy(setpoint, "temporary");
 * Associate this zfsvfs with the given objset, which must be owned.
 * This will cache a bunch of on-disk state from the objset in the
 * zfsvfs.
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
	zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
	if (zfsvfs->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool. Pool must be upgraded to mount "
		    "this file system.\n", (u_longlong_t)zfsvfs->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		return (SET_ERROR(ENOTSUP));
	error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
	zfsvfs->z_norm = (int)val;

	error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
	zfsvfs->z_utf8 = (val != 0);

	error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
	zfsvfs->z_case = (uint_t)val;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
	zfsvfs->z_acl_type = (uint_t)val;

	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
	    zfsvfs->z_case == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);

	if (zfsvfs->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
		if ((error == 0) && (val == ZFS_XATTR_SA))
			zfsvfs->z_xattr_sa = B_TRUE;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	ASSERT(zfsvfs->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zfsvfs->z_unlinkedobj);
	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zfsvfs->z_userquota_obj);
		zfsvfs->z_userquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zfsvfs->z_groupquota_obj);
		zfsvfs->z_groupquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
	    8, 1, &zfsvfs->z_projectquota_obj);
		zfsvfs->z_projectquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
	    8, 1, &zfsvfs->z_userobjquota_obj);
		zfsvfs->z_userobjquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
	    8, 1, &zfsvfs->z_groupobjquota_obj);
		zfsvfs->z_groupobjquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
	    8, 1, &zfsvfs->z_projectobjquota_obj);
		zfsvfs->z_projectobjquota_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zfsvfs->z_fuid_obj);
		zfsvfs->z_fuid_obj = 0;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zfsvfs->z_shares_dir);
		zfsvfs->z_shares_dir = 0;
	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);

	if (zfsvfs->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
	boolean_t ro = (readonly || (strchr(osname, '@') != NULL));

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));

	error = zfsvfs_create_impl(zfvp, zfsvfs, os);
		dmu_objset_disown(os, B_TRUE, zfsvfs);
 * Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
 * on a failure. Do not pass in a statically allocated zfsvfs.
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
	zfsvfs->z_vfs = NULL;
	zfsvfs->z_parent = zfsvfs;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
	rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
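
	/*
	 * Create the znode hold hash: the bucket count is
	 * zfs_object_mutex_size rounded down to a power of two, with one
	 * AVL tree and one mutex per bucket.
	 */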
	int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
	zfsvfs->z_hold_size = size;
	zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
	zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
	for (int i = 0; i != size; i++) {
		avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
		    sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
		mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);

	error = zfsvfs_init(zfsvfs, os);

	zfsvfs->z_drain_task = TASKQID_INVALID;
	zfsvfs->z_draining = B_FALSE;
	zfsvfs->z_drain_cancel = B_TRUE;
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
	boolean_t readonly = zfs_is_readonly(zfsvfs);

	error = zfs_register_callbacks(zfsvfs->z_vfs);

	zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);

		 * If we are not mounting (ie: online recv), then we don't
		 * have to worry about replaying the log as we blocked all
		 * operations out since we closed the ZIL.
		ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
		dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);

		 * During replay we remove the read only flag to
		 * allow replays to succeed.
			readonly_changed_cb(zfsvfs, B_FALSE);

		if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
			dataset_kstats_update_nunlinks_kstat(
			    &zfsvfs->z_kstat, zs.zs_num_entries);
			dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
			    "num_entries in unlinked set: %llu",

		zfs_unlinked_drain(zfsvfs);
		dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
		dd->dd_activity_cancelled = B_FALSE;
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain(). (Further note: ziltest
		 * doesn't use readonly mounts, where
		 * zfs_unlinked_drain() isn't called.) This is because
		 * ziltest causes spa_sync() to think it's committed,
		 * but actually it is not, so the intent log contains
		 * many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated
		 * in a yet later txg. This would write a "create
		 * object N" record to the intent log. Normally, this
		 * would be fine because the spa_sync() would have
		 * written out the fact that object N is free, before
		 * we could write the "create object N" intent log
		 * record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk. So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
			if (zil_replay_disable) {
				zil_destroy(zfsvfs->z_log, B_FALSE);
				zfsvfs->z_replay = B_TRUE;
				zil_replay(zfsvfs->z_os, zfsvfs,
				zfsvfs->z_replay = B_FALSE;
		/* restore readonly bit */
			readonly_changed_cb(zfsvfs, B_TRUE);

	 * Set the objset user_ptr to track its zfsvfs.
	mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
	dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
	mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
zfsvfs_free(zfsvfs_t *zfsvfs)
	int i, size = zfsvfs->z_hold_size;

	zfs_fuid_destroy(zfsvfs);

	mutex_destroy(&zfsvfs->z_znodes_lock);
	mutex_destroy(&zfsvfs->z_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rrm_destroy(&zfsvfs->z_teardown_lock);
	rw_destroy(&zfsvfs->z_teardown_inactive_lock);
	rw_destroy(&zfsvfs->z_fuid_lock);
	for (i = 0; i != size; i++) {
		avl_destroy(&zfsvfs->z_hold_trees[i]);
		mutex_destroy(&zfsvfs->z_hold_locks[i]);
	vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
	vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
	zfsvfs_vfs_free(zfsvfs->z_vfs);
	dataset_kstats_destroy(&zfsvfs->z_kstat);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);

zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
	objset_t *os = zfsvfs->z_os;

	if (!dmu_objset_is_snapshot(os))
		dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
 * Check that the hex label string is appropriate for the dataset being
 * mounted into the global_zone proper.
 *
 * Return an error if the hex label string is not default or
 * admin_low/admin_high. For admin_low labels, the corresponding
 * dataset must be readonly.
zfs_check_global_label(const char *dsname, const char *hexsl)
	if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
	if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
	if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
		/* must be readonly */
		if (dsl_prop_get_integer(dsname,
		    zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
			return (SET_ERROR(EACCES));
		return (rdonly ? 0 : SET_ERROR(EACCES));
	return (SET_ERROR(EACCES));
#endif /* HAVE_MLSLABEL */
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
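
	/*
	 * buf holds DMU_OBJACCT_PREFIX followed by the project ID string;
	 * the quota ZAPs are keyed on the ID alone, so the byte-quota
	 * lookups below start at buf + offset.
	 */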
	strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
	err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
	    sizeof (buf) - offset, B_FALSE);

	if (zfsvfs->z_projectquota_obj == 0)

	err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
	    buf + offset, 8, 1, &quota);

	err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
	    buf + offset, 8, 1, &used);
	if (unlikely(err == ENOENT)) {
		u_longlong_t nblocks;

		 * Quota accounting is async, so a race is possible here;
		 * there is at least one object with the given project ID.
		sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
		if (unlikely(zp->z_blksz == 0))
			blksize = zfsvfs->z_max_blksz;

		used = blksize * nblocks;

	statp->f_blocks = quota >> bshift;
	statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
	statp->f_bavail = statp->f_bfree;
	if (zfsvfs->z_projectobjquota_obj == 0)

	err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
	    buf + offset, 8, 1, &quota);

	err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
	if (unlikely(err == ENOENT)) {
		 * Quota accounting is async, so a race is possible here;
		 * there is at least one object with the given project ID.

	statp->f_files = quota;
	statp->f_ffree = (quota > used) ? (quota - used) : 0;
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint64_t refdbytes, availbytes, usedobjs, availobjs;

	dmu_objset_space(zfsvfs->z_os,
	    &refdbytes, &availbytes, &usedobjs, &availobjs);
	uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);

	 * The underlying storage pool actually uses multiple block
	 * sizes. Under Solaris frsize (fragment size) is reported as
	 * the smallest block size we support, and bsize (block size)
	 * as the filesystem's maximum block size. Unfortunately,
	 * under Linux the fragment size and block size are often used
	 * interchangeably. Thus we are forced to report both of them
	 * as the filesystem's maximum block size.
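	 *
	 * For example, with the default 128K z_max_blksz
	 * (SPA_OLD_MAXBLOCKSIZE), f_bsize and f_frsize are both 131072
	 * and bshift below is 17, so block counts are in 128K units.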
	statp->f_frsize = zfsvfs->z_max_blksz;
	statp->f_bsize = zfsvfs->z_max_blksz;
	uint32_t bshift = fls(statp->f_bsize) - 1;
	 * The following report "total" blocks of various kinds in
	 * the file system, but reported in terms of f_bsize - the
	 * "preferred" size.
	/* Round up so we never have a filesystem using 0 blocks. */
	refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
	statp->f_blocks = (refdbytes + availbytes) >> bshift;
	statp->f_bfree = availbytes >> bshift;
	statp->f_bavail = statp->f_bfree; /* no root reservation */

	 * statvfs() should really be called statufs(), because it assumes
	 * static metadata. ZFS doesn't preallocate files, so the best
	 * we can do is report the max that could possibly fit in f_files,
	 * and that minus the number actually used in f_ffree.
	 * For f_ffree, report the smaller of the number of objects available
	 * and the number of blocks (each object will take at least a block).
	statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
	statp->f_files = statp->f_ffree + usedobjs;
	statp->f_fsid.val[0] = (uint32_t)fsid;
	statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
	statp->f_type = ZFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;
	 * We have all of 40 characters to stuff a string here.
	 * Is there anything useful we could/should provide?
	bzero(statp->f_spare, sizeof (statp->f_spare));

	if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
	    dmu_objset_projectquota_present(zfsvfs->z_os)) {
		znode_t *zp = ITOZ(ip);

		if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
		    zpl_is_valid_projid(zp->z_projid))
			err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
	error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
		*ipp = ZTOI(rootzp);
 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
 * To accommodate this we must improvise and manually walk the list of znodes
 * attempting to prune dentries in order to be able to drop the inodes.
 *
 * To avoid scanning the same znodes multiple times they are always rotated
 * to the end of the z_all_znodes list. New znodes are inserted at the
 * end of the list so we're always scanning the oldest znodes first.
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
	znode_t **zp_array, *zp;
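	/*
	 * Cap the batch at nr_to_scan znodes, and at no more than eight
	 * pages worth of znode pointers (assuming 64-bit pointers).
	 */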
	int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));

	zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);

	mutex_enter(&zfsvfs->z_znodes_lock);
	while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
		if ((i++ > nr_to_scan) || (j >= max_array))

		ASSERT(list_link_active(&zp->z_link_node));
		list_remove(&zfsvfs->z_all_znodes, zp);
		list_insert_tail(&zfsvfs->z_all_znodes, zp);

		/* Skip active znodes and .zfs entries */
		if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)

		if (igrab(ZTOI(zp)) == NULL)

	mutex_exit(&zfsvfs->z_znodes_lock);

	for (i = 0; i < j; i++) {
		ASSERT3P(zp, !=, NULL);
		d_prune_aliases(ZTOI(zp));

		if (atomic_read(&ZTOI(zp)->i_count) == 1)

	kmem_free(zp_array, max_array * sizeof (znode_t *));
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches. This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
    defined(SHRINK_CONTROL_HAS_NID) && \
    defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		for_each_online_node(sc.nid) {
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#error "No available dentry and inode cache pruning mechanism."

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
	 * Fall back to zfs_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
		*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);

	dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);
 * Teardown the zfsvfs_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
	zfs_unlinked_drain_stop_wait(zfsvfs);

	 * If someone has not already unmounted this file system,
	 * drain the zrele_taskq to ensure all active references to the
	 * zfsvfs_t have been handled; only then can it be safely destroyed.

		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		while (zfsvfs->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_zrele_taskq(
			    dmu_objset_pool(zfsvfs->z_os)), 0);
			if (++round > 1 && !unmounting)
	rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);

		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block. Note, 'z_parent' is self referential
		 * for non-snapshots.
		shrink_dcache_sb(zfsvfs->z_parent->z_sb);

	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);

	 * If we are not unmounting (ie: online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs. We also grab an extra reference to all
	 * the remaining inodes so that the kernel does not attempt to free
	 * any inodes of a suspended fs. This can cause deadlocks since the
	 * zfs_resume_fs() process may involve starting threads, which might
	 * attempt to free unreferenced inodes to free up memory for the new
	 * filesystem.
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		zfs_znode_dmu_fini(zp);
		if (igrab(ZTOI(zp)) != NULL)
			zp->z_suspended = B_TRUE;
	mutex_exit(&zfsvfs->z_znodes_lock);
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock. zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
		zfsvfs->z_unmounted = B_TRUE;
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

	 * z_os will be NULL if there was an error in attempting to reopen
	 * zfsvfs, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	if (zfsvfs->z_os == NULL)

	 * Unregister properties.
	zfs_unregister_callbacks(zfsvfs);

	 * Evict cached data. We must write out any dirty data before
	 * disowning the dataset.
	objset_t *os = zfsvfs->z_os;
	boolean_t os_dirty = B_FALSE;
	for (int t = 0; t < TXG_SIZE; t++) {
		if (dmu_objset_is_dirty(os, t)) {
	if (!zfs_is_readonly(zfsvfs) && os_dirty) {
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
	dmu_objset_evict_dbufs(zfsvfs->z_os);
	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
	dsl_dir_cancel_waiters(dd);
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
	const char *osname = zm->mnt_osname;
	struct inode *root_inode;
	uint64_t recordsize;
	zfsvfs_t *zfsvfs = NULL;

	error = zfsvfs_parse_options(zm->mnt_data, &vfs);

	error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
		zfsvfs_vfs_free(vfs);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL))) {
		zfsvfs_vfs_free(vfs);

	vfs->vfs_data = zfsvfs;
	zfsvfs->z_vfs = vfs;

	sb->s_fs_info = zfsvfs;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);

	error = -zpl_bdi_setup(sb, "zfs");

	sb->s_bdi->ra_pages = 0;
	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
	sb->s_d_op = &zpl_dentry_operations;

	/* Set features for file system. */
	zfs_set_fuid_feature(zfsvfs);

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
		xattr_changed_cb(zfsvfs, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
		acltype_changed_cb(zfsvfs, pval);
		zfsvfs->z_issnap = B_TRUE;
		zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
		zfsvfs->z_snap_defer_time = jiffies;

		mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
		mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);

	if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zfsvfs, &root_inode);
		(void) zfs_umount(sb);

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		error = SET_ERROR(ENOMEM);

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);

	zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);

	if (zfsvfs != NULL) {
		dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
		zfsvfs_free(zfsvfs);
		 * make sure we don't have dangling sb->s_fs_info which
		 * zfs_preumount will use.
		sb->s_fs_info = NULL;
 * Called when an unmount is requested and certain sanity checks have
 * already passed. At this point no dentries or inodes have been reclaimed
 * from their respective caches. We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed. All snapshots
 * must already have been unmounted to reach this point.
zfs_preumount(struct super_block *sb)
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	/* zfsvfs is NULL when zfs_domount fails during mount */
		zfs_unlinked_drain_stop_wait(zfsvfs);
		zfsctl_destroy(sb->s_fs_info);
		 * Wait for zrele_async before entering evict_inodes in
		 * generic_shutdown_super. The reason we must finish before
		 * evict_inodes is when lazytime is on, or when zfs_purgedir
		 * calls zfs_zget, zrele would bump i_count from 0 to 1. This
		 * would race with the i_count check in evict_inodes. This means
		 * it could destroy the inode while we are still using it.
		 *
		 * We wait for two passes. xattr directories in the first pass
		 * may add xattr entries in zfs_purgedir, so in the second pass
		 * we wait for them. We don't use taskq_wait here because it is
		 * a pool wide taskq. Other mounted filesystems can constantly
		 * do zrele_async and there's no guarantee when taskq will be
		 * empty.
		taskq_wait_outstanding(dsl_pool_zrele_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
		taskq_wait_outstanding(dsl_pool_zrele_taskq(
		    dmu_objset_pool(zfsvfs->z_os)), 0);
 * Called once all other unmount-related tear down has occurred.
 * It is our responsibility to release any remaining infrastructure.
zfs_umount(struct super_block *sb)
	zfsvfs_t *zfsvfs = sb->s_fs_info;

	if (zfsvfs->z_arc_prune != NULL)
		arc_remove_prune_callback(zfsvfs->z_arc_prune);
	VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
	zpl_bdi_destroy(sb);

	 * z_os will be NULL if there was an error in
	 * attempting to reopen zfsvfs.
		 * Unset the objset user_ptr.
		mutex_enter(&os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os_user_ptr_lock);

		 * Finally release the objset
		dmu_objset_disown(os, B_TRUE, zfsvfs);

	zfsvfs_free(zfsvfs);
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);

	if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
	    !(*flags & SB_RDONLY)) {
		*flags |= SB_RDONLY;

	error = zfsvfs_parse_options(zm->mnt_data, &vfsp);

	if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);

	zfs_unregister_callbacks(zfsvfs);
	zfsvfs_vfs_free(zfsvfs->z_vfs);

	vfsp->vfs_data = zfsvfs;
	zfsvfs->z_vfs = vfsp;
	(void) zfs_register_callbacks(vfsp);
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
	zfsvfs_t *zfsvfs = sb->s_fs_info;
	uint64_t object = 0;
	uint64_t fid_gen = 0;

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t *zfid = (zfid_short_t *)fidp;
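
		/*
		 * zf_object and zf_gen are byte arrays; reassemble each
		 * value least-significant byte first.
		 */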
		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
		return (SET_ERROR(EINVAL));
	/* LONG_FID_LEN means snapdirs */
	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t *zlfid = (zfid_long_t *)fidp;
		uint64_t objsetid = 0;
		uint64_t setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
			dprintf("snapdir fid: objsetid (%llu) != "
			    "ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
			    objsetid, ZFSCTL_INO_SNAPDIRS, object);
			return (SET_ERROR(EINVAL));

		if (fid_gen > 1 || setgen != 0) {
			dprintf("snapdir fid: fid_gen (%llu) and setgen "
			    "(%llu)\n", fid_gen, setgen);
			return (SET_ERROR(EINVAL));

		return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
	/* A zero fid_gen means we are in the .zfs control directories */
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zfsvfs->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zfsvfs, object, &zp))) {

	/* Don't export xattr stuff */
	if (zp->z_pflags & ZFS_XATTR) {
		return (SET_ERROR(ENOENT));

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
	zp_gen = zp_gen & gen_mask;

	if ((fid_gen == 0) && (zfsvfs->z_root == object))
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,

		return (SET_ERROR(ENOENT));

	zfs_inode_update(ITOZ(*ipp));
 * Block out VFS ops and close zfsvfs_t
 *
 * Note, if successful, then we return with the 'z_teardown_lock' and
 * 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
 * dataset and objset intact so that they can be atomically handed off during
 * a subsequent rollback or recv operation and the resume thereafter.
zfs_suspend_fs(zfsvfs_t *zfsvfs)
	if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
 * Rebuild SA and release VOPs. Note that ownership of the underlying dataset
 * is an invariant across any of the operations that can be performed while the
 * filesystem was suspended. Whether it succeeded or failed, the preconditions
 * are the same: the relevant objset and associated dataset are owned by
 * zfsvfs, held, and long held on entry.
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
	ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	 * We already own this, so just update the objset_t, as the one we
	 * had before may have been evicted.
	VERIFY3P(ds->ds_owner, ==, zfsvfs);
	VERIFY(dsl_dataset_long_held(ds));
	dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
	dsl_pool_config_enter(dp, FTAG);
	VERIFY0(dmu_objset_from_ds(ds, &os));
	dsl_pool_config_exit(dp, FTAG);
	err = zfsvfs_init(zfsvfs, os);

	ds->ds_dir->dd_activity_cancelled = B_FALSE;
	VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);

	zfs_set_fuid_feature(zfsvfs);
	zfsvfs->z_rollback_time = jiffies;

	 * Attempt to re-establish all the active inodes with their
	 * dbufs. If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale. This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number. The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp;
	    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;

		/* see comment in zfs_suspend_fs() */
		if (zp->z_suspended) {
			zfs_zrele_async(zp);
			zp->z_suspended = B_FALSE;
	mutex_exit(&zfsvfs->z_znodes_lock);

	if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
		 * zfs_suspend_fs() could have interrupted freeing
		 * of dnodes. We need to restart this freeing so
		 * that we don't "leak" the space.
		zfs_unlinked_drain(zfsvfs);
	 * Most of the time zfs_suspend_fs is used for changing the contents
	 * of the underlying dataset. ZFS rollback and receive operations
	 * might create files for which negative dentries are present in
	 * the cache. Since walking the dcache would require a lot of GPL-only
	 * code duplication, it's much easier on these rather rare occasions
	 * just to flush the whole dcache for the given dataset/filesystem.
	shrink_dcache_sb(zfsvfs->z_sb);

		zfsvfs->z_unmounted = B_TRUE;

		/* release the VFS ops */
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
		rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

		 * Since we couldn't set up the sa framework, try to force
		 * unmount this file system.
		(void) zfs_umount(zfsvfs->z_sb);
 * Release VOPs and unmount a suspended filesystem.
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
	ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	VERIFY3P(ds->ds_owner, ==, zfsvfs);
	VERIFY(dsl_dataset_long_held(ds));
	dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
	dsl_pool_config_enter(dp, FTAG);
	VERIFY0(dmu_objset_from_ds(ds, &os));
	dsl_pool_config_exit(dp, FTAG);

	/* release the VOPs */
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
	rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

	 * Try to force unmount this file system.
	(void) zfs_umount(zfsvfs->z_sb);
	zfsvfs->z_unmounted = B_TRUE;
 * Automounted snapshots rely on periodic revalidation
 * to defer snapshots from being automatically unmounted.
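 *
 * Each zfs_exit_fs() call on a snapshot re-arms the unmount timer, but
 * no more often than every half expiry interval (with a one-second
 * floor), to limit churn.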
zfs_exit_fs(zfsvfs_t *zfsvfs)
	if (!zfsvfs->z_issnap)

	if (time_after(jiffies, zfsvfs->z_snap_defer_time +
	    MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
		zfsvfs->z_snap_defer_time = jiffies;
		zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
		    dmu_objset_id(zfsvfs->z_os),
		    zfs_expire_snapshot);
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
	objset_t *os = zfsvfs->z_os;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zfsvfs->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zfsvfs->z_os)))
		return (SET_ERROR(ENOTSUP));
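
	/*
	 * The version bump updates ZPL_VERSION_STR in the master node
	 * and, when first crossing ZPL_VERSION_SA, also creates and
	 * registers the SA master node object, all in one transaction.
	 */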
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);

	if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
		ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);

	spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
	    "from %llu to %llu", zfsvfs->z_version, newvers);

	zfsvfs->z_version = newvers;
	os->os_version = newvers;

	zfs_set_fuid_feature(zfsvfs);
 * Read a property stored within the master node.
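 *
 * The lookup order is: the objset_t's cached copy, then the master
 * node ZAP, then the property's default value.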
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
	uint64_t *cached_copy = NULL;

	 * Figure out where in the objset_t the cached copy would live, if it
	 * is available for the requested property.
	case ZFS_PROP_VERSION:
		cached_copy = &os->os_version;
	case ZFS_PROP_NORMALIZE:
		cached_copy = &os->os_normalization;
	case ZFS_PROP_UTF8ONLY:
		cached_copy = &os->os_utf8only;
		cached_copy = &os->os_casesensitivity;
	if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
		*value = *cached_copy;

	 * If the property wasn't cached, look up the file system's value for
	 * the property. For the version property, we look up a slightly
	if (prop == ZFS_PROP_VERSION)
		pname = ZPL_VERSION_STR;
		pname = zfs_prop_to_name(prop);

	ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
	error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);

	if (error == ENOENT) {
		/* No value set, use the default value */
		case ZFS_PROP_VERSION:
			*value = ZPL_VERSION;
		case ZFS_PROP_NORMALIZE:
		case ZFS_PROP_UTF8ONLY:
			*value = ZFS_CASE_SENSITIVE;
		case ZFS_PROP_ACLTYPE:
			*value = ZFS_ACLTYPE_OFF;

	 * If one of the methods for getting the property value above worked,
	 * copy it into the objset_t's cache.
	if (error == 0 && cached_copy != NULL) {
		*cached_copy = *value;
 * Return true if the corresponding vfs's unmounted flag is set.
 * Otherwise return false.
 *
 * If this function returns true we know VFS unmount has been initiated.
zfs_get_vfs_flag_unmounted(objset_t *os)
	boolean_t unmounted = B_FALSE;

	ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);

	mutex_enter(&os->os_user_ptr_lock);
	zfvp = dmu_objset_get_user(os);
	if (zfvp != NULL && zfvp->z_unmounted)
	mutex_exit(&os->os_user_ptr_lock);
zfsvfs_update_fromname(const char *oldname, const char *newname)
	 * We don't need to do anything here, the devname is always current by
	 * virtue of zfsvfs->z_sb->s_op->show_devname.

	dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
	register_filesystem(&zpl_fs_type);

	 * we don't use outstanding because zpl_posix_acl_free might add more.
	taskq_wait(system_delay_taskq);
	taskq_wait(system_taskq);
	unregister_filesystem(&zpl_fs_type);
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);