4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/types.h>
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/kernel.h>
30 #include <sys/sysmacros.h>
33 #include <sys/vnode.h>
35 #include <sys/mntent.h>
36 #include <sys/mount.h>
37 #include <sys/cmn_err.h>
38 #include <sys/zfs_znode.h>
39 #include <sys/zfs_dir.h>
41 #include <sys/fs/zfs.h>
43 #include <sys/dsl_prop.h>
44 #include <sys/dsl_dataset.h>
45 #include <sys/dsl_deleg.h>
48 #include <sys/varargs.h>
49 #include <sys/policy.h>
50 #include <sys/atomic.h>
51 #include <sys/zfs_ioctl.h>
52 #include <sys/zfs_ctldir.h>
53 #include <sys/zfs_fuid.h>
54 #include <sys/sunddi.h>
56 #include <sys/dmu_objset.h>
57 #include <sys/spa_boot.h>
/*
 * Module-scope state and FreeBSD sysctl/tunable plumbing.
 * Exposes vfs.zfs.super_owner, vfs.zfs.debug, and the read-only
 * vfs.zfs.version.* nodes reporting compiled-in format versions.
 * NOTE(review): embedded line numbers jump (e.g. 70 -> 73, 75 -> 77),
 * so some original lines (sysctl description strings, etc.) are
 * missing from this copy — recover them from upstream before building.
 */
59 struct mtx zfs_debug_mtx;
60 MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
62 SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");
/* When nonzero, the owner of a delegated dataset may mount/admin it. */
64 int zfs_super_owner = 0;
65 SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
66 "File system owner can perform privileged operation on his file systems");
68 int zfs_debug_level = 0;
69 TUNABLE_INT("vfs.zfs.debug", &zfs_debug_level);
70 SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RW, &zfs_debug_level, 0,
73 SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");
74 static int zfs_version_acl = ZFS_ACL_VERSION;
75 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, acl, CTLFLAG_RD, &zfs_version_acl, 0,
77 static int zfs_version_dmu_backup_header = DMU_BACKUP_HEADER_VERSION;
78 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, dmu_backup_header, CTLFLAG_RD,
79 &zfs_version_dmu_backup_header, 0, "DMU_BACKUP_HEADER_VERSION");
80 static int zfs_version_dmu_backup_stream = DMU_BACKUP_STREAM_VERSION;
81 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, dmu_backup_stream, CTLFLAG_RD,
82 &zfs_version_dmu_backup_stream, 0, "DMU_BACKUP_STREAM_VERSION");
83 static int zfs_version_spa = SPA_VERSION;
84 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, spa, CTLFLAG_RD, &zfs_version_spa, 0,
86 static int zfs_version_zpl = ZPL_VERSION;
87 SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
/*
 * Forward declarations for the vfsops entry points implemented below
 * and wired into zfs_vfsops.
 */
90 static int zfs_mount(vfs_t *vfsp);
91 static int zfs_umount(vfs_t *vfsp, int fflag);
92 static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
93 static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
94 static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
95 static int zfs_sync(vfs_t *vfsp, int waitfor);
96 static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
97 struct ucred **credanonp, int *numsecflavors, int **secflavors);
98 static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp);
99 static void zfs_objset_close(zfsvfs_t *zfsvfs);
100 static void zfs_freevfs(vfs_t *vfsp);
/*
 * FreeBSD vfsops vector for ZFS; VFS_SET registers the "zfs" filesystem
 * type, allowed inside jails (VFCF_JAIL) with delegated administration
 * (VFCF_DELEGADMIN).
 * NOTE(review): embedded line numbers jump 110 -> 113, so at least the
 * struct's closing lines are missing from this copy.
 */
102 static struct vfsops zfs_vfsops = {
103 .vfs_mount = zfs_mount,
104 .vfs_unmount = zfs_umount,
105 .vfs_root = zfs_root,
106 .vfs_statfs = zfs_statfs,
107 .vfs_vget = zfs_vget,
108 .vfs_sync = zfs_sync,
109 .vfs_checkexp = zfs_checkexp,
110 .vfs_fhtovp = zfs_fhtovp,
113 VFS_SET(zfs_vfsops, zfs, VFCF_JAIL | VFCF_DELEGADMIN);
116 * We need to keep a count of active fs's.
117 * This is necessary to prevent our module
118 * from being unloaded after a umount -f
/* Incremented in zfs_domount() via atomic_add_32(); see below. */
120 static uint32_t zfs_active_fs_count = 0;
/*
 * VFS sync entry point.  For a specific filesystem it commits the ZIL
 * and waits for the txg to sync; during shutdown it skips filesystems
 * on suspended pools.  Never syncs during panic (data integrity).
 * NOTE(review): embedded line numbers jump (124 -> 128, 142 -> 147, ...),
 * so braces, declarations, and the panic check are missing from this
 * copy — do not build from it as-is.
 */
124 zfs_sync(vfs_t *vfsp, int waitfor)
128 * Data integrity is job one. We don't want a compromised kernel
129 * writing to the storage pool, so we never sync during panic.
136 * Sync a specific filesystem.
138 zfsvfs_t *zfsvfs = vfsp->vfs_data;
142 error = vfs_stdsync(vfsp, waitfor);
147 dp = dmu_objset_pool(zfsvfs->z_os);
150 * If the system is shutting down, then skip any
151 * filesystems which may exist on a suspended pool.
153 if (sys_shutdown && spa_suspended(dp->dp_spa)) {
158 if (zfsvfs->z_log != NULL)
159 zil_commit(zfsvfs->z_log, UINT64_MAX, 0);
161 txg_wait_synced(dp, 0);
165 * Sync all ZFS filesystems. This is what happens when you
166 * run sync(1M). Unlike other filesystems, ZFS honors the
167 * request by waiting for all pools to commit all dirty data.
/*
 * dsl_prop callback: mirror the "atime" property into z_atime, the
 * MNT_NOATIME vfs flag, and the atime/noatime mount options.
 * NOTE(review): the "} else {" line (orig 185) is missing in this copy.
 */
176 atime_changed_cb(void *arg, uint64_t newval)
178 zfsvfs_t *zfsvfs = arg;
180 if (newval == TRUE) {
181 zfsvfs->z_atime = TRUE;
182 zfsvfs->z_vfs->vfs_flag &= ~MNT_NOATIME;
183 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
184 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
186 zfsvfs->z_atime = FALSE;
187 zfsvfs->z_vfs->vfs_flag |= MNT_NOATIME;
188 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
189 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
/*
 * dsl_prop callback: mirror the "xattr" property into the VFS_XATTR
 * vfs flag and the xattr/noxattr mount options.
 */
194 xattr_changed_cb(void *arg, uint64_t newval)
196 zfsvfs_t *zfsvfs = arg;
198 if (newval == TRUE) {
199 /* XXX locking on vfs_flag? */
201 zfsvfs->z_vfs->vfs_flag |= VFS_XATTR;
203 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR);
204 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_XATTR, NULL, 0);
206 /* XXX locking on vfs_flag? */
208 zfsvfs->z_vfs->vfs_flag &= ~VFS_XATTR;
210 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_XATTR);
211 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOXATTR, NULL, 0);
/*
 * dsl_prop callback for "recordsize": clamp out-of-range or
 * non-power-of-two values to SPA_MAXBLOCKSIZE, then publish the
 * result as the fs max block size and preferred I/O size.
 */
216 blksz_changed_cb(void *arg, uint64_t newval)
218 zfsvfs_t *zfsvfs = arg;
220 if (newval < SPA_MINBLOCKSIZE ||
221 newval > SPA_MAXBLOCKSIZE || !ISP2(newval))
222 newval = SPA_MAXBLOCKSIZE;
224 zfsvfs->z_max_blksz = newval;
225 zfsvfs->z_vfs->mnt_stat.f_iosize = newval;
/*
 * dsl_prop callback for "readonly": toggle VFS_RDONLY and the ro/rw
 * mount options.  NOTE(review): the if/else lines around the two
 * branches (orig 233/238) are missing in this copy.
 */
229 readonly_changed_cb(void *arg, uint64_t newval)
231 zfsvfs_t *zfsvfs = arg;
234 /* XXX locking on vfs_flag? */
235 zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
236 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
237 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
239 /* XXX locking on vfs_flag? */
240 zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
241 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
242 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
/*
 * dsl_prop callback for "setuid": toggle VFS_NOSETUID and the
 * setuid/nosetuid mount options (FALSE => nosetuid).
 */
247 setuid_changed_cb(void *arg, uint64_t newval)
249 zfsvfs_t *zfsvfs = arg;
251 if (newval == FALSE) {
252 zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
253 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
254 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
256 zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
257 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
258 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
/*
 * dsl_prop callback for "exec": toggle VFS_NOEXEC and the
 * exec/noexec mount options (FALSE => noexec).
 */
263 exec_changed_cb(void *arg, uint64_t newval)
265 zfsvfs_t *zfsvfs = arg;
267 if (newval == FALSE) {
268 zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
269 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
270 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
272 zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
273 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
274 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
279 * The nbmand mount option can be changed at mount time.
280 * We can't allow it to be toggled on live file systems or incorrect
281 * behavior may be seen from cifs clients
283 * This property isn't registered via dsl_prop_register(), but this callback
284 * will be called when a file system is first mounted
/* Only updates the nbmand/nonbmand mount options; no vfs_flag bit. */
287 nbmand_changed_cb(void *arg, uint64_t newval)
289 zfsvfs_t *zfsvfs = arg;
290 if (newval == FALSE) {
291 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
292 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
294 vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
295 vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
/* dsl_prop callback for "snapdir": record .zfs directory visibility. */
300 snapdir_changed_cb(void *arg, uint64_t newval)
302 zfsvfs_t *zfsvfs = arg;
304 zfsvfs->z_show_ctldir = newval;
/* dsl_prop callback for "vscan": cache the new value on the zfsvfs. */
308 vscan_changed_cb(void *arg, uint64_t newval)
310 zfsvfs_t *zfsvfs = arg;
312 zfsvfs->z_vscan = newval;
/* dsl_prop callback for "aclmode": cache the new value on the zfsvfs. */
316 acl_mode_changed_cb(void *arg, uint64_t newval)
318 zfsvfs_t *zfsvfs = arg;
320 zfsvfs->z_acl_mode = newval;
/* dsl_prop callback for "aclinherit": cache the new value on the zfsvfs. */
324 acl_inherit_changed_cb(void *arg, uint64_t newval)
326 zfsvfs_t *zfsvfs = arg;
328 zfsvfs->z_acl_inherit = newval;
/*
 * Register the property callbacks above with the DSL, stashing any
 * temporary mount-option overrides first and re-applying them after
 * registration (which would otherwise clobber them).  On registration
 * failure, fall through to the unregister calls at the bottom (ENOMSG
 * from unregistering a never-registered callback is ignored).
 * NOTE(review): many interior lines are missing from this copy
 * (declarations of os/error/nbmand, the assignments inside each
 * vfs_optionisset() branch, and the error-check/return lines).
 */
332 zfs_register_callbacks(vfs_t *vfsp)
334 struct dsl_dataset *ds = NULL;
336 zfsvfs_t *zfsvfs = NULL;
338 int readonly, do_readonly = FALSE;
339 int setuid, do_setuid = FALSE;
340 int exec, do_exec = FALSE;
341 int xattr, do_xattr = FALSE;
342 int atime, do_atime = FALSE;
346 zfsvfs = vfsp->vfs_data;
351 * This function can be called for a snapshot when we update snapshot's
352 * mount point, which isn't really supported.
354 if (dmu_objset_is_snapshot(os))
358 * The act of registering our callbacks will destroy any mount
359 * options we may have. In order to enable temporary overrides
360 * of mount options, we stash away the current values and
361 * restore them after we register the callbacks.
363 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
365 do_readonly = B_TRUE;
366 } else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
368 do_readonly = B_TRUE;
370 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
374 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
377 } else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
382 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
385 } else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
389 if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
392 } else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
396 if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
399 } else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
405 * nbmand is a special property. It can only be changed at
408 * This is weird, but it is documented to only be changeable
411 if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
413 } else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
416 char osname[MAXNAMELEN];
418 dmu_objset_name(os, osname);
419 if (error = dsl_prop_get_integer(osname, "nbmand", &nbmand,
426 * Register property callbacks.
428 * It would probably be fine to just check for i/o error from
429 * the first prop_register(), but I guess I like to go
432 ds = dmu_objset_ds(os);
433 error = dsl_prop_register(ds, "atime", atime_changed_cb, zfsvfs);
434 error = error ? error : dsl_prop_register(ds,
435 "xattr", xattr_changed_cb, zfsvfs);
436 error = error ? error : dsl_prop_register(ds,
437 "recordsize", blksz_changed_cb, zfsvfs);
438 error = error ? error : dsl_prop_register(ds,
439 "readonly", readonly_changed_cb, zfsvfs);
440 error = error ? error : dsl_prop_register(ds,
441 "setuid", setuid_changed_cb, zfsvfs);
442 error = error ? error : dsl_prop_register(ds,
443 "exec", exec_changed_cb, zfsvfs);
444 error = error ? error : dsl_prop_register(ds,
445 "snapdir", snapdir_changed_cb, zfsvfs);
446 error = error ? error : dsl_prop_register(ds,
447 "aclmode", acl_mode_changed_cb, zfsvfs);
448 error = error ? error : dsl_prop_register(ds,
449 "aclinherit", acl_inherit_changed_cb, zfsvfs);
450 error = error ? error : dsl_prop_register(ds,
451 "vscan", vscan_changed_cb, zfsvfs);
456 * Invoke our callbacks to restore temporary mount options.
459 readonly_changed_cb(zfsvfs, readonly);
461 setuid_changed_cb(zfsvfs, setuid);
463 exec_changed_cb(zfsvfs, exec);
465 xattr_changed_cb(zfsvfs, xattr);
467 atime_changed_cb(zfsvfs, atime);
469 nbmand_changed_cb(zfsvfs, nbmand);
/* Error path: tear down whatever did get registered. */
475 * We may attempt to unregister some callbacks that are not
476 * registered, but this is OK; it will simply return ENOMSG,
477 * which we will ignore.
479 (void) dsl_prop_unregister(ds, "atime", atime_changed_cb, zfsvfs);
480 (void) dsl_prop_unregister(ds, "xattr", xattr_changed_cb, zfsvfs);
481 (void) dsl_prop_unregister(ds, "recordsize", blksz_changed_cb, zfsvfs);
482 (void) dsl_prop_unregister(ds, "readonly", readonly_changed_cb, zfsvfs);
483 (void) dsl_prop_unregister(ds, "setuid", setuid_changed_cb, zfsvfs);
484 (void) dsl_prop_unregister(ds, "exec", exec_changed_cb, zfsvfs);
485 (void) dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb, zfsvfs);
486 (void) dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb, zfsvfs);
487 (void) dsl_prop_unregister(ds, "aclinherit", acl_inherit_changed_cb,
489 (void) dsl_prop_unregister(ds, "vscan", vscan_changed_cb, zfsvfs);
/*
 * Apply a space-usage delta to one {user,group} entry in the
 * USERUSED/GROUPUSED ZAP.  Entries are keyed by the fuid rendered as
 * hex.  A zero result removes the entry; otherwise the new total is
 * written back.  NOTE(review): declarations of used/err/buf and the
 * "used += delta" / zero-test lines (orig ~512-513) are missing here.
 */
495 uidacct(objset_t *os, boolean_t isgroup, uint64_t fuid,
496 int64_t delta, dmu_tx_t *tx)
501 uint64_t obj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
506 (void) snprintf(buf, sizeof (buf), "%llx", (longlong_t)fuid);
507 err = zap_lookup(os, obj, buf, 8, 1, &used);
508 ASSERT(err == 0 || err == ENOENT);
509 /* no underflow/overflow */
510 ASSERT(delta > 0 || used >= -delta);
511 ASSERT(delta < 0 || used + delta > used);
514 err = zap_remove(os, obj, buf, tx);
516 err = zap_update(os, obj, buf, 8, 1, &used, tx);
/*
 * DMU space-delta callback: charge the change in space used by a znode
 * to its owner and group via uidacct().  DNODE_SIZE is added for each
 * allocated dnode (zp_gen != 0).  If the uid/gid changed between old
 * and new bonus buffers, the old owner is debited and the new owner
 * credited; otherwise only the net delta is applied.
 */
521 zfs_space_delta_cb(objset_t *os, dmu_object_type_t bonustype,
522 void *oldbonus, void *newbonus,
523 uint64_t oldused, uint64_t newused, dmu_tx_t *tx)
525 znode_phys_t *oldznp = oldbonus;
526 znode_phys_t *newznp = newbonus;
/* Only plain znodes carry user/group accounting info. */
528 if (bonustype != DMU_OT_ZNODE)
531 /* We charge 512 for the dnode (if it's allocated). */
532 if (oldznp->zp_gen != 0)
533 oldused += DNODE_SIZE;
534 if (newznp->zp_gen != 0)
535 newused += DNODE_SIZE;
537 if (oldznp->zp_uid == newznp->zp_uid) {
538 uidacct(os, B_FALSE, oldznp->zp_uid, newused-oldused, tx);
540 uidacct(os, B_FALSE, oldznp->zp_uid, -oldused, tx);
541 uidacct(os, B_FALSE, newznp->zp_uid, newused, tx);
544 if (oldznp->zp_gid == newznp->zp_gid) {
545 uidacct(os, B_TRUE, oldznp->zp_gid, newused-oldused, tx);
547 uidacct(os, B_TRUE, oldznp->zp_gid, -oldused, tx);
548 uidacct(os, B_TRUE, newznp->zp_gid, newused, tx);
/*
 * Decode a hex fuid string into its SID components: look up the domain
 * by FUID_INDEX and copy it into domainbuf, and extract the rid.
 * NOTE(review): the NULL-domain branch (around orig 562-565) is
 * missing from this copy.
 */
553 fuidstr_to_sid(zfsvfs_t *zfsvfs, const char *fuidstr,
554 char *domainbuf, int buflen, uid_t *ridp)
559 fuid = strtonum(fuidstr, NULL);
561 domain = zfs_fuid_find_by_idx(zfsvfs, FUID_INDEX(fuid));
563 (void) strlcpy(domainbuf, domain, buflen);
566 *ridp = FUID_RID(fuid);
/*
 * Map a userquota property type to the ZAP object that stores it:
 * the DMU's fixed USERUSED/GROUPUSED objects for usage, or the
 * per-filesystem quota objects for quotas.
 */
570 zfs_userquota_prop_to_obj(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type)
573 case ZFS_PROP_USERUSED:
574 return (DMU_USERUSED_OBJECT);
575 case ZFS_PROP_GROUPUSED:
576 return (DMU_GROUPUSED_OBJECT);
577 case ZFS_PROP_USERQUOTA:
578 return (zfsvfs->z_userquota_obj);
579 case ZFS_PROP_GROUPQUOTA:
580 return (zfsvfs->z_groupquota_obj);
/*
 * Iterate the per-user/group space or quota ZAP starting at *cookiep,
 * filling vbuf with zfs_useracct_t records until the buffer is full or
 * the ZAP is exhausted.  Updates *bufsizep to the bytes written and
 * *cookiep to the resume cursor.  Requires userspace accounting to be
 * present on the objset.  NOTE(review): declarations (zc, za, error,
 * obj), the obj==0 check, and loop-exit lines are missing here.
 */
586 zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
587 uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
592 zfs_useracct_t *buf = vbuf;
595 if (!dmu_objset_userspace_present(zfsvfs->z_os))
598 obj = zfs_userquota_prop_to_obj(zfsvfs, type);
604 for (zap_cursor_init_serialized(&zc, zfsvfs->z_os, obj, *cookiep);
605 (error = zap_cursor_retrieve(&zc, &za)) == 0;
606 zap_cursor_advance(&zc)) {
/* Stop once another record would overflow the caller's buffer. */
607 if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
611 fuidstr_to_sid(zfsvfs, za.za_name,
612 buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);
614 buf->zu_space = za.za_first_integer;
620 ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
621 *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
622 *cookiep = zap_cursor_serialize(&zc);
623 zap_cursor_fini(&zc);
628 * buf must be big enough (eg, 32 bytes)
/*
 * Encode a (domain, rid) pair as the hex fuid string used to key the
 * quota/used ZAPs.  An empty domain yields domainid 0 (plain POSIX id);
 * addok controls whether an unknown domain may be added to the table.
 * NOTE(review): the error check after the domain lookup (orig ~639-641)
 * is missing from this copy.
 */
631 id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
632 char *buf, boolean_t addok)
637 if (domain && domain[0]) {
638 domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
642 fuid = FUID_ENCODE(domainid, rid);
643 (void) sprintf(buf, "%llx", (longlong_t)fuid);
/*
 * Look up a single user/group space-used or quota value for the given
 * (domain, rid) identity, storing it in *valp.  Requires userspace
 * accounting to be present.  NOTE(review): declarations and the error
 * checks between these lines are missing from this copy.
 */
648 zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
649 const char *domain, uint64_t rid, uint64_t *valp)
657 if (!dmu_objset_userspace_present(zfsvfs->z_os))
660 obj = zfs_userquota_prop_to_obj(zfsvfs, type);
664 err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_FALSE);
668 err = zap_lookup(zfsvfs->z_os, obj, buf, 8, 1, valp);
/*
 * Set (or, with quota == 0, clear) a user/group quota.  Creates the
 * quota ZAP object on first use, registering it in the master node,
 * all inside one assigned transaction.  Also syncs any dirty FUID
 * table in the same tx.  Rejects non-quota property types and pool
 * versions before ZPL_VERSION_USERSPACE.
 * NOTE(review): several interior lines (declarations, error returns,
 * the *objp == 0 test, dmu_tx_commit) are missing from this copy.
 */
675 zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
676 const char *domain, uint64_t rid, uint64_t quota)
682 boolean_t fuid_dirtied;
684 if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
687 if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
690 objp = (type == ZFS_PROP_USERQUOTA) ? &zfsvfs->z_userquota_obj :
691 &zfsvfs->z_groupquota_obj;
693 err = id_to_fuidstr(zfsvfs, domain, rid, buf, B_TRUE);
696 fuid_dirtied = zfsvfs->z_fuid_dirty;
698 tx = dmu_tx_create(zfsvfs->z_os);
699 dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
701 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
702 zfs_userquota_prop_prefixes[type]);
705 zfs_fuid_txhold(zfsvfs, tx);
706 err = dmu_tx_assign(tx, TXG_WAIT);
/* z_lock serializes lazy creation of the quota object. */
712 mutex_enter(&zfsvfs->z_lock);
714 *objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
716 VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
717 zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
719 mutex_exit(&zfsvfs->z_lock);
722 err = zap_remove(zfsvfs->z_os, *objp, buf, tx);
726 err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, &quota, tx);
730 zfs_fuid_sync(zfsvfs, tx);
/*
 * Return whether the given user/group fuid is at or over its quota.
 * Returns early (no quota enforcement) if no quota object exists or
 * the filesystem is replaying the ZIL.  NOTE(review): declarations and
 * the error-handling returns between the zap_lookups are missing here.
 */
736 zfs_usergroup_overquota(zfsvfs_t *zfsvfs, boolean_t isgroup, uint64_t fuid)
739 uint64_t used, quota, usedobj, quotaobj;
742 usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
743 quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
745 if (quotaobj == 0 || zfsvfs->z_replay)
748 (void) sprintf(buf, "%llx", (longlong_t)fuid);
749 err = zap_lookup(zfsvfs->z_os, quotaobj, buf, 8, 1, &quota);
753 err = zap_lookup(zfsvfs->z_os, usedobj, buf, 8, 1, &used);
756 return (used >= quota);
/*
 * Allocate and initialize a zfsvfs_t for the named objset: open the
 * objset (falling back to read-only on EROFS or when the "readonly"
 * property is set), read ZPL properties (version, normalization,
 * utf8only, casesensitivity), look up the master-node objects (root,
 * unlinked set, quota objects, FUID tables, shares dir), and set up
 * locks and the znode list.  ENOENT from the optional master-node
 * entries is tolerated.  On success *zvp receives the new zfsvfs.
 * NOTE(review): interior lines are missing from this copy — error
 * returns after lookups, the "goto out" targets, and the success
 * return path among them.
 */
760 zfsvfs_create(const char *osname, int mode, zfsvfs_t **zvp)
767 if (error = dsl_prop_get_integer(osname, "readonly", &zval, NULL))
770 mode |= DS_MODE_READONLY;
772 error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &os);
773 if (error == EROFS) {
774 mode |= DS_MODE_READONLY;
775 error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &os);
781 * Initialize the zfs-specific filesystem structure.
782 * Should probably make this a kmem cache, shuffle fields,
783 * and just bzero up to z_hold_mtx[].
785 zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
786 zfsvfs->z_vfs = NULL;
787 zfsvfs->z_parent = zfsvfs;
788 zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
789 zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
792 error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
795 } else if (zfsvfs->z_version > ZPL_VERSION) {
796 (void) printf("Mismatched versions: File system "
797 "is version %llu on-disk format, which is "
798 "incompatible with this software version %lld!",
799 (u_longlong_t)zfsvfs->z_version, ZPL_VERSION);
804 if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
806 zfsvfs->z_norm = (int)zval;
808 if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
810 zfsvfs->z_utf8 = (zval != 0);
812 if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
814 zfsvfs->z_case = (uint_t)zval;
817 * Fold case on file systems that are always or sometimes case
820 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
821 zfsvfs->z_case == ZFS_CASE_MIXED)
822 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
824 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
826 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
830 ASSERT(zfsvfs->z_root != 0);
832 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
833 &zfsvfs->z_unlinkedobj);
/* The remaining master-node entries are optional: ENOENT is OK. */
837 error = zap_lookup(os, MASTER_NODE_OBJ,
838 zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
839 8, 1, &zfsvfs->z_userquota_obj);
840 if (error && error != ENOENT)
843 error = zap_lookup(os, MASTER_NODE_OBJ,
844 zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
845 8, 1, &zfsvfs->z_groupquota_obj);
846 if (error && error != ENOENT)
849 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
850 &zfsvfs->z_fuid_obj);
851 if (error && error != ENOENT)
854 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
855 &zfsvfs->z_shares_dir);
856 if (error && error != ENOENT)
859 mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
860 mutex_init(&zfsvfs->z_online_recv_lock, NULL, MUTEX_DEFAULT, NULL);
861 mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
862 list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
863 offsetof(znode_t, z_link_node));
864 rrw_init(&zfsvfs->z_teardown_lock);
865 rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
866 rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
867 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
868 mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
/* Error path: release the objset and the partially built zfsvfs. */
874 dmu_objset_close(os);
876 kmem_free(zfsvfs, sizeof (zfsvfs_t));
/*
 * Finish setting up a zfsvfs after zfsvfs_create(): register property
 * callbacks, point the objset's user pointer at us, open the ZIL, and
 * — when actually mounting — drain the unlinked set and replay the
 * intent log (temporarily lifting VFS_RDONLY so replay can write).
 * NOTE(review): interior lines are missing from this copy (error
 * returns, the "if (mounting)" structure, and the final return).
 */
881 zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
885 error = zfs_register_callbacks(zfsvfs->z_vfs);
890 * Set the objset user_ptr to track its zfsvfs.
892 mutex_enter(&zfsvfs->z_os->os->os_user_ptr_lock);
893 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
894 mutex_exit(&zfsvfs->z_os->os->os_user_ptr_lock);
896 zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
898 zil_destroy(zfsvfs->z_log, B_FALSE);
899 zfsvfs->z_log = NULL;
903 * If we are not mounting (ie: online recv), then we don't
904 * have to worry about replaying the log as we blocked all
905 * operations out since we closed the ZIL.
911 * During replay we remove the read only flag to
912 * allow replays to succeed.
914 readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
916 zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
918 zfs_unlinked_drain(zfsvfs);
922 * Parse and replay the intent log.
924 * Because of ziltest, this must be done after
925 * zfs_unlinked_drain(). (Further note: ziltest
926 * doesn't use readonly mounts, where
927 * zfs_unlinked_drain() isn't called.) This is because
928 * ziltest causes spa_sync() to think it's committed,
929 * but actually it is not, so the intent log contains
930 * many txg's worth of changes.
932 * In particular, if object N is in the unlinked set in
933 * the last txg to actually sync, then it could be
934 * actually freed in a later txg and then reallocated
935 * in a yet later txg. This would write a "create
936 * object N" record to the intent log. Normally, this
937 * would be fine because the spa_sync() would have
938 * written out the fact that object N is free, before
939 * we could write the "create object N" intent log
942 * But when we are in ziltest mode, we advance the "open
943 * txg" without actually spa_sync()-ing the changes to
944 * disk. So we would see that object N is still
945 * allocated and in the unlinked set, and there is an
946 * intent log record saying to allocate it.
948 zfsvfs->z_replay = B_TRUE;
949 zil_replay(zfsvfs->z_os, zfsvfs, zfs_replay_vector);
950 zfsvfs->z_replay = B_FALSE;
952 zfsvfs->z_vfs->vfs_flag |= readonly; /* restore readonly bit */
/*
 * Tear down everything zfsvfs_create() built: FUID state, mutexes,
 * the znode list, the rrw/rw locks, the object-hold mutex array, and
 * finally the zfsvfs allocation itself.
 */
959 zfsvfs_free(zfsvfs_t *zfsvfs)
963 zfs_fuid_destroy(zfsvfs);
965 mutex_destroy(&zfsvfs->z_znodes_lock);
966 mutex_destroy(&zfsvfs->z_online_recv_lock);
967 mutex_destroy(&zfsvfs->z_lock);
968 list_destroy(&zfsvfs->z_all_znodes);
969 rrw_destroy(&zfsvfs->z_teardown_lock);
970 rw_destroy(&zfsvfs->z_teardown_inactive_lock);
971 rw_destroy(&zfsvfs->z_fuid_lock);
972 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
973 mutex_destroy(&zfsvfs->z_hold_mtx[i]);
974 kmem_free(zfsvfs, sizeof (zfsvfs_t));
/*
 * Recompute z_use_fuids from the ZPL version and, when FUIDs are in
 * use and a vfs is attached, advertise the related VFS features.
 */
978 zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
980 zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
981 if (zfsvfs->z_use_fuids && zfsvfs->z_vfs) {
982 vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
983 vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
984 vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
985 vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
/*
 * Do the real work of mounting: create the zfsvfs, wire it to the vfs,
 * compose the 64-bit fsid (objset guid in the high bits, vfc_typenum
 * in the low byte), advertise VFS features, handle snapshot mounts
 * (forced noatime/read-only, no ZIL setup), run zfsvfs_setup() for
 * regular mounts, take an extra root-vnode reference, create the .zfs
 * control dir, and bump zfs_active_fs_count.
 * NOTE(review): interior lines (declarations, "goto out" targets,
 * if/else structure around the snapshot path) are missing here.
 */
990 zfs_domount(vfs_t *vfsp, char *osname)
992 uint64_t recordsize, fsid_guid;
1000 error = zfsvfs_create(osname, DS_MODE_OWNER, &zfsvfs);
1003 zfsvfs->z_vfs = vfsp;
1005 if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
1008 zfsvfs->z_vfs->vfs_bsize = SPA_MINBLOCKSIZE;
1009 zfsvfs->z_vfs->mnt_stat.f_iosize = recordsize;
1011 vfsp->vfs_data = zfsvfs;
1012 vfsp->mnt_flag |= MNT_LOCAL;
1013 vfsp->mnt_kern_flag |= MNTK_MPSAFE;
1014 vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
1015 vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
1019 * The fsid is 64 bits, composed of an 8-bit fs type, which
1020 * separates our fsid from any other filesystem types, and a
1021 * 56-bit objset unique ID. The objset unique ID is unique to
1022 * all objsets open on this system, provided by unique_create().
1023 * The 8-bit fs type must be put in the low bits of fsid[1]
1024 * because that's where other Solaris filesystems put it.
1026 fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
1027 ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
1028 vfsp->vfs_fsid.val[0] = fsid_guid;
1029 vfsp->vfs_fsid.val[1] = ((fsid_guid>>32) << 8) |
1030 vfsp->mnt_vfc->vfc_typenum & 0xFF;
1033 * Set features for file system.
1035 zfs_set_fuid_feature(zfsvfs);
1036 if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
1037 vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
1038 vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
1039 vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
1040 } else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
1041 vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
1042 vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
/* Snapshot mounts are forced noatime + read-only; no ZIL/replay. */
1045 if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
1048 atime_changed_cb(zfsvfs, B_FALSE);
1049 readonly_changed_cb(zfsvfs, B_TRUE);
1050 if (error = dsl_prop_get_integer(osname, "xattr", &pval, NULL))
1052 xattr_changed_cb(zfsvfs, pval);
1053 zfsvfs->z_issnap = B_TRUE;
1055 mutex_enter(&zfsvfs->z_os->os->os_user_ptr_lock);
1056 dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
1057 mutex_exit(&zfsvfs->z_os->os->os_user_ptr_lock);
1059 error = zfsvfs_setup(zfsvfs, B_TRUE);
1062 vfs_mountedfrom(vfsp, osname);
1063 /* Grab extra reference. */
1064 VERIFY(VFS_ROOT(vfsp, LK_EXCLUSIVE, &vp) == 0);
1067 if (!zfsvfs->z_issnap)
1068 zfsctl_create(zfsvfs);
/* Error path: close the objset and free the zfsvfs. */
1071 dmu_objset_close(zfsvfs->z_os);
1072 zfsvfs_free(zfsvfs);
1074 atomic_add_32(&zfs_active_fs_count, 1);
/*
 * Unregister every property callback registered by
 * zfs_register_callbacks().  Snapshots never register callbacks, so
 * they are skipped.  Each unregister is VERIFY'd to succeed.
 * NOTE(review): the "zfsvfs) == 0);" continuation lines between the
 * VERIFY calls are missing from this copy.
 */
1081 zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
1083 objset_t *os = zfsvfs->z_os;
1084 struct dsl_dataset *ds;
1087 * Unregister properties.
1089 if (!dmu_objset_is_snapshot(os)) {
1090 ds = dmu_objset_ds(os);
1091 VERIFY(dsl_prop_unregister(ds, "atime", atime_changed_cb,
1094 VERIFY(dsl_prop_unregister(ds, "xattr", xattr_changed_cb,
1097 VERIFY(dsl_prop_unregister(ds, "recordsize", blksz_changed_cb,
1100 VERIFY(dsl_prop_unregister(ds, "readonly", readonly_changed_cb,
1103 VERIFY(dsl_prop_unregister(ds, "setuid", setuid_changed_cb,
1106 VERIFY(dsl_prop_unregister(ds, "exec", exec_changed_cb,
1109 VERIFY(dsl_prop_unregister(ds, "snapdir", snapdir_changed_cb,
1112 VERIFY(dsl_prop_unregister(ds, "aclmode", acl_mode_changed_cb,
1115 VERIFY(dsl_prop_unregister(ds, "aclinherit",
1116 acl_inherit_changed_cb, zfsvfs) == 0);
1118 VERIFY(dsl_prop_unregister(ds, "vscan",
1119 vscan_changed_cb, zfsvfs) == 0);
/*
 * VFS mount entry point.  Extracts the dataset name from the "from"
 * option, performs privilege/ownership checks (with the zfs_super_owner
 * and delegated-mount escape hatches), refuses datasets not visible in
 * the caller's zone, handles MS_REMOUNT by re-registering callbacks,
 * and otherwise calls zfs_domount().  For snapshot mounts it takes an
 * extra hold on the parent vfs so a forced unmount can't free it.
 * NOTE(review): interior lines (declarations, error returns, vput/
 * unlock calls around the GETATTR check) are missing from this copy.
 */
1125 zfs_mount(vfs_t *vfsp)
1127 kthread_t *td = curthread;
1128 vnode_t *mvp = vfsp->mnt_vnodecovered;
1129 cred_t *cr = td->td_ucred;
1134 if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
1138 * If full-owner-access is enabled and delegated administration is
1139 * turned on, we must set nosuid.
1141 if (zfs_super_owner &&
1142 dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
1143 secpolicy_fs_mount_clearopts(cr, vfsp);
1147 * Check for mount privilege?
1149 * If we don't have privilege then see if
1150 * we have local permission to allow it
1152 error = secpolicy_fs_mount(cr, mvp, vfsp);
1154 error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
1158 if (!(vfsp->vfs_flag & MS_REMOUNT)) {
1162 * Make sure user is the owner of the mount point
1163 * or has sufficient privileges.
1166 vattr.va_mask = AT_UID;
1168 vn_lock(mvp, LK_SHARED | LK_RETRY);
1169 if (error = VOP_GETATTR(mvp, &vattr, cr)) {
1174 if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
1175 VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
1182 secpolicy_fs_mount_clearopts(cr, vfsp);
1186 * Refuse to mount a filesystem if we are in a local zone and the
1187 * dataset is not visible.
1189 if (!INGLOBALZONE(curthread) &&
1190 (!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
1196 * When doing a remount, we simply refresh our temporary properties
1197 * according to those options set in the current VFS options.
1199 if (vfsp->vfs_flag & MS_REMOUNT) {
1200 /* refresh mount options */
1201 zfs_unregister_callbacks(vfsp->vfs_data);
1202 error = zfs_register_callbacks(vfsp);
1207 error = zfs_domount(vfsp, osname);
1211 * Add an extra VFS_HOLD on our parent vfs so that it can't
1212 * disappear due to a forced unmount.
1214 if (error == 0 && ((zfsvfs_t *)vfsp->vfs_data)->z_issnap)
1215 VFS_HOLD(mvp->v_vfsp);
/*
 * VFS statfs entry point: report space and object counts from
 * dmu_objset_space().  Block counts are in SPA_MINBLOCKSIZE units;
 * f_files is approximated as used objects plus the smaller of
 * available objects and available blocks.
 */
1222 zfs_statfs(vfs_t *vfsp, struct statfs *statp)
1224 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1225 uint64_t refdbytes, availbytes, usedobjs, availobjs;
1227 statp->f_version = STATFS_VERSION;
1231 dmu_objset_space(zfsvfs->z_os,
1232 &refdbytes, &availbytes, &usedobjs, &availobjs);
1235 * The underlying storage pool actually uses multiple block sizes.
1236 * We report the fragsize as the smallest block size we support,
1237 * and we report our blocksize as the filesystem's maximum blocksize.
1239 statp->f_bsize = SPA_MINBLOCKSIZE;
1240 statp->f_iosize = zfsvfs->z_vfs->mnt_stat.f_iosize;
1243 * The following report "total" blocks of various kinds in the
1244 * file system, but reported in terms of f_frsize - the
1248 statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
1249 statp->f_bfree = availbytes / statp->f_bsize;
1250 statp->f_bavail = statp->f_bfree; /* no root reservation */
1253 * statvfs() should really be called statufs(), because it assumes
1254 * static metadata. ZFS doesn't preallocate files, so the best
1255 * we can do is report the max that could possibly fit in f_files,
1256 * and that minus the number actually used in f_ffree.
1257 * For f_ffree, report the smaller of the number of object available
1258 * and the number of blocks (each object will take at least a block).
1260 statp->f_ffree = MIN(availobjs, statp->f_bfree);
1261 statp->f_files = statp->f_ffree + usedobjs;
1264 * We're a zfs filesystem.
1266 (void) strlcpy(statp->f_fstypename, "zfs", sizeof(statp->f_fstypename));
1268 strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
1269 sizeof(statp->f_mntfromname));
1270 strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
1271 sizeof(statp->f_mntonname));
1273 statp->f_namemax = ZFS_MAXNAMELEN;
/*
 * VFS root entry point: zget the root znode, lock its vnode with the
 * caller's flags, and mark it VV_ROOT.
 * NOTE(review): the ZFS_EXIT and return lines are missing here.
 */
1280 zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
1282 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1286 ZFS_ENTER_NOERROR(zfsvfs);
1288 error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
1293 *vpp = ZTOV(rootzp);
1294 error = vn_lock(*vpp, flags);
1295 (*vpp)->v_vflag |= VV_ROOT;
1302 * Teardown the zfsvfs::z_os.
1304 * Note, if 'unmounting' if FALSE, we return with the 'z_teardown_lock'
1305 * and 'z_teardown_inactive_lock' held.
/*
 * Quiesce and tear down the objset: purge name caches, close the ZIL,
 * block out vops via the teardown locks, release all znode dbuf holds,
 * then either mark the fs unmounted (unmounting==TRUE, waiting for
 * straggler znodes to be reclaimed) or keep the locks held for an
 * online recv switcheroo.  Finally unregister callbacks and evict
 * dbufs, syncing a txg if the first eviction attempt fails.
 * NOTE(review): interior lines (the "if (unmounting)" braces, return
 * statements, msleep arguments) are missing from this copy.
 */
1308 zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
1312 rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
1316 * We purge the parent filesystem's vfsp as the parent
1317 * filesystem and all of its snapshots have their vnode's
1318 * v_vfsp set to the parent's filesystem's vfsp. Note,
1319 * 'z_parent' is self referential for non-snapshots.
1321 (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
1322 #ifdef FREEBSD_NAMECACHE
1323 cache_purgevfs(zfsvfs->z_parent->z_vfs);
1328 * Close the zil. NB: Can't close the zil while zfs_inactive
1329 * threads are blocked as zil_close can call zfs_inactive.
1331 if (zfsvfs->z_log) {
1332 zil_close(zfsvfs->z_log);
1333 zfsvfs->z_log = NULL;
1336 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
1339 * If we are not unmounting (ie: online recv) and someone already
1340 * unmounted this file system while we were doing the switcheroo,
1341 * or a reopen of z_os failed then just bail out now.
1343 if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
1344 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1345 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1350 * At this point there are no vops active, and any new vops will
1351 * fail with EIO since we have z_teardown_lock for writer (only
1352 * relavent for forced unmount).
1354 * Release all holds on dbufs.
1356 mutex_enter(&zfsvfs->z_znodes_lock);
1357 for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
1358 zp = list_next(&zfsvfs->z_all_znodes, zp))
1360 ASSERT(ZTOV(zp)->v_count >= 0);
1361 zfs_znode_dmu_fini(zp);
1363 mutex_exit(&zfsvfs->z_znodes_lock);
1366 * If we are unmounting, set the unmounted flag and let new vops
1367 * unblock. zfs_inactive will have the unmounted behavior, and all
1368 * other vops will fail with EIO.
1371 zfsvfs->z_unmounted = B_TRUE;
1372 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1373 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1377 * Some znodes might not be fully reclaimed, wait for them.
1379 mutex_enter(&zfsvfs->z_znodes_lock);
1380 while (list_head(&zfsvfs->z_all_znodes) != NULL) {
1381 msleep(zfsvfs, &zfsvfs->z_znodes_lock, 0,
1384 mutex_exit(&zfsvfs->z_znodes_lock);
1389 * z_os will be NULL if there was an error in attempting to reopen
1390 * zfsvfs, so just return as the properties had already been
1391 * unregistered and cached data had been evicted before.
1393 if (zfsvfs->z_os == NULL)
1397 * Unregister properties.
1399 zfs_unregister_callbacks(zfsvfs);
1404 if (dmu_objset_evict_dbufs(zfsvfs->z_os)) {
1405 txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
1406 (void) dmu_objset_evict_dbufs(zfsvfs->z_os);
/*
 * VFS_UNMOUNT implementation: check permissions, unmount .zfs snapshots,
 * flush vnodes, tear down the zfsvfs and release the objset.
 * NOTE(review): return type, locals, error returns and several closing
 * braces are elided from this excerpt.
 */
1414 zfs_umount(vfs_t *vfsp, int fflag)
1416 kthread_t *td = curthread;
1417 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1419 cred_t *cr = td->td_ucred;
/*
 * Permission check: try the standard fs-unmount policy first, then fall
 * back to ZFS "mount" delegation on the dataset named by vfs_resource.
 */
1422 ret = secpolicy_fs_unmount(cr, vfsp);
1424 ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
1425 ZFS_DELEG_PERM_MOUNT, cr);
1430 * We purge the parent filesystem's vfsp as the parent filesystem
1431 * and all of its snapshots have their vnode's v_vfsp set to the
1432 * parent's filesystem's vfsp. Note, 'z_parent' is self
1433 * referential for non-snapshots.
1435 (void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
1438 * Unmount any snapshots mounted under .zfs before unmounting the
1441 if (zfsvfs->z_ctldir != NULL) {
1442 if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
/*
 * With the ctldir vnode still held, vflush is expected to report
 * EBUSY rather than succeed.
 */
1444 ret = vflush(vfsp, 0, 0, td);
1445 ASSERT(ret == EBUSY);
1446 if (!(fflag & MS_FORCE)) {
/* Non-forced unmount fails if anything beyond .zfs itself is active. */
1447 if (zfsvfs->z_ctldir->v_count > 1)
1449 ASSERT(zfsvfs->z_ctldir->v_count == 1);
1451 zfsctl_destroy(zfsvfs);
1452 ASSERT(zfsvfs->z_ctldir == NULL);
1455 if (fflag & MS_FORCE) {
1457 * Mark file system as unmounted before calling
1458 * vflush(FORCECLOSE). This way we ensure no future vnops
1459 * will be called and risk operating on DOOMED vnodes.
1461 rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
1462 zfsvfs->z_unmounted = B_TRUE;
1463 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1467 * Flush all the files.
1469 ret = vflush(vfsp, 1, (fflag & MS_FORCE) ? FORCECLOSE : 0, td);
/* On vflush failure, recreate .zfs so the fs is left usable. */
1471 if (!zfsvfs->z_issnap) {
1472 zfsctl_create(zfsvfs);
1473 ASSERT(zfsvfs->z_ctldir != NULL);
1478 if (!(fflag & MS_FORCE)) {
1480 * Check the number of active vnodes in the file system.
1481 * Our count is maintained in the vfs structure, but the
1482 * number is off by 1 to indicate a hold on the vfs
1485 * The '.zfs' directory maintains a reference of its
1486 * own, and any active references underneath are
1487 * reflected in the vnode count.
1489 if (zfsvfs->z_ctldir == NULL) {
1490 if (vfsp->vfs_count > 1)
1493 if (vfsp->vfs_count > 2 ||
1494 zfsvfs->z_ctldir->v_count > 1)
/* Past this point the unmount may no longer fail. */
1499 vfsp->mnt_kern_flag |= MNTK_UNMOUNTF;
1503 VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
1507 * z_os will be NULL if there was an error in
1508 * attempting to reopen zfsvfs.
1512 * Unset the objset user_ptr.
1514 mutex_enter(&os->os->os_user_ptr_lock);
1515 dmu_objset_set_user(os, NULL);
1516 mutex_exit(&os->os->os_user_ptr_lock);
1519 * Finally release the objset
1521 dmu_objset_close(os);
1525 * We can now safely destroy the '.zfs' directory node.
1527 if (zfsvfs->z_ctldir != NULL)
1528 zfsctl_destroy(zfsvfs);
1529 if (zfsvfs->z_issnap) {
1530 vnode_t *svp = vfsp->mnt_vnodecovered;
/* Wake anyone waiting on the covered vnode of an unmounted snapshot. */
1532 if (svp->v_count >= 2)
/*
 * VFS_VGET implementation: translate an inode number into a locked vnode.
 * NOTE(review): locals, error returns and closing lines are elided from
 * this excerpt.
 */
1541 zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
1543 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1548 * XXXPJD: zfs_zget() can't operate on virtual entries like .zfs/ or
1549 * .zfs/snapshot/ directories, so for now just return EOPNOTSUPP.
1550 * This will make NFS fall back to using READDIR instead of
1552 * Also snapshots are stored in AVL tree, but based on their names,
1553 * not inode numbers, so it will be very inefficient to iterate
1554 * over all snapshots to find the right one.
1555 * Note that OpenSolaris READDIRPLUS implementation does LOOKUP on
1556 * d_name, and not VGET on d_fileno as we do.
1558 if (ino == ZFSCTL_INO_ROOT || ino == ZFSCTL_INO_SNAPDIR)
1559 return (EOPNOTSUPP);
1562 err = zfs_zget(zfsvfs, ino, &zp);
/* An unlinked-but-not-yet-reclaimed znode must not be handed out. */
1563 if (err == 0 && zp->z_unlinked) {
1572 vn_lock(*vpp, flags);
/*
 * VFS_CHECKEXP implementation: validate an NFS client against the export
 * list, always consulting the parent filesystem's mount (snapshots share
 * the parent's mnt_export).
 */
1578 zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
1579 struct ucred **credanonp, int *numsecflavors, int **secflavors)
1581 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1584 * If this is regular file system vfsp is the same as
1585 * zfsvfs->z_parent->z_vfs, but if it is snapshot,
1586 * zfsvfs->z_parent->z_vfs represents parent file system
1587 * which we have to use here, because only this file system
1588 * has mnt_export configured.
/*
 * NOTE(review): this assignment looks redundant — the call below passes
 * zfsvfs->z_parent->z_vfs directly rather than vfsp. Harmless, but
 * worth confirming against the full function.
 */
1590 vfsp = zfsvfs->z_parent->z_vfs;
1592 return (vfs_stdcheckexp(zfsvfs->z_parent->z_vfs, nam, extflagsp,
1593 credanonp, numsecflavors, secflavors));
/*
 * Compile-time checks: both ZFS file-handle encodings must fit inside
 * the generic struct fid used by the VFS/NFS layers.
 */
1596 CTASSERT(SHORT_FID_LEN <= sizeof(struct fid));
1597 CTASSERT(LONG_FID_LEN <= sizeof(struct fid));
/*
 * VFS_FHTOVP implementation: decode an NFS file handle (short or long
 * form) into a locked vnode, validating the stored generation number.
 * NOTE(review): locals, error returns and closing braces are elided from
 * this excerpt.
 */
1600 zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
1602 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1604 uint64_t object = 0;
1605 uint64_t fid_gen = 0;
1615 * On FreeBSD we can get snapshot's mount point or its parent file
1616 * system mount point depending if snapshot is already mounted or not.
/*
 * Long fids carry an extra objset id/generation identifying a snapshot;
 * decode them and redirect to that snapshot's zfsvfs.
 */
1618 if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
1619 zfid_long_t *zlfid = (zfid_long_t *)fidp;
1620 uint64_t objsetid = 0;
1621 uint64_t setgen = 0;
/* Fid fields are byte arrays stored little-endian; reassemble them. */
1623 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
1624 objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
1626 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
1627 setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
1631 err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
/* Both forms share the short header: object number + generation. */
1637 if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
1638 zfid_short_t *zfid = (zfid_short_t *)fidp;
1640 for (i = 0; i < sizeof (zfid->zf_object); i++)
1641 object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
1643 for (i = 0; i < sizeof (zfid->zf_gen); i++)
1644 fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
1650 /* A zero fid_gen means we are in the .zfs control directories */
1652 (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
1653 *vpp = zfsvfs->z_ctldir;
1654 ASSERT(*vpp != NULL);
1655 if (object == ZFSCTL_INO_SNAPDIR) {
/* Resolve .zfs/snapshot via the control-directory lookup. */
1656 VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
1657 0, NULL, NULL, NULL, NULL, NULL) == 0);
1662 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
/* Mask covers only the bits the fid actually stored for the gen. */
1666 gen_mask = -1ULL >> (64 - 8 * i);
1668 dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
1669 if (err = zfs_zget(zfsvfs, object, &zp)) {
1673 zp_gen = zp->z_phys->zp_gen & gen_mask;
/* Stale handle: object was deleted or its generation changed. */
1676 if (zp->z_unlinked || zp_gen != fid_gen) {
1677 dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
1686 vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
/* Back the vnode with a VM object sized to the file. */
1687 vnode_create_vobject(*vpp, zp->z_phys->zp_size, curthread);
1692 * Block out VOPs and close zfsvfs_t::z_os
1694 * Note, if successful, then we return with the 'z_teardown_lock' and
1695 * 'z_teardown_inactive_lock' write held.
/*
 * Used around operations (e.g. rollback/receive) that must swap the
 * underlying objset.  Returns the objset's name and open mode to the
 * caller so zfs_resume_fs() can reopen it.
 */
1698 zfs_suspend_fs(zfsvfs_t *zfsvfs, char *name, int *modep)
/* unmounting == B_FALSE: teardown keeps both locks held on success. */
1702 if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
1705 *modep = zfsvfs->z_os->os_mode;
1707 dmu_objset_name(zfsvfs->z_os, name);
1708 dmu_objset_close(zfsvfs->z_os);
1714 * Reopen zfsvfs_t::z_os and release VOPs.
/*
 * Counterpart to zfs_suspend_fs(); both teardown locks must still be
 * write-held on entry (asserted below).  NOTE(review): locals and error
 * returns are elided from this excerpt.
 */
1717 zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode)
1721 ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
1722 ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
1724 err = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
/* On open failure, leave z_os NULL so the forced unmount below runs. */
1726 zfsvfs->z_os = NULL;
1730 VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
1733 * Attempt to re-establish all the active znodes with
1734 * their dbufs. If a zfs_rezget() fails, then we'll let
1735 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
1736 * when they try to use their znode.
1738 mutex_enter(&zfsvfs->z_znodes_lock);
1739 for (zp = list_head(&zfsvfs->z_all_znodes); zp;
1740 zp = list_next(&zfsvfs->z_all_znodes, zp)) {
1741 (void) zfs_rezget(zp);
1743 mutex_exit(&zfsvfs->z_znodes_lock);
1747 /* release the VOPs */
1748 rw_exit(&zfsvfs->z_teardown_inactive_lock);
1749 rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
1753 * Since we couldn't reopen zfsvfs::z_os, force
1754 * unmount this file system.
1756 if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0)
1757 (void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
/*
 * Final VFS release: free the zfsvfs and drop the active-fs count.
 */
1763 zfs_freevfs(vfs_t *vfsp)
1765 zfsvfs_t *zfsvfs = vfsp->vfs_data;
1768 * If this is a snapshot, we have an extra VFS_HOLD on our parent
1769 * from zfs_mount(). Release it here.
1771 if (zfsvfs->z_issnap)
1772 VFS_RELE(zfsvfs->z_parent->z_vfs);
1774 zfsvfs_free(zfsvfs);
/* One fewer mounted ZFS filesystem (see zfs_busy()). */
1776 atomic_add_32(&zfs_active_fs_count, -1);
/* Saved pre-ZFS value of desiredvnodes, restored by zfs_vnodes_adjust_back(). */
1780 static int desiredvnodes_backup;
/*
 * Shrink the global desiredvnodes limit (to 3/4) when it still holds the
 * stock vntblinit() value, i.e. the administrator has not tuned it.
 * NOTE(review): this block appears to be under a conditional (likely
 * __i386__ per the comment below about address-space pressure) whose
 * #ifdef lines are elided from this excerpt — confirm in the full file.
 */
1784 zfs_vnodes_adjust(void)
1787 int newdesiredvnodes;
/* Remember the current value so zfs_vnodes_adjust_back() can restore it. */
1789 desiredvnodes_backup = desiredvnodes;
1792 * We calculate newdesiredvnodes the same way it is done in
1793 * vntblinit(). If it is equal to desiredvnodes, it means that
1794 * it wasn't tuned by the administrator and we can tune it down.
1796 newdesiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 *
1797 vm_kmem_size / (5 * (sizeof(struct vm_object) +
1798 sizeof(struct vnode))));
1799 if (newdesiredvnodes == desiredvnodes)
1800 desiredvnodes = (3 * newdesiredvnodes) / 4;
/*
 * Undo zfs_vnodes_adjust(): restore the saved desiredvnodes value
 * (called on module unload).
 */
1805 zfs_vnodes_adjust_back(void)
1809 desiredvnodes = desiredvnodes_backup;
1817 printf("ZFS filesystem version " ZPL_VERSION_STRING "\n");
1820 * Initialize znode cache, vnode ops, etc...
1825 * Initialize .zfs directory structures
1830 * Reduce number of vnode. Originally number of vnodes is calculated
1831 * with UFS inode in mind. We reduce it here, because it's too big for
1834 zfs_vnodes_adjust();
1836 dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);
1844 zfs_vnodes_adjust_back();
1850 return (zfs_active_fs_count != 0);
/*
 * Upgrade the ZPL version of a mounted filesystem.  Rejects out-of-range
 * targets and downgrades, persists the new version in the master node,
 * and logs the change to pool history.  NOTE(review): locals, returns,
 * tx commit/abort and closing braces are elided from this excerpt.
 */
1854 zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
1857 objset_t *os = zfsvfs->z_os;
/* Target must be a known ZPL version ... */
1860 if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
/* ... and downgrades are not allowed. */
1863 if (newvers < zfsvfs->z_version)
1866 tx = dmu_tx_create(os);
1867 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
1868 error = dmu_tx_assign(tx, TXG_WAIT);
/* Persist the new version (one 8-byte integer) in the master node ZAP. */
1873 error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
1874 8, 1, &newvers, tx);
1881 spa_history_internal_log(LOG_DS_UPGRADE,
1882 dmu_objset_spa(os), tx, CRED(),
1883 "oldver=%llu newver=%llu dataset = %llu",
1884 zfsvfs->z_version, newvers, dmu_objset_id(os));
1888 zfsvfs->z_version = newvers;
/* FUID support becomes available starting with ZPL_VERSION_FUID. */
1890 if (zfsvfs->z_version >= ZPL_VERSION_FUID)
1891 zfs_set_fuid_feature(zfsvfs);
1896 * Read a property stored within the master node.
1899 zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
1905 * Look up the file system's value for the property. For the
1906 * version property, we look up a slightly different string.
1908 if (prop == ZFS_PROP_VERSION)
1909 pname = ZPL_VERSION_STR;
1911 pname = zfs_prop_to_name(prop);
1914 error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
1916 if (error == ENOENT) {
1917 /* No value set, use the default value */
1919 case ZFS_PROP_VERSION:
1920 *value = ZPL_VERSION;
1922 case ZFS_PROP_NORMALIZE:
1923 case ZFS_PROP_UTF8ONLY:
1927 *value = ZFS_CASE_SENSITIVE;