4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
24 * Copyright (c) 2013 Martin Matuska. All rights reserved.
25 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
28 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
32 #include <sys/dmu_objset.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/dsl_dataset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dsl_prop.h>
37 #include <sys/dsl_synctask.h>
38 #include <sys/dsl_deleg.h>
39 #include <sys/dmu_impl.h>
41 #include <sys/spa_impl.h>
42 #include <sys/metaslab.h>
46 #include <sys/sunddi.h>
47 #include <sys/zfeature.h>
48 #include <sys/policy.h>
49 #include <sys/zfs_vfsops.h>
50 #include <sys/zfs_znode.h>
53 #include "zfs_namecheck.h"
57 * Filesystem and Snapshot Limits
58 * ------------------------------
60 * These limits are used to restrict the number of filesystems and/or snapshots
61 * that can be created at a given level in the tree or below. A typical
62 * use-case is with a delegated dataset where the administrator wants to ensure
63 * that a user within the zone is not creating too many additional filesystems
64 * or snapshots, even though they're not exceeding their space quota.
66 * The filesystem and snapshot counts are stored as extensible properties. This
67 * capability is controlled by a feature flag and must be enabled to be used.
68 * Once enabled, the feature is not active until the first limit is set. At
69 * that point, future operations to create/destroy filesystems or snapshots
70 * will validate and update the counts.
72 * Because the count properties will not exist before the feature is active,
73 * the counts are updated when a limit is first set on an uninitialized
74 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
75 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
76 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
77 * snapshot count properties on a node indicate uninitialized counts on that
78 * node.) When first setting a limit on an uninitialized node, the code starts
79 * at the filesystem with the new limit and descends into all sub-filesystems
80 * to add the count properties.
82 * In practice this is lightweight since a limit is typically set when the
83 * filesystem is created and thus has no children. Once valid, changing the
84 * limit value won't require a re-traversal since the counts are already valid.
85 * When recursively fixing the counts, if a node with a limit is encountered
86 * during the descent, the counts are known to be valid and there is no need to
87 * descend into that filesystem's children. The counts on filesystems above the
88 * one with the new limit will still be uninitialized, unless a limit is
89 * eventually set on one of those filesystems. The counts are always recursively
90 * updated when a limit is set on a dataset, unless there is already a limit.
91 * When a new limit value is set on a filesystem with an existing limit, it is
92 * possible for the new limit to be less than the current count at that level
93 * since a user who can change the limit is also allowed to exceed the limit.
95 * Once the feature is active, then whenever a filesystem or snapshot is
96 * created, the code recurses up the tree, validating the new count against the
97 * limit at each initialized level. In practice, most levels will not have a
98 * limit set. If there is a limit at any initialized level up the tree, the
99 * check must pass or the creation will fail. Likewise, when a filesystem or
100 * snapshot is destroyed, the counts are recursively adjusted all the way up
101 * the initialized nodes in the tree. Renaming a filesystem into a different point
102 * in the tree will first validate, then update the counts on each branch up to
103 * the common ancestor. A receive will also validate the counts and then update them.
106 * An exception to the above behavior is that the limit is not enforced if the
107 * user has permission to modify the limit. This is primarily so that
108 * recursive snapshots in the global zone always work. We want to prevent a
109 * denial-of-service in which a lower level delegated dataset could max out its
110 * limit and thus block recursive snapshots from being taken in the global zone.
111 * Because of this, it is possible for the snapshot count to be over the limit
112 * and snapshots taken in the global zone could cause a lower level dataset to
113 * hit or exceed its limit. The administrator taking the global zone recursive
114 * snapshot should be aware of this side-effect and behave accordingly.
115 * For consistency, the filesystem limit is also not enforced if the user can modify the limit.
118 * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
119 * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
120 * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
121 * dsl_dir_init_fs_ss_count().
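 *
 * As an illustrative example (hypothetical dataset names, not taken from
 * this file): suppose filesystem_limit=2 is set on pool/delegated. Setting
 * the limit activates the feature and initializes the counts on
 * pool/delegated and everything beneath it. Creating pool/delegated/a then
 * validates the new count against each initialized level above it via
 * dsl_fs_ss_limit_check(): 0 existing filesystems + 1 <= 2, so the create
 * succeeds and dsl_fs_ss_count_adjust() raises the count to 1. A second
 * create raises it to 2, and a third fails with EDQUOT unless the caller
 * is allowed to modify the limit.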
124 static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
126 typedef struct ddulrt_arg {
127 dsl_dir_t *ddulrta_dd;
132 dsl_dir_evict_async(void *dbu)
136 dsl_pool_t *dp __maybe_unused = dd->dd_pool;
140 for (t = 0; t < TXG_SIZE; t++) {
141 ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
142 ASSERT(dd->dd_tempreserved[t] == 0);
143 ASSERT(dd->dd_space_towrite[t] == 0);
147 dsl_dir_async_rele(dd->dd_parent, dd);
149 spa_async_close(dd->dd_pool->dp_spa, dd);
151 if (dsl_deadlist_is_open(&dd->dd_livelist))
152 dsl_dir_livelist_close(dd);
155 cv_destroy(&dd->dd_activity_cv);
156 mutex_destroy(&dd->dd_activity_lock);
157 mutex_destroy(&dd->dd_lock);
158 kmem_free(dd, sizeof (dsl_dir_t));
162 dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
163 const char *tail, const void *tag, dsl_dir_t **ddp)
167 dmu_object_info_t doi;
170 ASSERT(dsl_pool_config_held(dp));
172 err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
175 dd = dmu_buf_get_user(dbuf);
177 dmu_object_info_from_db(dbuf, &doi);
178 ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
179 ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
184 dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
185 dd->dd_object = ddobj;
189 mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
190 mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
191 cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
194 if (dsl_dir_is_zapified(dd)) {
195 err = zap_lookup(dp->dp_meta_objset,
196 ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
197 sizeof (uint64_t), 1, &dd->dd_crypto_obj);
199 /* check for on-disk format errata */
200 if (dsl_dir_incompatible_encryption_version(
202 dp->dp_spa->spa_errata =
203 ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
205 } else if (err != ENOENT) {
210 if (dsl_dir_phys(dd)->dd_parent_obj) {
211 err = dsl_dir_hold_obj(dp,
212 dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
220 err = zap_lookup(dp->dp_meta_objset,
221 dsl_dir_phys(dd->dd_parent)->
222 dd_child_dir_zapobj, tail,
223 sizeof (foundobj), 1, &foundobj);
224 ASSERT(err || foundobj == ddobj);
226 (void) strlcpy(dd->dd_myname, tail,
227 sizeof (dd->dd_myname));
229 err = zap_value_search(dp->dp_meta_objset,
230 dsl_dir_phys(dd->dd_parent)->
232 ddobj, 0, dd->dd_myname);
237 (void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
238 sizeof (dd->dd_myname));
241 if (dsl_dir_is_clone(dd)) {
242 dmu_buf_t *origin_bonus;
243 dsl_dataset_phys_t *origin_phys;
246 * We can't open the origin dataset, because
247 * that would require opening this dsl_dir.
248 * Just look at its phys directly instead.
250 err = dmu_bonus_hold(dp->dp_meta_objset,
251 dsl_dir_phys(dd)->dd_origin_obj, FTAG,
255 origin_phys = origin_bonus->db_data;
257 origin_phys->ds_creation_txg;
258 dmu_buf_rele(origin_bonus, FTAG);
259 if (dsl_dir_is_zapified(dd)) {
261 err = zap_lookup(dp->dp_meta_objset,
262 dd->dd_object, DD_FIELD_LIVELIST,
263 sizeof (uint64_t), 1, &obj);
265 dsl_dir_livelist_open(dd, obj);
266 else if (err != ENOENT)
271 if (dsl_dir_is_zapified(dd)) {
272 inode_timespec_t t = {0};
273 (void) zap_lookup(dp->dp_meta_objset, ddobj,
274 DD_FIELD_SNAPSHOTS_CHANGED,
276 sizeof (inode_timespec_t) / sizeof (uint64_t),
278 dd->dd_snap_cmtime = t;
281 dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
283 winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
284 if (winner != NULL) {
286 dsl_dir_rele(dd->dd_parent, dd);
287 if (dsl_deadlist_is_open(&dd->dd_livelist))
288 dsl_dir_livelist_close(dd);
290 cv_destroy(&dd->dd_activity_cv);
291 mutex_destroy(&dd->dd_activity_lock);
292 mutex_destroy(&dd->dd_lock);
293 kmem_free(dd, sizeof (dsl_dir_t));
296 spa_open_ref(dp->dp_spa, dd);
301 * The dsl_dir_t has both open-to-close and instantiate-to-evict
302 * holds on the spa. We need the open-to-close holds because
303 * otherwise the spa_refcnt wouldn't change when we open a
304 * dir which the spa also has open, so we could incorrectly
305 * think it was OK to unload/export/destroy the pool. We need
306 * the instantiate-to-evict hold because the dsl_dir_t has a
307 * pointer to the dd_pool, which has a pointer to the spa_t.
309 spa_open_ref(dp->dp_spa, tag);
310 ASSERT3P(dd->dd_pool, ==, dp);
311 ASSERT3U(dd->dd_object, ==, ddobj);
312 ASSERT3P(dd->dd_dbuf, ==, dbuf);
318 dsl_dir_rele(dd->dd_parent, dd);
319 if (dsl_deadlist_is_open(&dd->dd_livelist))
320 dsl_dir_livelist_close(dd);
322 cv_destroy(&dd->dd_activity_cv);
323 mutex_destroy(&dd->dd_activity_lock);
324 mutex_destroy(&dd->dd_lock);
325 kmem_free(dd, sizeof (dsl_dir_t));
326 dmu_buf_rele(dbuf, tag);
331 dsl_dir_rele(dsl_dir_t *dd, const void *tag)
333 dprintf_dd(dd, "%s\n", "");
334 spa_close(dd->dd_pool->dp_spa, tag);
335 dmu_buf_rele(dd->dd_dbuf, tag);
339 * Remove a reference to the given dsl dir that is being asynchronously
340 * released. Async releases occur from a taskq performing eviction of
341 * dsl datasets and dirs. This process is identical to a normal release
342 * with the exception of using the async API for releasing the reference on the spa.
346 dsl_dir_async_rele(dsl_dir_t *dd, const void *tag)
348 dprintf_dd(dd, "%s\n", "");
349 spa_async_close(dd->dd_pool->dp_spa, tag);
350 dmu_buf_rele(dd->dd_dbuf, tag);
353 /* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
355 dsl_dir_name(dsl_dir_t *dd, char *buf)
358 dsl_dir_name(dd->dd_parent, buf);
359 VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
360 ZFS_MAX_DATASET_NAME_LEN);
364 if (!MUTEX_HELD(&dd->dd_lock)) {
366 * recursive mutex so that we can use
367 * dprintf_dd() with dd_lock held
369 mutex_enter(&dd->dd_lock);
370 VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
371 <, ZFS_MAX_DATASET_NAME_LEN);
372 mutex_exit(&dd->dd_lock);
374 VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
375 <, ZFS_MAX_DATASET_NAME_LEN);
379 /* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
381 dsl_dir_namelen(dsl_dir_t *dd)
386 /* parent's name + 1 for the "/" */
387 result = dsl_dir_namelen(dd->dd_parent) + 1;
390 if (!MUTEX_HELD(&dd->dd_lock)) {
391 /* see dsl_dir_name */
392 mutex_enter(&dd->dd_lock);
393 result += strlen(dd->dd_myname);
394 mutex_exit(&dd->dd_lock);
396 result += strlen(dd->dd_myname);
403 getcomponent(const char *path, char *component, const char **nextp)
407 if ((path == NULL) || (path[0] == '\0'))
408 return (SET_ERROR(ENOENT));
409 /* This would be a good place to reserve some namespace... */
410 p = strpbrk(path, "/@");
411 if (p && (p[1] == '/' || p[1] == '@')) {
412 /* two separators in a row */
413 return (SET_ERROR(EINVAL));
415 if (p == NULL || p == path) {
417 * if the first thing is an @ or /, it had better be an
418 * @ and it had better not have any more ats or slashes,
419 * and it had better have something after the @.
422 (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
423 return (SET_ERROR(EINVAL));
424 if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
425 return (SET_ERROR(ENAMETOOLONG));
426 (void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
428 } else if (p[0] == '/') {
429 if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
430 return (SET_ERROR(ENAMETOOLONG));
431 (void) strlcpy(component, path, p - path + 1);
433 } else if (p[0] == '@') {
435 * if the next separator is an @, there had better not be any more slashes.
438 if (strchr(path, '/'))
439 return (SET_ERROR(EINVAL));
440 if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
441 return (SET_ERROR(ENAMETOOLONG));
442 (void) strlcpy(component, path, p - path + 1);
444 panic("invalid p=%p", (void *)p);
451 * Return the dsl_dir_t, and possibly the last component which couldn't
452 * be found in *tail. The name must be in the specified dsl_pool_t. This
453 * thread must hold the dp_config_rwlock for the pool. Returns NULL if the
454 * path is bogus, or if tail==NULL and we couldn't parse the whole name.
455 * (*tail)[0] == '@' means that the last component is a snapshot.
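 *
 * Illustrative call pattern (a sketch with a hypothetical dataset name,
 * not taken from this file):
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *	int error = dsl_dir_hold(dp, "tank/fs@snap", FTAG, &dd, &tail);
 *
 * On success, dd is held on tank/fs, tail points at "@snap", and the hold
 * must eventually be dropped with dsl_dir_rele(dd, FTAG).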
458 dsl_dir_hold(dsl_pool_t *dp, const char *name, const void *tag,
459 dsl_dir_t **ddp, const char **tailp)
462 const char *spaname, *next, *nextnext = NULL;
467 buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
468 err = getcomponent(name, buf, &next);
472 /* Make sure the name is in the specified pool. */
473 spaname = spa_name(dp->dp_spa);
474 if (strcmp(buf, spaname) != 0) {
475 err = SET_ERROR(EXDEV);
479 ASSERT(dsl_pool_config_held(dp));
481 err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
486 while (next != NULL) {
488 err = getcomponent(next, buf, &nextnext);
491 ASSERT(next[0] != '\0');
494 dprintf("looking up %s in obj%lld\n",
495 buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
497 err = zap_lookup(dp->dp_meta_objset,
498 dsl_dir_phys(dd)->dd_child_dir_zapobj,
499 buf, sizeof (ddobj), 1, &ddobj);
506 err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
509 dsl_dir_rele(dd, tag);
515 dsl_dir_rele(dd, tag);
520 * It's an error if there's more than one component left, or
521 * tailp==NULL and there's any component left.
524 (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
526 dsl_dir_rele(dd, tag);
527 dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
528 err = SET_ERROR(ENOENT);
535 kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
540 * If the counts are already initialized for this filesystem and its
541 * descendants then do nothing, otherwise initialize the counts.
543 * The counts on this filesystem, and those below, may be uninitialized due to
544 * either the use of a pre-existing pool which did not support the
545 * filesystem/snapshot limit feature, or one in which the feature had not yet
548 * Recursively descend the filesystem tree and update the filesystem/snapshot
549 * counts on each filesystem below, then update the cumulative count on the
550 * current filesystem. If the filesystem already has a count set on it,
551 * then we know that its counts, and the counts on the filesystems below it,
552 * are already correct, so we don't have to update this filesystem.
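 *
 * For example (illustrative numbers): a filesystem with no snapshots of its
 * own and two leaf children, each with three snapshots, ends up with a
 * filesystem count of 2 and a snapshot count of 6 (each leaf itself has a
 * filesystem count of 0 and a snapshot count of 3).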
555 dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
557 uint64_t my_fs_cnt = 0;
558 uint64_t my_ss_cnt = 0;
559 dsl_pool_t *dp = dd->dd_pool;
560 objset_t *os = dp->dp_meta_objset;
565 ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
566 ASSERT(dsl_pool_config_held(dp));
567 ASSERT(dmu_tx_is_syncing(tx));
569 dsl_dir_zapify(dd, tx);
572 * If the filesystem count has already been initialized then we
573 * don't need to recurse down any further.
575 if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
578 zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
579 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
581 /* Iterate my child dirs */
582 for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
583 zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
587 VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
591 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
593 if (chld_dd->dd_myname[0] == '$') {
594 dsl_dir_rele(chld_dd, FTAG);
598 my_fs_cnt++; /* count this child */
600 dsl_dir_init_fs_ss_count(chld_dd, tx);
602 VERIFY0(zap_lookup(os, chld_dd->dd_object,
603 DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
605 VERIFY0(zap_lookup(os, chld_dd->dd_object,
606 DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
609 dsl_dir_rele(chld_dd, FTAG);
612 /* Count my snapshots (we counted children's snapshots above) */
613 VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
614 dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
616 for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
617 zap_cursor_retrieve(zc, za) == 0;
618 zap_cursor_advance(zc)) {
619 /* Don't count temporary snapshots */
620 if (za->za_name[0] != '%')
625 dsl_dataset_rele(ds, FTAG);
627 kmem_free(zc, sizeof (zap_cursor_t));
628 kmem_free(za, sizeof (zap_attribute_t));
630 /* we're in a sync task, update counts */
631 dmu_buf_will_dirty(dd->dd_dbuf, tx);
632 VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
633 sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
634 VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
635 sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
639 dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
641 char *ddname = (char *)arg;
642 dsl_pool_t *dp = dmu_tx_pool(tx);
647 error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
651 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
652 dsl_dataset_rele(ds, FTAG);
653 return (SET_ERROR(ENOTSUP));
657 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
658 dsl_dir_is_zapified(dd) &&
659 zap_contains(dp->dp_meta_objset, dd->dd_object,
660 DD_FIELD_FILESYSTEM_COUNT) == 0) {
661 dsl_dataset_rele(ds, FTAG);
662 return (SET_ERROR(EALREADY));
665 dsl_dataset_rele(ds, FTAG);
670 dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
672 char *ddname = (char *)arg;
673 dsl_pool_t *dp = dmu_tx_pool(tx);
677 VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
679 spa = dsl_dataset_get_spa(ds);
681 if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
683 * Since the feature was not active and we're now setting a
684 * limit, increment the feature-active counter so that the
685 * feature becomes active for the first time.
687 * We are already in a sync task so we can update the MOS.
689 spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
693 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
694 * we need to ensure the counts are correct. Descend the tree from
695 * this point and update all of the counts to be accurate.
697 dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
699 dsl_dataset_rele(ds, FTAG);
703 * Make sure the feature is enabled and activate it if necessary.
704 * Since we're setting a limit, ensure the on-disk counts are valid.
705 * This is only called by the ioctl path when setting a limit value.
707 * We do not need to validate the new limit, since users who can change the
708 * limit are also allowed to exceed the limit.
711 dsl_dir_activate_fs_ss_limit(const char *ddname)
715 error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
716 dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
717 ZFS_SPACE_CHECK_RESERVED);
719 if (error == EALREADY)
726 * Used to determine if the filesystem_limit or snapshot_limit should be
727 * enforced. We allow the limit to be exceeded if the user has permission to
728 * write the property value. We pass in the creds that we got in the open
729 * context since we will always be the GZ root in syncing context. We also have
730 * to handle the case where we are allowed to change the limit on the current
731 * dataset, but there may be another limit in the tree above.
733 * We can never modify these two properties within a non-global zone. In
734 * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
735 * can't use that function since we are already holding the dp_config_rwlock.
736 * In addition, we already have the dd and dealing with snapshots is simplified
747 dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
748 cred_t *cr, proc_t *proc)
750 enforce_res_t enforce = ENFORCE_ALWAYS;
754 const char *zonedstr;
756 ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
757 prop == ZFS_PROP_SNAPSHOT_LIMIT);
760 if (crgetzoneid(cr) != GLOBAL_ZONEID)
761 return (ENFORCE_ALWAYS);
764 * We are checking the saved credentials of the user process, which is
765 * not the current process. Note that we can't use secpolicy_zfs(),
766 * because it only works if the cred is that of the current process (on
769 if (secpolicy_zfs_proc(cr, proc) == 0)
770 return (ENFORCE_NEVER);
775 if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
776 return (ENFORCE_ALWAYS);
778 ASSERT(dsl_pool_config_held(dd->dd_pool));
780 if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
781 return (ENFORCE_ALWAYS);
783 zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
784 if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
785 /* Only root can access zoned fs's from the GZ */
786 enforce = ENFORCE_ALWAYS;
788 if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
789 enforce = ENFORCE_ABOVE;
792 dsl_dataset_rele(ds, FTAG);
797 * Check if adding additional child filesystem(s) would exceed any filesystem
798 * limits or adding additional snapshot(s) would exceed any snapshot limits.
799 * The prop argument indicates which limit to check.
801 * Note that all filesystem limits up to the root (or the highest
802 * initialized) filesystem or the given ancestor must be satisfied.
805 dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
806 dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
808 objset_t *os = dd->dd_pool->dp_meta_objset;
809 uint64_t limit, count;
810 const char *count_prop;
811 enforce_res_t enforce;
814 ASSERT(dsl_pool_config_held(dd->dd_pool));
815 ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
816 prop == ZFS_PROP_SNAPSHOT_LIMIT);
819 * If we're allowed to change the limit, don't enforce the limit
820 * e.g. this can happen if a snapshot is taken by an administrative
821 * user in the global zone (i.e. a recursive snapshot by root).
822 * However, we must handle the case of delegated permissions where we
823 * are allowed to change the limit on the current dataset, but there
824 * is another limit in the tree above.
826 enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
827 if (enforce == ENFORCE_NEVER)
831 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
837 if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
839 * We don't enforce the limit for temporary snapshots. This is
840 * indicated by a NULL cred_t argument.
845 count_prop = DD_FIELD_SNAPSHOT_COUNT;
847 count_prop = DD_FIELD_FILESYSTEM_COUNT;
851 * If an ancestor has been provided, stop checking the limit once we
852 * hit that dir. We need this during rename so that we don't overcount
853 * the check once we recurse up to the common ancestor.
859 * If we hit an uninitialized node while recursing up the tree, we can
860 * stop since we know there is no limit here (or above). The counts are
861 * not valid on this node and we know we won't touch this node's counts.
863 if (!dsl_dir_is_zapified(dd))
865 err = zap_lookup(os, dd->dd_object,
866 count_prop, sizeof (count), 1, &count);
872 err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
877 /* Is there a limit which we've hit? */
878 if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
879 return (SET_ERROR(EDQUOT));
881 if (dd->dd_parent != NULL)
882 err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
889 * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
890 * parents. When a new filesystem/snapshot is created, increment the count on
891 * all parents, and when a filesystem/snapshot is destroyed, decrement the
895 dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
899 objset_t *os = dd->dd_pool->dp_meta_objset;
902 ASSERT(dsl_pool_config_held(dd->dd_pool));
903 ASSERT(dmu_tx_is_syncing(tx));
904 ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
905 strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
908 * We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
910 if (dd->dd_myname[0] == '$' && strcmp(prop,
911 DD_FIELD_FILESYSTEM_COUNT) == 0) {
916 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
922 * If we hit an uninitialized node while recursing up the tree, we can
923 * stop since we know the counts are not valid on this node and we
924 * know we shouldn't touch this node's counts. An uninitialized count
925 * on the node indicates that either the feature has not yet been
926 * activated or there are no limits on this part of the tree.
928 if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
929 prop, sizeof (count), 1, &count)) == ENOENT)
934 /* Use a signed verify to make sure we're not neg. */
935 VERIFY3S(count, >=, 0);
937 VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
940 /* Roll up this additional count into our ancestors */
941 if (dd->dd_parent != NULL)
942 dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
946 dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
949 objset_t *mos = dp->dp_meta_objset;
951 dsl_dir_phys_t *ddphys;
954 ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
955 DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
957 VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
958 name, sizeof (uint64_t), 1, &ddobj, tx));
960 /* it's the root dir */
961 VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
962 DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
964 VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
965 dmu_buf_will_dirty(dbuf, tx);
966 ddphys = dbuf->db_data;
968 ddphys->dd_creation_time = gethrestime_sec();
970 ddphys->dd_parent_obj = pds->dd_object;
972 /* update the filesystem counts */
973 dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
975 ddphys->dd_props_zapobj = zap_create(mos,
976 DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
977 ddphys->dd_child_dir_zapobj = zap_create(mos,
978 DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
979 if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
980 ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
982 dmu_buf_rele(dbuf, FTAG);
988 dsl_dir_is_clone(dsl_dir_t *dd)
990 return (dsl_dir_phys(dd)->dd_origin_obj &&
991 (dd->dd_pool->dp_origin_snap == NULL ||
992 dsl_dir_phys(dd)->dd_origin_obj !=
993 dd->dd_pool->dp_origin_snap->ds_object));
997 dsl_dir_get_used(dsl_dir_t *dd)
999 return (dsl_dir_phys(dd)->dd_used_bytes);
1003 dsl_dir_get_compressed(dsl_dir_t *dd)
1005 return (dsl_dir_phys(dd)->dd_compressed_bytes);
1009 dsl_dir_get_quota(dsl_dir_t *dd)
1011 return (dsl_dir_phys(dd)->dd_quota);
1015 dsl_dir_get_reservation(dsl_dir_t *dd)
1017 return (dsl_dir_phys(dd)->dd_reserved);
1021 dsl_dir_get_compressratio(dsl_dir_t *dd)
1023 /* a fixed point number, 100x the ratio */
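/* e.g. (illustrative) data that compresses 2.5x is reported as 250 */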
1024 return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
1025 (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
1026 dsl_dir_phys(dd)->dd_compressed_bytes));
1030 dsl_dir_get_logicalused(dsl_dir_t *dd)
1032 return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
1036 dsl_dir_get_usedsnap(dsl_dir_t *dd)
1038 return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
1042 dsl_dir_get_usedds(dsl_dir_t *dd)
1044 return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
1048 dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
1050 return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
1054 dsl_dir_get_usedchild(dsl_dir_t *dd)
1056 return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
1057 dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
1061 dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
1064 VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
1065 dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
1067 dsl_dataset_name(ds, buf);
1069 dsl_dataset_rele(ds, FTAG);
1073 dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
1075 if (dsl_dir_is_zapified(dd)) {
1076 objset_t *os = dd->dd_pool->dp_meta_objset;
1077 return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
1078 sizeof (*count), 1, count));
1080 return (SET_ERROR(ENOENT));
1085 dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
1087 if (dsl_dir_is_zapified(dd)) {
1088 objset_t *os = dd->dd_pool->dp_meta_objset;
1089 return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
1090 sizeof (*count), 1, count));
1092 return (SET_ERROR(ENOENT));
1097 dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
1099 mutex_enter(&dd->dd_lock);
1100 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
1101 dsl_dir_get_quota(dd));
1102 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
1103 dsl_dir_get_reservation(dd));
1104 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
1105 dsl_dir_get_logicalused(dd));
1106 if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1107 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
1108 dsl_dir_get_usedsnap(dd));
1109 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
1110 dsl_dir_get_usedds(dd));
1111 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
1112 dsl_dir_get_usedrefreserv(dd));
1113 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
1114 dsl_dir_get_usedchild(dd));
1116 mutex_exit(&dd->dd_lock);
1119 if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
1120 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
1123 if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
1124 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
1128 if (dsl_dir_is_clone(dd)) {
1129 char buf[ZFS_MAX_DATASET_NAME_LEN];
1130 dsl_dir_get_origin(dd, buf);
1131 dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
1137 dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
1139 dsl_pool_t *dp = dd->dd_pool;
1141 ASSERT(dsl_dir_phys(dd));
1143 if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
1144 /* up the hold count until we can be written out */
1145 dmu_buf_add_ref(dd->dd_dbuf, dd);
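/*
 * parent_delta() computes how much of a change in this dir's usage is
 * visible to the parent, given that the parent always accounts for at
 * least dd_reserved on our behalf. Illustrative numbers: with
 * dd_reserved = 100 and used = 80, applying delta = +30 moves the
 * accounted value from MAX(80, 100) = 100 to MAX(110, 100) = 110, so
 * only 10 of the 30 units propagate upward.
 */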
1150 parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
1152 uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
1153 uint64_t new_accounted =
1154 MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
1155 return (new_accounted - old_accounted);
1159 dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
1161 ASSERT(dmu_tx_is_syncing(tx));
1163 mutex_enter(&dd->dd_lock);
1164 ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
1165 dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
1166 (u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
1167 dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
1168 mutex_exit(&dd->dd_lock);
1170 /* release the hold from dsl_dir_dirty */
1171 dmu_buf_rele(dd->dd_dbuf, dd);
1175 dsl_dir_space_towrite(dsl_dir_t *dd)
1179 ASSERT(MUTEX_HELD(&dd->dd_lock));
1181 for (int i = 0; i < TXG_SIZE; i++) {
1182 space += dd->dd_space_towrite[i & TXG_MASK];
1183 ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
1189 * How much space would dd have available if ancestor had delta applied
1190 * to it? If ondiskonly is set, we're only interested in what's
1191 * on-disk, not estimated pending changes.
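 *
 * Illustrative numbers: if this dir has a 100G quota with 60G used, and the
 * recursive call reports that the parent can provide 30G, the result is
 * MIN(30G, 100G - 60G) = 30G; without a quota the dir is limited only by
 * what the parent (ultimately the pool size) can provide.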
1194 dsl_dir_space_available(dsl_dir_t *dd,
1195 dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
1197 uint64_t parentspace, myspace, quota, used;
1200 * If there are no restrictions otherwise, assume we have
1201 * unlimited space available.
1204 parentspace = UINT64_MAX;
1206 if (dd->dd_parent != NULL) {
1207 parentspace = dsl_dir_space_available(dd->dd_parent,
1208 ancestor, delta, ondiskonly);
1211 mutex_enter(&dd->dd_lock);
1212 if (dsl_dir_phys(dd)->dd_quota != 0)
1213 quota = dsl_dir_phys(dd)->dd_quota;
1214 used = dsl_dir_phys(dd)->dd_used_bytes;
1216 used += dsl_dir_space_towrite(dd);
1218 if (dd->dd_parent == NULL) {
1219 uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
1220 ZFS_SPACE_CHECK_NORMAL);
1221 quota = MIN(quota, poolsize);
1224 if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
1226 * We have some space reserved, in addition to what our parent gave us.
1229 parentspace += dsl_dir_phys(dd)->dd_reserved - used;
1232 if (dd == ancestor) {
1234 ASSERT(used >= -delta);
1236 if (parentspace != UINT64_MAX)
1237 parentspace -= delta;
1245 * the lesser of the space provided by our parent and
1246 * the space left in our quota
1248 myspace = MIN(parentspace, quota - used);
1251 mutex_exit(&dd->dd_lock);
1256 struct tempreserve {
1257 list_node_t tr_node;
1263 dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
1264 boolean_t ignorequota, list_t *tr_list,
1265 dmu_tx_t *tx, boolean_t first)
1269 struct tempreserve *tr;
1278 ASSERT3U(txg, !=, 0);
1279 ASSERT3S(asize, >, 0);
1281 mutex_enter(&dd->dd_lock);
1284 * Check against the dsl_dir's quota. We don't add in the delta
1285 * when checking for over-quota because they get one free hit.
1287 uint64_t est_inflight = dsl_dir_space_towrite(dd);
1288 for (int i = 0; i < TXG_SIZE; i++)
1289 est_inflight += dd->dd_tempreserved[i];
1290 uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
1293 * On the first iteration, fetch the dataset's used-on-disk and
1294 * refreservation values. Also, if checkrefquota is set, test if
1295 * allocating this space would exceed the dataset's refquota.
1297 if (first && tx->tx_objset) {
1299 dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
1301 error = dsl_dataset_check_quota(ds, !netfree,
1302 asize, est_inflight, &used_on_disk, &ref_rsrv);
1304 mutex_exit(&dd->dd_lock);
1305 DMU_TX_STAT_BUMP(dmu_tx_quota);
1311 * If this transaction will result in a net free of space,
1312 * we want to let it through.
1314 if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
1317 quota = dsl_dir_phys(dd)->dd_quota;
1320 * Adjust the quota against the actual pool size at the root
1321 * minus any outstanding deferred frees.
1322 * To ensure that it's possible to remove files from a full
1323 * pool without inducing transient overcommits, we throttle
1324 * netfree transactions against a quota that is slightly larger,
1325 * but still within the pool's allocation slop. In cases where
1326 * we're very close to full, this will allow a steady trickle of
1327 * removes to get through.
1329 if (dd->dd_parent == NULL) {
1330 uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
1332 ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);
1334 if (avail < quota) {
1336 retval = SET_ERROR(ENOSPC);
1341 * If they are requesting more space, and our current estimate
1342 * is over quota, they get to try again unless the actual
1343 * on-disk is over quota and there are no pending changes
1344 * or deferred frees (which may free up space for us).
1346 if (used_on_disk + est_inflight >= quota) {
1347 if (est_inflight > 0 || used_on_disk < quota) {
1348 retval = SET_ERROR(ERESTART);
1350 ASSERT3U(used_on_disk, >=, quota);
1352 if (retval == ENOSPC && (used_on_disk - quota) <
1353 dsl_pool_deferred_space(dd->dd_pool)) {
1354 retval = SET_ERROR(ERESTART);
1358 dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
1359 "quota=%lluK tr=%lluK err=%d\n",
1360 (u_longlong_t)used_on_disk>>10,
1361 (u_longlong_t)est_inflight>>10,
1362 (u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
1363 mutex_exit(&dd->dd_lock);
1364 DMU_TX_STAT_BUMP(dmu_tx_quota);
1368 /* We need to up our estimated delta before dropping dd_lock */
1369 dd->dd_tempreserved[txg & TXG_MASK] += asize;
1371 uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
1373 mutex_exit(&dd->dd_lock);
1375 tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1377 tr->tr_size = asize;
1378 list_insert_tail(tr_list, tr);
1380 /* see if it's OK with our parent */
1381 if (dd->dd_parent != NULL && parent_rsrv != 0) {
1383 * Recurse on our parent without recursion (via goto). True recursion has
1384 * been observed to produce potentially large stack usage, even within
1385 * the test suite; the largest stack seen was 7632 bytes on Linux.
1389 asize = parent_rsrv;
1390 ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
1392 goto top_of_function;
1400 * Reserve space in this dsl_dir, to be used in this tx's txg.
1401 * After the space has been dirtied (and dsl_dir_willuse_space()
1402 * has been called), the reservation should be canceled, using
1403 * dsl_dir_tempreserve_clear().
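 *
 * Illustrative call pattern (a sketch, not taken from this file):
 *
 *	void *tr_cookie;
 *	int error = dsl_dir_tempreserve_space(dd, lsize, asize, B_FALSE,
 *	    &tr_cookie, tx);
 *
 * On success the caller dirties its data (calling dsl_dir_willuse_space())
 * and then drops the reservation with dsl_dir_tempreserve_clear(tr_cookie,
 * tx).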
1406 dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
1407 boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
1417 tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
1418 list_create(tr_list, sizeof (struct tempreserve),
1419 offsetof(struct tempreserve, tr_node));
1420 ASSERT3S(asize, >, 0);
1422 err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
1424 struct tempreserve *tr;
1426 tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
1427 tr->tr_size = lsize;
1428 list_insert_tail(tr_list, tr);
1430 if (err == EAGAIN) {
1432 * If arc_memory_throttle() detected that pageout
1433 * is running and we are low on memory, we delay new
1434 * non-pageout transactions to give pageout an advantage.
1437 * It is unfortunate to be delaying while the caller's locks are held.
1440 txg_delay(dd->dd_pool, tx->tx_txg,
1441 MSEC2NSEC(10), MSEC2NSEC(10));
1442 err = SET_ERROR(ERESTART);
1447 err = dsl_dir_tempreserve_impl(dd, asize, netfree,
1448 B_FALSE, tr_list, tx, B_TRUE);
1452 dsl_dir_tempreserve_clear(tr_list, tx);
1454 *tr_cookiep = tr_list;
1460 * Clear a temporary reservation that we previously made with
1461 * dsl_dir_tempreserve_space().
1464 dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
1466 int txgidx = tx->tx_txg & TXG_MASK;
1467 list_t *tr_list = tr_cookie;
1468 struct tempreserve *tr;
1470 ASSERT3U(tx->tx_txg, !=, 0);
1472 if (tr_cookie == NULL)
1475 while ((tr = list_head(tr_list)) != NULL) {
1477 mutex_enter(&tr->tr_ds->dd_lock);
1478 ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
1480 tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
1481 mutex_exit(&tr->tr_ds->dd_lock);
1483 arc_tempreserve_clear(tr->tr_size);
1485 list_remove(tr_list, tr);
1486 kmem_free(tr, sizeof (struct tempreserve));
1489 kmem_free(tr_list, sizeof (list_t));
1493 * This should be called from open context when we think we're going to write
1494 * or free space, for example when dirtying data. Be conservative; it's okay
1495 * to write less space or free more, but we don't want to write more or free
1496 * less than the amount specified.
1498 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
1499 * version; however, it has been adjusted to use an iterative rather than
1500 * recursive algorithm to minimize stack usage.
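 *
 * For example (illustrative): dirtying 10 MB in a child dir adds 10 MB to
 * that dir's dd_space_towrite for this txg, and parent_delta() then decides
 * how much of that estimate must be charged to each ancestor, walking up
 * until the propagated amount reaches zero or the root is reached.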
1503 dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
1505 int64_t parent_space;
1509 mutex_enter(&dd->dd_lock);
1511 dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
1513 est_used = dsl_dir_space_towrite(dd) +
1514 dsl_dir_phys(dd)->dd_used_bytes;
1515 parent_space = parent_delta(dd, est_used, space);
1516 mutex_exit(&dd->dd_lock);
1518 /* Make sure that we clean up dd_space_to* */
1519 dsl_dir_dirty(dd, tx);
1522 space = parent_space;
1523 } while (space && dd);
1526 /* call from syncing context when we actually write/free space for this dd */
1528 dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
1529 int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
1531 int64_t accounted_delta;
1533 ASSERT(dmu_tx_is_syncing(tx));
1534 ASSERT(type < DD_USED_NUM);
1536 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1539 * dsl_dataset_set_refreservation_sync_impl() calls this with
1540 * dd_lock held, so that it can atomically update
1541 * ds->ds_reserved and the dsl_dir accounting, so that
1542 * dsl_dataset_check_quota() can see dataset and dir accounting consistently.
1545 boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
1547 mutex_enter(&dd->dd_lock);
1548 dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1549 accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
1550 ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
1551 ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
1552 ASSERT(uncompressed >= 0 ||
1553 ddp->dd_uncompressed_bytes >= -uncompressed);
1554 ddp->dd_used_bytes += used;
1555 ddp->dd_uncompressed_bytes += uncompressed;
1556 ddp->dd_compressed_bytes += compressed;
1558 if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1559 ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
1560 ddp->dd_used_breakdown[type] += used;
1565 for (t = 0; t < DD_USED_NUM; t++)
1566 u += ddp->dd_used_breakdown[t];
1567 ASSERT3U(u, ==, ddp->dd_used_bytes);
1572 mutex_exit(&dd->dd_lock);
1574 if (dd->dd_parent != NULL) {
1575 dsl_dir_diduse_transfer_space(dd->dd_parent,
1576 accounted_delta, compressed, uncompressed,
1577 used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
1582 dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
1583 dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
1585 ASSERT(dmu_tx_is_syncing(tx));
1586 ASSERT(oldtype < DD_USED_NUM);
1587 ASSERT(newtype < DD_USED_NUM);
1589 dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1591 !(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
1594 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1595 mutex_enter(&dd->dd_lock);
1597 ddp->dd_used_breakdown[oldtype] >= delta :
1598 ddp->dd_used_breakdown[newtype] >= -delta);
1599 ASSERT(ddp->dd_used_bytes >= ABS(delta));
1600 ddp->dd_used_breakdown[oldtype] -= delta;
1601 ddp->dd_used_breakdown[newtype] += delta;
1602 mutex_exit(&dd->dd_lock);
1606 dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
1607 int64_t compressed, int64_t uncompressed, int64_t tonew,
1608 dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
1610 int64_t accounted_delta;
1612 ASSERT(dmu_tx_is_syncing(tx));
1613 ASSERT(oldtype < DD_USED_NUM);
1614 ASSERT(newtype < DD_USED_NUM);
1616 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1618 mutex_enter(&dd->dd_lock);
1619 dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
1620 accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
1621 ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
1622 ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
1623 ASSERT(uncompressed >= 0 ||
1624 ddp->dd_uncompressed_bytes >= -uncompressed);
1625 ddp->dd_used_bytes += used;
1626 ddp->dd_uncompressed_bytes += uncompressed;
1627 ddp->dd_compressed_bytes += compressed;
1629 if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
1630 ASSERT(tonew - used <= 0 ||
1631 ddp->dd_used_breakdown[oldtype] >= tonew - used);
1632 ASSERT(tonew >= 0 ||
1633 ddp->dd_used_breakdown[newtype] >= -tonew);
1634 ddp->dd_used_breakdown[oldtype] -= tonew - used;
1635 ddp->dd_used_breakdown[newtype] += tonew;
1640 for (t = 0; t < DD_USED_NUM; t++)
1641 u += ddp->dd_used_breakdown[t];
1642 ASSERT3U(u, ==, ddp->dd_used_bytes);
1646 mutex_exit(&dd->dd_lock);
1648 if (dd->dd_parent != NULL) {
1649 dsl_dir_diduse_transfer_space(dd->dd_parent,
1650 accounted_delta, compressed, uncompressed,
1651 used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
1655 typedef struct dsl_dir_set_qr_arg {
1656 const char *ddsqra_name;
1657 zprop_source_t ddsqra_source;
1658 uint64_t ddsqra_value;
1659 } dsl_dir_set_qr_arg_t;
1662 dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
1664 dsl_dir_set_qr_arg_t *ddsqra = arg;
1665 dsl_pool_t *dp = dmu_tx_pool(tx);
1668 uint64_t towrite, newval;
1670 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
1674 error = dsl_prop_predict(ds->ds_dir, "quota",
1675 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
1677 dsl_dataset_rele(ds, FTAG);
1682 dsl_dataset_rele(ds, FTAG);
1686 mutex_enter(&ds->ds_dir->dd_lock);
1688 * If we are doing the preliminary check in open context, and
1689 * there are pending changes, then don't fail it, since the
1690 * pending changes could under-estimate the amount of space to be freed up.
1693 towrite = dsl_dir_space_towrite(ds->ds_dir);
1694 if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
1695 (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
1696 newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
1697 error = SET_ERROR(ENOSPC);
1699 mutex_exit(&ds->ds_dir->dd_lock);
1700 dsl_dataset_rele(ds, FTAG);
1705 dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
1707 dsl_dir_set_qr_arg_t *ddsqra = arg;
1708 dsl_pool_t *dp = dmu_tx_pool(tx);
1712 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
1714 if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
1715 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
1716 ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
1717 &ddsqra->ddsqra_value, tx);
1719 VERIFY0(dsl_prop_get_int_ds(ds,
1720 zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
1722 newval = ddsqra->ddsqra_value;
1723 spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
1724 zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
1727 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1728 mutex_enter(&ds->ds_dir->dd_lock);
1729 dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
1730 mutex_exit(&ds->ds_dir->dd_lock);
1731 dsl_dataset_rele(ds, FTAG);
1735 dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
1737 dsl_dir_set_qr_arg_t ddsqra;
1739 ddsqra.ddsqra_name = ddname;
1740 ddsqra.ddsqra_source = source;
1741 ddsqra.ddsqra_value = quota;
1743 return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
1744 dsl_dir_set_quota_sync, &ddsqra, 0,
1745 ZFS_SPACE_CHECK_EXTRA_RESERVED));
1749 dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
1751 dsl_dir_set_qr_arg_t *ddsqra = arg;
1752 dsl_pool_t *dp = dmu_tx_pool(tx);
1755 uint64_t newval, used, avail;
1758 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
1764 * If we are doing the preliminary check in open context, the
1765 * space estimates may be inaccurate.
1767 if (!dmu_tx_is_syncing(tx)) {
1768 dsl_dataset_rele(ds, FTAG);
1772 error = dsl_prop_predict(ds->ds_dir,
1773 zfs_prop_to_name(ZFS_PROP_RESERVATION),
1774 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
1776 dsl_dataset_rele(ds, FTAG);
1780 mutex_enter(&dd->dd_lock);
1781 used = dsl_dir_phys(dd)->dd_used_bytes;
1782 mutex_exit(&dd->dd_lock);
1784 if (dd->dd_parent) {
1785 avail = dsl_dir_space_available(dd->dd_parent,
1788 avail = dsl_pool_adjustedsize(dd->dd_pool,
1789 ZFS_SPACE_CHECK_NORMAL) - used;
1792 if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
1793 uint64_t delta = MAX(used, newval) -
1794 MAX(used, dsl_dir_phys(dd)->dd_reserved);
1796 if (delta > avail ||
1797 (dsl_dir_phys(dd)->dd_quota > 0 &&
1798 newval > dsl_dir_phys(dd)->dd_quota))
1799 error = SET_ERROR(ENOSPC);
1802 dsl_dataset_rele(ds, FTAG);
1807 dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
1812 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1814 mutex_enter(&dd->dd_lock);
1815 used = dsl_dir_phys(dd)->dd_used_bytes;
1816 delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
1817 dsl_dir_phys(dd)->dd_reserved = value;
1819 if (dd->dd_parent != NULL) {
1820 /* Roll up this additional usage into our ancestors */
1821 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
1824 mutex_exit(&dd->dd_lock);
1828 dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
1830 dsl_dir_set_qr_arg_t *ddsqra = arg;
1831 dsl_pool_t *dp = dmu_tx_pool(tx);
1835 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
1837 if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
1838 dsl_prop_set_sync_impl(ds,
1839 zfs_prop_to_name(ZFS_PROP_RESERVATION),
1840 ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
1841 &ddsqra->ddsqra_value, tx);
1843 VERIFY0(dsl_prop_get_int_ds(ds,
1844 zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
1846 newval = ddsqra->ddsqra_value;
1847 spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
1848 zfs_prop_to_name(ZFS_PROP_RESERVATION),
1849 (longlong_t)newval);
1852 dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
1853 dsl_dataset_rele(ds, FTAG);
1857 dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
1858 uint64_t reservation)
1860 dsl_dir_set_qr_arg_t ddsqra;
1862 ddsqra.ddsqra_name = ddname;
1863 ddsqra.ddsqra_source = source;
1864 ddsqra.ddsqra_value = reservation;
1866 return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
1867 dsl_dir_set_reservation_sync, &ddsqra, 0,
1868 ZFS_SPACE_CHECK_EXTRA_RESERVED));
1872 closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
1874 for (; ds1; ds1 = ds1->dd_parent) {
1876 for (dd = ds2; dd; dd = dd->dd_parent) {
1885 * If delta is applied to dd, how much of that delta would be applied to
1886 * ancestor? Syncing context only.
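 *
 * Illustrative numbers: freeing 50 units from a dir with dd_reserved = 200
 * and dd_used_bytes = 180 changes nothing from the ancestor's point of
 * view, because the parent keeps accounting for the 200-unit reservation
 * either way (parent_delta() returns 0, and the recursion carries a zero
 * delta upward).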
1889 would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
1894 mutex_enter(&dd->dd_lock);
1895 delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
1896 mutex_exit(&dd->dd_lock);
1897 return (would_change(dd->dd_parent, delta, ancestor));
1900 typedef struct dsl_dir_rename_arg {
1901 const char *ddra_oldname;
1902 const char *ddra_newname;
1905 } dsl_dir_rename_arg_t;
1907 typedef struct dsl_valid_rename_arg {
1910 } dsl_valid_rename_arg_t;
1913 dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
1916 dsl_valid_rename_arg_t *dvra = arg;
1917 char namebuf[ZFS_MAX_DATASET_NAME_LEN];
1919 dsl_dataset_name(ds, namebuf);
1921 ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
1922 <, ZFS_MAX_DATASET_NAME_LEN);
1923 int namelen = strlen(namebuf) + dvra->char_delta;
1924 int depth = get_dataset_depth(namebuf) + dvra->nest_delta;
1926 if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
1927 return (SET_ERROR(ENAMETOOLONG));
1928 if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
1929 return (SET_ERROR(ENAMETOOLONG));
1934 dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
1936 dsl_dir_rename_arg_t *ddra = arg;
1937 dsl_pool_t *dp = dmu_tx_pool(tx);
1938 dsl_dir_t *dd, *newparent;
1939 dsl_valid_rename_arg_t dvra;
1940 dsl_dataset_t *parentds;
1942 const char *mynewname;
1945 /* target dir should exist */
1946 error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
1950 /* new parent should exist */
1951 error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
1952 &newparent, &mynewname);
1954 dsl_dir_rele(dd, FTAG);
1958 /* can't rename to a different pool */
1959 if (dd->dd_pool != newparent->dd_pool) {
1960 dsl_dir_rele(newparent, FTAG);
1961 dsl_dir_rele(dd, FTAG);
1962 return (SET_ERROR(EXDEV));
1965 /* new name should not already exist */
1966 if (mynewname == NULL) {
1967 dsl_dir_rele(newparent, FTAG);
1968 dsl_dir_rele(dd, FTAG);
1969 return (SET_ERROR(EEXIST));
1972 /* can't rename below anything but filesystems (e.g. no ZVOLs) */
1973 error = dsl_dataset_hold_obj(newparent->dd_pool,
1974 dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
1976 dsl_dir_rele(newparent, FTAG);
1977 dsl_dir_rele(dd, FTAG);
1980 error = dmu_objset_from_ds(parentds, &parentos);
1982 dsl_dataset_rele(parentds, FTAG);
1983 dsl_dir_rele(newparent, FTAG);
1984 dsl_dir_rele(dd, FTAG);
1987 if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
1988 dsl_dataset_rele(parentds, FTAG);
1989 dsl_dir_rele(newparent, FTAG);
1990 dsl_dir_rele(dd, FTAG);
1991 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
1993 dsl_dataset_rele(parentds, FTAG);
1995 ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
1996 <, ZFS_MAX_DATASET_NAME_LEN);
1997 ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
1998 <, ZFS_MAX_DATASET_NAME_LEN);
1999 dvra.char_delta = strlen(ddra->ddra_newname)
2000 - strlen(ddra->ddra_oldname);
2001 dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
2002 - get_dataset_depth(ddra->ddra_oldname);
2004 /* if the name length is growing, validate child name lengths */
2005 if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
2006 error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
2007 &dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2009 dsl_dir_rele(newparent, FTAG);
2010 dsl_dir_rele(dd, FTAG);
2015 if (dmu_tx_is_syncing(tx)) {
2016 if (spa_feature_is_active(dp->dp_spa,
2017 SPA_FEATURE_FS_SS_LIMIT)) {
2019 * Although this is the check function and we don't
2020 * normally make on-disk changes in check functions,
2021 * we need to do that here.
2023 * Ensure this portion of the tree's counts have been
2024 * initialized in case the new parent has limits set.
2026 dsl_dir_init_fs_ss_count(dd, tx);
2030 if (newparent != dd->dd_parent) {
2031 /* is there enough space? */
2033 MAX(dsl_dir_phys(dd)->dd_used_bytes,
2034 dsl_dir_phys(dd)->dd_reserved);
2035 objset_t *os = dd->dd_pool->dp_meta_objset;
2036 uint64_t fs_cnt = 0;
2037 uint64_t ss_cnt = 0;
2039 if (dsl_dir_is_zapified(dd)) {
2042 err = zap_lookup(os, dd->dd_object,
2043 DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
2045 if (err != ENOENT && err != 0) {
2046 dsl_dir_rele(newparent, FTAG);
2047 dsl_dir_rele(dd, FTAG);
2052 * have to add 1 for the filesystem itself that we're moving
2057 err = zap_lookup(os, dd->dd_object,
2058 DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
2060 if (err != ENOENT && err != 0) {
2061 dsl_dir_rele(newparent, FTAG);
2062 dsl_dir_rele(dd, FTAG);
2067 /* check for encryption errors */
2068 error = dsl_dir_rename_crypt_check(dd, newparent);
2070 dsl_dir_rele(newparent, FTAG);
2071 dsl_dir_rele(dd, FTAG);
2072 return (SET_ERROR(EACCES));
2075 /* no rename into our descendant */
2076 if (closest_common_ancestor(dd, newparent) == dd) {
2077 dsl_dir_rele(newparent, FTAG);
2078 dsl_dir_rele(dd, FTAG);
2079 return (SET_ERROR(EINVAL));
2082 error = dsl_dir_transfer_possible(dd->dd_parent,
2083 newparent, fs_cnt, ss_cnt, myspace,
2084 ddra->ddra_cred, ddra->ddra_proc);
2086 dsl_dir_rele(newparent, FTAG);
2087 dsl_dir_rele(dd, FTAG);
2092 dsl_dir_rele(newparent, FTAG);
2093 dsl_dir_rele(dd, FTAG);
2098 dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
2100 dsl_dir_rename_arg_t *ddra = arg;
2101 dsl_pool_t *dp = dmu_tx_pool(tx);
2102 dsl_dir_t *dd, *newparent;
2103 const char *mynewname;
2104 objset_t *mos = dp->dp_meta_objset;
2106 VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
2107 VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
2110 /* Log this before we change the name. */
2111 spa_history_log_internal_dd(dd, "rename", tx,
2112 "-> %s", ddra->ddra_newname);
2114 if (newparent != dd->dd_parent) {
2115 objset_t *os = dd->dd_pool->dp_meta_objset;
2116 uint64_t fs_cnt = 0;
2117 uint64_t ss_cnt = 0;
2120 * We already made sure the dd counts were initialized in the check function.
2123 if (spa_feature_is_active(dp->dp_spa,
2124 SPA_FEATURE_FS_SS_LIMIT)) {
2125 VERIFY0(zap_lookup(os, dd->dd_object,
2126 DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
2128 /* add 1 for the filesystem itself that we're moving */
2131 VERIFY0(zap_lookup(os, dd->dd_object,
2132 DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
2136 dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
2137 DD_FIELD_FILESYSTEM_COUNT, tx);
2138 dsl_fs_ss_count_adjust(newparent, fs_cnt,
2139 DD_FIELD_FILESYSTEM_COUNT, tx);
2141 dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
2142 DD_FIELD_SNAPSHOT_COUNT, tx);
2143 dsl_fs_ss_count_adjust(newparent, ss_cnt,
2144 DD_FIELD_SNAPSHOT_COUNT, tx);
2146 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
2147 -dsl_dir_phys(dd)->dd_used_bytes,
2148 -dsl_dir_phys(dd)->dd_compressed_bytes,
2149 -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
2150 dsl_dir_diduse_space(newparent, DD_USED_CHILD,
2151 dsl_dir_phys(dd)->dd_used_bytes,
2152 dsl_dir_phys(dd)->dd_compressed_bytes,
2153 dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
2155 if (dsl_dir_phys(dd)->dd_reserved >
2156 dsl_dir_phys(dd)->dd_used_bytes) {
2157 uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
2158 dsl_dir_phys(dd)->dd_used_bytes;
2160 dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
2161 -unused_rsrv, 0, 0, tx);
2162 dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
2163 unused_rsrv, 0, 0, tx);
2167 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2169 /* remove from old parent zapobj */
2170 VERIFY0(zap_remove(mos,
2171 dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
2172 dd->dd_myname, tx));
2174 (void) strlcpy(dd->dd_myname, mynewname,
2175 sizeof (dd->dd_myname));
2176 dsl_dir_rele(dd->dd_parent, dd);
2177 dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
2178 VERIFY0(dsl_dir_hold_obj(dp,
2179 newparent->dd_object, NULL, dd, &dd->dd_parent));
2181 /* add to new parent zapobj */
2182 VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
2183 dd->dd_myname, 8, 1, &dd->dd_object, tx));
2185 /* TODO: A rename callback to avoid these layering violations. */
2186 zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
2187 zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
2188 ddra->ddra_newname, B_TRUE);
2190 dsl_prop_notify_all(dd);
2192 dsl_dir_rele(newparent, FTAG);
2193 dsl_dir_rele(dd, FTAG);
2197 dsl_dir_rename(const char *oldname, const char *newname)
2199 dsl_dir_rename_arg_t ddra;
2201 ddra.ddra_oldname = oldname;
2202 ddra.ddra_newname = newname;
2203 ddra.ddra_cred = CRED();
2204 ddra.ddra_proc = curproc;
2206 return (dsl_sync_task(oldname,
2207 dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
2208 3, ZFS_SPACE_CHECK_RESERVED));
2212 dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
2213 uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
2214 cred_t *cr, proc_t *proc)
2216 dsl_dir_t *ancestor;
2221 ancestor = closest_common_ancestor(sdd, tdd);
2222 adelta = would_change(sdd, -space, ancestor);
2223 avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
2225 return (SET_ERROR(ENOSPC));
2227 err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
2228 ancestor, cr, proc);
2231 err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
2232 ancestor, cr, proc);
2240 dsl_dir_snap_cmtime(dsl_dir_t *dd)
2244 mutex_enter(&dd->dd_lock);
2245 t = dd->dd_snap_cmtime;
2246 mutex_exit(&dd->dd_lock);
2252 dsl_dir_snap_cmtime_update(dsl_dir_t *dd, dmu_tx_t *tx)
2254 dsl_pool_t *dp = dmu_tx_pool(tx);
2258 mutex_enter(&dd->dd_lock);
2259 dd->dd_snap_cmtime = t;
2260 if (spa_feature_is_enabled(dp->dp_spa,
2261 SPA_FEATURE_EXTENSIBLE_DATASET)) {
2262 objset_t *mos = dd->dd_pool->dp_meta_objset;
2263 uint64_t ddobj = dd->dd_object;
2264 dsl_dir_zapify(dd, tx);
2265 VERIFY0(zap_update(mos, ddobj,
2266 DD_FIELD_SNAPSHOTS_CHANGED,
2268 sizeof (inode_timespec_t) / sizeof (uint64_t),
2271 mutex_exit(&dd->dd_lock);
2275 dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
2277 objset_t *mos = dd->dd_pool->dp_meta_objset;
2278 dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
2282 dsl_dir_is_zapified(dsl_dir_t *dd)
2284 dmu_object_info_t doi;
2286 dmu_object_info_from_db(dd->dd_dbuf, &doi);
2287 return (doi.doi_type == DMU_OTN_ZAP_METADATA);
2291 dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
2293 objset_t *mos = dd->dd_pool->dp_meta_objset;
2294 ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
2295 SPA_FEATURE_LIVELIST));
2296 dsl_deadlist_open(&dd->dd_livelist, mos, obj);
2297 bplist_create(&dd->dd_pending_allocs);
2298 bplist_create(&dd->dd_pending_frees);
2302 dsl_dir_livelist_close(dsl_dir_t *dd)
2304 dsl_deadlist_close(&dd->dd_livelist);
2305 bplist_destroy(&dd->dd_pending_allocs);
2306 bplist_destroy(&dd->dd_pending_frees);
2310 dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
2313 dsl_pool_t *dp = dmu_tx_pool(tx);
2314 spa_t *spa = dp->dp_spa;
2315 livelist_condense_entry_t to_condense = spa->spa_to_condense;
2317 if (!dsl_deadlist_is_open(&dd->dd_livelist))
2321 * If the livelist being removed is set to be condensed, stop the
2322 * condense zthr and indicate the cancellation in the spa_to_condense
2323 * struct in case the condense no-wait synctask has already started
2325 zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
2326 if (ll_condense_thread != NULL &&
2327 (to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
2329 * We use zthr_wait_cycle_done instead of zthr_cancel
2330 * because we don't want to destroy the zthr, just have
2331 * it skip its current task.
2333 spa->spa_to_condense.cancelled = B_TRUE;
2334 zthr_wait_cycle_done(ll_condense_thread);
2336 * If we've returned from zthr_wait_cycle_done without
2337 * clearing the to_condense data structure it's either
2338 * because the no-wait synctask has started (which is
2339 * indicated by 'syncing' field of to_condense) and we
2340 * can expect it to clear to_condense on its own.
2341 * Otherwise, we returned before the zthr ran. The
2342 * checkfunc will now fail as cancelled == B_TRUE so we
2343 * can safely NULL out ds, allowing a different dir's
2344 * livelist to be condensed.
2346 * We can be sure that the to_condense struct will not
2347 * be repopulated at this stage because both this
2348 * function and dsl_livelist_try_condense execute in
2351 if ((spa->spa_to_condense.ds != NULL) &&
2352 !spa->spa_to_condense.syncing) {
2353 dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
2355 spa->spa_to_condense.ds = NULL;
2359 dsl_dir_livelist_close(dd);
2360 VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
2361 DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
2362 VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
2363 DD_FIELD_LIVELIST, tx));
2365 dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
2366 spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
2371 dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
2372 zfs_wait_activity_t activity, boolean_t *in_progress)
2376 ASSERT(MUTEX_HELD(&dd->dd_activity_lock));
2379 case ZFS_WAIT_DELETEQ: {
2382 error = dmu_objset_from_ds(ds, &os);
2386 mutex_enter(&os->os_user_ptr_lock);
2387 void *user = dmu_objset_get_user(os);
2388 mutex_exit(&os->os_user_ptr_lock);
2389 if (dmu_objset_type(os) != DMU_OST_ZFS ||
2390 user == NULL || zfs_get_vfs_flag_unmounted(os)) {
2391 *in_progress = B_FALSE;
2395 uint64_t readonly = B_FALSE;
2396 error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
2402 if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
2403 *in_progress = B_FALSE;
2407 uint64_t count, unlinked_obj;
2408 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
2411 dsl_dataset_rele(ds, FTAG);
2414 error = zap_count(os, unlinked_obj, &count);
2417 *in_progress = (count != 0);
2421 * The delete queue is ZPL specific, and libzpool doesn't have
2422 * it. It doesn't make sense to wait for it.
2425 *in_progress = B_FALSE;
2430 panic("unrecognized value for activity %d", activity);
2437 dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
2441 boolean_t in_progress;
2442 dsl_pool_t *dp = dd->dd_pool;
2444 dsl_pool_config_enter(dp, FTAG);
2445 error = dsl_dir_activity_in_progress(dd, ds, activity,
2447 dsl_pool_config_exit(dp, FTAG);
2448 if (error != 0 || !in_progress)
2453 if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
2454 0 || dd->dd_activity_cancelled) {
2455 error = SET_ERROR(EINTR);
2463 dsl_dir_cancel_waiters(dsl_dir_t *dd)
2465 mutex_enter(&dd->dd_activity_lock);
2466 dd->dd_activity_cancelled = B_TRUE;
2467 cv_broadcast(&dd->dd_activity_cv);
2468 while (dd->dd_activity_waiters > 0)
2469 cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
2470 mutex_exit(&dd->dd_activity_lock);
2473 #if defined(_KERNEL)
2474 EXPORT_SYMBOL(dsl_dir_set_quota);
2475 EXPORT_SYMBOL(dsl_dir_set_reservation);