/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Martin Matuska. All rights reserved.
 * Copyright (c) 2014 Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>

#include <sys/metaslab.h>

#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>

#include "zfs_namecheck.h"
/*
 * Filesystem and Snapshot Limits
 * ------------------------------
 *
 * These limits are used to restrict the number of filesystems and/or snapshots
 * that can be created at a given level in the tree or below. A typical
 * use-case is with a delegated dataset where the administrator wants to ensure
 * that a user within the zone is not creating too many additional filesystems
 * or snapshots, even though they're not exceeding their space quota.
 *
 * The filesystem and snapshot counts are stored as extensible properties. This
 * capability is controlled by a feature flag and must be enabled to be used.
 * Once enabled, the feature is not active until the first limit is set. At
 * that point, future operations to create/destroy filesystems or snapshots
 * will validate and update the counts.
 *
 * Because the count properties will not exist before the feature is active,
 * the counts are updated when a limit is first set on an uninitialized
 * dsl_dir node in the tree (The filesystem/snapshot count on a node includes
 * all of the nested filesystems/snapshots. Thus, a new leaf node has a
 * filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
 * snapshot count properties on a node indicate uninitialized counts on that
 * node.) When first setting a limit on an uninitialized node, the code starts
 * at the filesystem with the new limit and descends into all sub-filesystems
 * to add the count properties.
 *
 * In practice this is lightweight since a limit is typically set when the
 * filesystem is created and thus has no children. Once valid, changing the
 * limit value won't require a re-traversal since the counts are already valid.
 * When recursively fixing the counts, if a node with a limit is encountered
 * during the descent, the counts are known to be valid and there is no need to
 * descend into that filesystem's children. The counts on filesystems above the
 * one with the new limit will still be uninitialized, unless a limit is
 * eventually set on one of those filesystems. The counts are always
 * recursively updated when a limit is set on a dataset, unless there is
 * already a limit. When a new limit value is set on a filesystem with an
 * existing limit, it is possible for the new limit to be less than the
 * current count at that level since a user who can change the limit is also
 * allowed to exceed the limit.
 *
 * Once the feature is active, whenever a filesystem or snapshot is
 * created, the code recurses up the tree, validating the new count against the
 * limit at each initialized level. In practice, most levels will not have a
 * limit set. If there is a limit at any initialized level up the tree, the
 * check must pass or the creation will fail. Likewise, when a filesystem or
 * snapshot is destroyed, the counts are recursively adjusted all the way up
 * the initialized nodes in the tree. Renaming a filesystem into a different
 * point in the tree will first validate, then update the counts on each branch
 * up to the common ancestor. A receive will also validate the counts and then
 * update them.
 *
 * An exception to the above behavior is that the limit is not enforced if the
 * user has permission to modify the limit. This is primarily so that
 * recursive snapshots in the global zone always work. We want to prevent a
 * denial-of-service in which a lower level delegated dataset could max out its
 * limit and thus block recursive snapshots from being taken in the global
 * zone. Because of this, it is possible for the snapshot count to be over the
 * limit and snapshots taken in the global zone could cause a lower level
 * dataset to hit or exceed its limit. The administrator taking the global
 * zone recursive snapshot should be aware of this side-effect and behave
 * accordingly. For consistency, the filesystem limit is also not enforced if
 * the user can modify the limit.
 *
 * The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
 * and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
 * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
 * dsl_dir_init_fs_ss_count().
 *
 * There is a special case when we receive a filesystem that already exists. In
 * this case a temporary clone name of %X is created (see dmu_recv_begin). We
 * never update the filesystem counts for temporary clones.
 *
 * Likewise, we do not update the snapshot counts for temporary snapshots,
 * such as those created by zfs diff.
 */
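
/*
 * Illustrative sketch only (hypothetical helper, guarded out of the build):
 * how a create path might pair the limit check with the count update that
 * the comment above describes. Transaction setup and error paths are elided.
 */
#if 0
static int
example_create_fs_checks(dsl_dir_t *parent, cred_t *cr, dmu_tx_t *tx)
{
	int err;

	/* Validate one new filesystem against every limit up the tree. */
	err = dsl_fs_ss_limit_check(parent, 1, ZFS_PROP_FILESYSTEM_LIMIT,
	    NULL, cr);
	if (err != 0)
		return (err);

	/* In syncing context, roll the new count up through the ancestors. */
	dsl_fs_ss_count_adjust(parent, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
	return (0);
}
#endif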
extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
dsl_dir_evict(void *dbu)

	ASSERTV(dsl_pool_t *dp = dd->dd_pool);

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);

		dsl_dir_async_rele(dd->dd_parent, dd);

	spa_async_close(dd->dd_pool->dp_spa, dd);

	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);

	dd = dmu_buf_get_user(dbuf);

		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;

		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		dsl_dir_snap_cmtime_update(dd);

		if (dsl_dir_phys(dd)->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp,
			    dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,

				err = zap_lookup(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->
				    dd_child_dir_zapobj, tail,
				    sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);

				(void) strlcpy(dd->dd_myname, tail,
				    sizeof (dd->dd_myname));

				err = zap_value_search(dp->dp_meta_objset,
				    dsl_dir_phys(dd->dd_parent)->

				    ddobj, 0, dd->dd_myname);

			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dsl_dir_phys(dd)->dd_origin_obj, FTAG,

			origin_phys = origin_bonus->db_data;

			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);

		dmu_buf_init_user(&dd->dd_dbu, dsl_dir_evict, &dd->dd_dbuf);
		winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
		if (winner != NULL) {

				dsl_dir_rele(dd->dd_parent, dd);

			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));

			spa_open_ref(dp->dp_spa, dd);

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa. We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool. We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);

		dsl_dir_rele(dd->dd_parent, dd);

	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
dsl_dir_rele(dsl_dir_t *dd, void *tag)

	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
/*
 * Remove a reference to the given dsl dir that is being asynchronously
 * released. Async releases occur from a taskq performing eviction of
 * dsl datasets and dirs. This process is identical to a normal release
 * with the exception of using the async API for releasing the reference on
 * the spa.
 */
dsl_dir_async_rele(dsl_dir_t *dd, void *tag)

	dprintf_dd(dd, "%s\n", "");
	spa_async_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);

/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
dsl_dir_name(dsl_dir_t *dd, char *buf)

		dsl_dir_name(dd->dd_parent, buf);
		VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
		    ZFS_MAX_DATASET_NAME_LEN);

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
		mutex_exit(&dd->dd_lock);

		VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
		    <, ZFS_MAX_DATASET_NAME_LEN);
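
/*
 * Usage sketch (hypothetical caller, guarded out): the caller supplies the
 * buffer, which must be at least ZFS_MAX_DATASET_NAME_LEN bytes as noted
 * above.
 */
#if 0
static void
example_print_dir_name(dsl_dir_t *dd)
{
	char name[ZFS_MAX_DATASET_NAME_LEN];

	dsl_dir_name(dd, name);
	dprintf("dsl_dir name is %s\n", name);
}
#endif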
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
dsl_dir_namelen(dsl_dir_t *dd)

		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);

		result += strlen(dd->dd_myname);
getcomponent(const char *path, char *component, const char **nextp)

	if ((path == NULL) || (path[0] == '\0'))
		return (SET_ERROR(ENOENT));
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (SET_ERROR(EINVAL));

	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */

		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (SET_ERROR(EINVAL));
		if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strcpy(component, path);

	} else if (p[0] == '/') {
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';

	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (SET_ERROR(EINVAL));
		if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';

		panic("invalid p=%p", (void *)p);
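
/*
 * Illustrative walk (hypothetical helper, guarded out): for "pool/fs@snap",
 * successive getcomponent() calls yield "pool", then "fs", then "@snap",
 * with *nextp advancing through the remainder of the name each time.
 */
#if 0
static int
example_walk_components(const char *name)
{
	char comp[ZFS_MAX_DATASET_NAME_LEN];
	const char *next = name;
	int err = 0;

	while (next != NULL && err == 0) {
		err = getcomponent(next, comp, &next);
		if (err == 0)
			dprintf("component: %s\n", comp);
	}
	return (err);
}
#endif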
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail. The name must be in the specified dsl_pool_t. This
 * thread must hold the dp_config_rwlock for the pool. Returns NULL if the
 * path is bogus, or if tail==NULL and we couldn't parse the whole name.
 * (*tail)[0] == '@' means that the last component is a snapshot.
 */
dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)

	const char *spaname, *next, *nextnext = NULL;

	buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	err = getcomponent(name, buf, &next);

	/* Make sure the name is in the specified pool. */
	spaname = spa_name(dp->dp_spa);
	if (strcmp(buf, spaname) != 0) {
		err = SET_ERROR(EXDEV);

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);

	while (next != NULL) {

		err = getcomponent(next, buf, &nextnext);

		ASSERT(next[0] != '\0');

		dprintf("looking up %s in obj%lld\n",
		    buf, dsl_dir_phys(dd)->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dsl_dir_phys(dd)->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);

		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);

		dsl_dir_rele(dd, tag);

		dsl_dir_rele(dd, tag);

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */

	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {

		dsl_dir_rele(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = SET_ERROR(ENOENT);

	kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
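
/*
 * Usage sketch (hypothetical, guarded out): holding a dsl_dir_t by name.
 * Per the comment above dsl_dir_hold(), the pool configuration lock must
 * already be held, and a non-NULL tail means a trailing component (e.g. a
 * snapshot name) was not parsed as a directory.
 */
#if 0
static int
example_hold_dir(dsl_pool_t *dp, const char *name)
{
	dsl_dir_t *dd;
	const char *tail;
	int err;

	err = dsl_dir_hold(dp, name, FTAG, &dd, &tail);
	if (err != 0)
		return (err);
	if (tail != NULL)
		dprintf("unparsed component: %s\n", tail);
	dsl_dir_rele(dd, FTAG);
	return (0);
}
#endif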
/*
 * If the counts are already initialized for this filesystem and its
 * descendants then do nothing, otherwise initialize the counts.
 *
 * The counts on this filesystem, and those below, may be uninitialized due to
 * either the use of a pre-existing pool which did not support the
 * filesystem/snapshot limit feature, or one in which the feature had not yet
 * been enabled.
 *
 * Recursively descend the filesystem tree and update the filesystem/snapshot
 * counts on each filesystem below, then update the cumulative count on the
 * current filesystem. If the filesystem already has a count set on it,
 * then we know that its counts, and the counts on the filesystems below it,
 * are already correct, so we don't have to update this filesystem.
 */
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)

	uint64_t my_fs_cnt = 0;
	uint64_t my_ss_cnt = 0;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *os = dp->dp_meta_objset;

	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
	ASSERT(dsl_pool_config_held(dp));
	ASSERT(dmu_tx_is_syncing(tx));

	dsl_dir_zapify(dd, tx);

	/*
	 * If the filesystem count has already been initialized then we
	 * don't need to recurse down any further.
	 */
	if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)

	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/* Iterate my child dirs */
	for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
	    zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {

		VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,

		/*
		 * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and
		 * temporary datasets.
		 */
		if (chld_dd->dd_myname[0] == '$' ||
		    chld_dd->dd_myname[0] == '%') {
			dsl_dir_rele(chld_dd, FTAG);

		my_fs_cnt++;	/* count this child */

		dsl_dir_init_fs_ss_count(chld_dd, tx);

		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));

		VERIFY0(zap_lookup(os, chld_dd->dd_object,
		    DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));

		dsl_dir_rele(chld_dd, FTAG);

	/* Count my snapshots (we counted children's snapshots above) */
	VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
	    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));

	for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		/* Don't count temporary snapshots */
		if (za->za_name[0] != '%')

	dsl_dataset_rele(ds, FTAG);

	kmem_free(zc, sizeof (zap_cursor_t));
	kmem_free(za, sizeof (zap_attribute_t));

	/* we're in a sync task, update counts */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
	    sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
	VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
	    sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)

	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	error = dsl_dataset_hold(dp, ddname, FTAG, &ds);

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
	    dsl_dir_is_zapified(dd) &&
	    zap_contains(dp->dp_meta_objset, dd->dd_object,
	    DD_FIELD_FILESYSTEM_COUNT) == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EALREADY));

	dsl_dataset_rele(ds, FTAG);
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)

	char *ddname = (char *)arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));

	spa = dsl_dataset_get_spa(ds);

	if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
		/*
		 * Since the feature was not active and we're now setting a
		 * limit, increment the feature-active counter so that the
		 * feature becomes active for the first time.
		 *
		 * We are already in a sync task so we can update the MOS.
		 */
		spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);

	/*
	 * Since we are now setting a non-UINT64_MAX limit on the filesystem,
	 * we need to ensure the counts are correct. Descend down the tree from
	 * this point and update all of the counts to be accurate.
	 */
	dsl_dir_init_fs_ss_count(ds->ds_dir, tx);

	dsl_dataset_rele(ds, FTAG);
/*
 * Make sure the feature is enabled and activate it if necessary.
 * Since we're setting a limit, ensure the on-disk counts are valid.
 * This is only called by the ioctl path when setting a limit value.
 *
 * We do not need to validate the new limit, since users who can change the
 * limit are also allowed to exceed the limit.
 */
dsl_dir_activate_fs_ss_limit(const char *ddname)

	error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
	    dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
	    ZFS_SPACE_CHECK_RESERVED);

	if (error == EALREADY)
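
/*
 * Usage sketch (hypothetical, guarded out): a limit-setting ioctl would
 * activate the feature before storing the new limit value; EALREADY from
 * the check function is folded into success above.
 */
#if 0
static int
example_limit_prologue(const char *ddname)
{
	return (dsl_dir_activate_fs_ss_limit(ddname));
}
#endif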
/*
 * Used to determine if the filesystem_limit or snapshot_limit should be
 * enforced. We allow the limit to be exceeded if the user has permission to
 * write the property value. We pass in the creds that we got in the open
 * context since we will always be the GZ root in syncing context. We also have
 * to handle the case where we are allowed to change the limit on the current
 * dataset, but there may be another limit in the tree above.
 *
 * We can never modify these two properties within a non-global zone. In
 * addition, the other checks are modeled on zfs_secpolicy_write_perms. We
 * can't use that function since we are already holding the dp_config_rwlock.
 * In addition, we already have the dd and dealing with snapshots is simplified
 * ...
 */
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)

	enforce_res_t enforce = ENFORCE_ALWAYS;

	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

	if (crgetzoneid(cr) != GLOBAL_ZONEID)
		return (ENFORCE_ALWAYS);

	if (secpolicy_zfs(cr) == 0)
		return (ENFORCE_NEVER);

	if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
		return (ENFORCE_ALWAYS);

	ASSERT(dsl_pool_config_held(dd->dd_pool));

	if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
		return (ENFORCE_ALWAYS);

	if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) {
		/* Only root can access zoned fs's from the GZ */
		enforce = ENFORCE_ALWAYS;

		if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
			enforce = ENFORCE_ABOVE;

	dsl_dataset_rele(ds, FTAG);
/*
 * Check if adding additional child filesystem(s) would exceed any filesystem
 * limits or adding additional snapshot(s) would exceed any snapshot limits.
 * The prop argument indicates which limit to check.
 *
 * Note that all filesystem limits up to the root (or the highest
 * initialized) filesystem or the given ancestor must be satisfied.
 */
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
    dsl_dir_t *ancestor, cred_t *cr)

	objset_t *os = dd->dd_pool->dp_meta_objset;
	uint64_t limit, count;

	enforce_res_t enforce;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
	    prop == ZFS_PROP_SNAPSHOT_LIMIT);

	/*
	 * If we're allowed to change the limit, don't enforce the limit
	 * e.g. this can happen if a snapshot is taken by an administrative
	 * user in the global zone (i.e. a recursive snapshot by root).
	 * However, we must handle the case of delegated permissions where we
	 * are allowed to change the limit on the current dataset, but there
	 * is another limit in the tree above.
	 */
	enforce = dsl_enforce_ds_ss_limits(dd, prop, cr);
	if (enforce == ENFORCE_NEVER)

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment
	 * is 0.
	 */

	if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
		/*
		 * We don't enforce the limit for temporary snapshots. This is
		 * indicated by a NULL cred_t argument.
		 */

		count_prop = DD_FIELD_SNAPSHOT_COUNT;

		count_prop = DD_FIELD_FILESYSTEM_COUNT;

	/*
	 * If an ancestor has been provided, stop checking the limit once we
	 * hit that dir. We need this during rename so that we don't overcount
	 * the check once we recurse up to the common ancestor.
	 */

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know there is no limit here (or above). The counts are
	 * not valid on this node and we know we won't touch this node's counts.
	 */
	if (!dsl_dir_is_zapified(dd) || zap_lookup(os, dd->dd_object,
	    count_prop, sizeof (count), 1, &count) == ENOENT)

	err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,

	/* Is there a limit which we've hit? */
	if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
		return (SET_ERROR(EDQUOT));

	if (dd->dd_parent != NULL)
		err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
/*
 * Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
 * parents. When a new filesystem/snapshot is created, increment the count on
 * all parents, and when a filesystem/snapshot is destroyed, decrement the
 * count.
 */
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,

	objset_t *os = dd->dd_pool->dp_meta_objset;

	ASSERT(dsl_pool_config_held(dd->dd_pool));
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
	    strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);

	/*
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We don't count this temporary
	 * clone, whose name begins with a '%'. We also ignore hidden ($FREE,
	 * $MOS & $ORIGIN) objsets.
	 */
	if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') &&
	    strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0)

	/*
	 * e.g. if renaming a dataset with no snapshots, count adjustment is 0
	 */

	/*
	 * If we hit an uninitialized node while recursing up the tree, we can
	 * stop since we know the counts are not valid on this node and we
	 * know we shouldn't touch this node's counts. An uninitialized count
	 * on the node indicates that either the feature has not yet been
	 * activated or there are no limits on this part of the tree.
	 */
	if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
	    prop, sizeof (count), 1, &count)) == ENOENT)

	/* Use a signed verify to make sure we're not neg. */
	VERIFY3S(count, >=, 0);

	VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,

	/* Roll up this additional count into our ancestors */
	if (dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,

	objset_t *mos = dp->dp_meta_objset;

	dsl_dir_phys_t *ddphys;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);

		VERIFY(0 == zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));

		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));

	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();

		ddphys->dd_parent_obj = pds->dd_object;

		/* update the filesystem counts */
		dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);

	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);
dsl_dir_is_clone(dsl_dir_t *dd)

	return (dsl_dir_phys(dd)->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dsl_dir_phys(dd)->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)

	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dsl_dir_phys(dd)->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
	    dsl_dir_phys(dd)->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dsl_dir_phys(dd)->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
	    (dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
	    dsl_dir_phys(dd)->dd_compressed_bytes));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dsl_dir_phys(dd)->dd_uncompressed_bytes);
	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
		    dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);

	mutex_exit(&dd->dd_lock);

	if (dsl_dir_is_zapified(dd)) {

		objset_t *os = dd->dd_pool->dp_meta_objset;

		if (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (count), 1, &count) == 0) {
			dsl_prop_nvlist_add_uint64(nv,
			    ZFS_PROP_FILESYSTEM_COUNT, count);

		if (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (count), 1, &count) == 0) {
			dsl_prop_nvlist_add_uint64(nv,
			    ZFS_PROP_SNAPSHOT_COUNT, count);

	if (dsl_dir_is_clone(dd)) {

		char buf[ZFS_MAX_DATASET_NAME_LEN];

		VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
		    dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)

	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dsl_dir_phys(dd));

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)

	uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
	uint64_t new_accounted =
	    MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
	return (new_accounted - old_accounted);
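
/*
 * Worked example (illustrative): with dd_reserved = 100, used = 80 and
 * delta = +30, old_accounted = MAX(80, 100) = 100 and new_accounted =
 * MAX(110, 100) = 110, so only 10 of the 30 new bytes are charged to the
 * parent; the first 20 were already covered by the reservation.
 */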
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)

	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
dsl_dir_space_towrite(dsl_dir_t *dd)

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
/*
 * How much space would dd have available if ancestor had delta applied
 * to it? If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)

	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */

	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);

	mutex_enter(&dd->dd_lock);
	if (dsl_dir_phys(dd)->dd_quota != 0)
		quota = dsl_dir_phys(dd)->dd_quota;
	used = dsl_dir_phys(dd)->dd_used_bytes;

		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);

	if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dsl_dir_phys(dd)->dd_reserved - used;

	if (dd == ancestor) {

		ASSERT(used >= -delta);

		if (parentspace != UINT64_MAX)
			parentspace -= delta;

		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);

	mutex_exit(&dd->dd_lock);
struct tempreserve {
	list_node_t tr_node;
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)

	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;

	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota. We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {

		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);

			mutex_exit(&dd->dd_lock);
			DMU_TX_STAT_BUMP(dmu_tx_quota);

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)

		quota = dsl_dir_phys(dd)->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop. In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))

		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		DMU_TX_STAT_BUMP(dmu_tx_quota);
		return (SET_ERROR(retval));

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,

	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);

	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);

		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		if (err == EAGAIN) {
			/*
			 * If arc_memory_throttle() detected that pageout
			 * is running and we are low on memory, we delay new
			 * non-pageout transactions to give pageout an
			 * advantage.
			 *
			 * It is unfortunate to be delaying while the caller's
			 * locks are held.
			 */
			txg_delay(dd->dd_pool, tx->tx_txg,
			    MSEC2NSEC(10), MSEC2NSEC(10));
			err = SET_ERROR(ERESTART);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);

		dsl_dir_tempreserve_clear(tr_list, tx);

	*tr_cookiep = tr_list;
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)

	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)

	while ((tr = list_head(tr_list)) != NULL) {

			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,

			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);

			arc_tempreserve_clear(tr->tr_size);

		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));

	kmem_free(tr_list, sizeof (list_t));
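
/*
 * Pairing sketch (hypothetical, guarded out): a reservation made with
 * dsl_dir_tempreserve_space() is later released with
 * dsl_dir_tempreserve_clear() once the space has been dirtied.
 */
#if 0
static int
example_tempreserve(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    dmu_tx_t *tx)
{
	void *tr_cookie;
	int err;

	err = dsl_dir_tempreserve_space(dd, lsize, asize, 0, 0,
	    &tr_cookie, tx);
	if (err != 0)
		return (err);

	/* ... dirty the space, e.g. via dsl_dir_willuse_space() ... */

	dsl_dir_tempreserve_clear(tr_cookie, tx);
	return (0);
}
#endif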
/*
 * This should be called from open context when we think we're going to write
 * or free space, for example when dirtying data. Be conservative; it's okay
 * to write less space or free more, but we don't want to write more or free
 * less than the amount specified.
 *
 * NOTE: The behavior of this function is identical to the Illumos / FreeBSD
 * version, however it has been adjusted to use an iterative rather than
 * recursive algorithm to minimize stack usage.
 */
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)

	int64_t parent_space;

		mutex_enter(&dd->dd_lock);

			dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

		est_used = dsl_dir_space_towrite(dd) +
		    dsl_dir_phys(dd)->dd_used_bytes;
		parent_space = parent_delta(dd, est_used, space);
		mutex_exit(&dd->dd_lock);

		/* Make sure that we clean up dd_space_to* */
		dsl_dir_dirty(dd, tx);

		space = parent_space;
	} while (space && dd);

/* call from syncing context when we actually write/free space for this dd */
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)

	int64_t accounted_delta;

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

		mutex_enter(&dd->dd_lock);

	    parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, used);
	ASSERT(used >= 0 || dsl_dir_phys(dd)->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dsl_dir_phys(dd)->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dsl_dir_phys(dd)->dd_uncompressed_bytes >= -uncompressed);
	dsl_dir_phys(dd)->dd_used_bytes += used;
	dsl_dir_phys(dd)->dd_uncompressed_bytes += uncompressed;
	dsl_dir_phys(dd)->dd_compressed_bytes += compressed;

	if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {

		    dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
		dsl_dir_phys(dd)->dd_used_breakdown[type] += used;

		for (t = 0; t < DD_USED_NUM; t++)
			u += dsl_dir_phys(dd)->dd_used_breakdown[t];
		ASSERT3U(u, ==, dsl_dir_phys(dd)->dd_used_bytes);

		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	    !(dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN))

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);

	    dsl_dir_phys(dd)->dd_used_breakdown[oldtype] >= delta :
	    dsl_dir_phys(dd)->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dsl_dir_phys(dd)->dd_used_bytes >= ABS(delta));
	dsl_dir_phys(dd)->dd_used_breakdown[oldtype] -= delta;
	dsl_dir_phys(dd)->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)

	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);

	error = dsl_prop_predict(ds->ds_dir, "quota",
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);

		dsl_dataset_rele(ds, FTAG);

		dsl_dataset_rele(ds, FTAG);

	mutex_enter(&ds->ds_dir->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(ds->ds_dir);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
	    newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
		error = SET_ERROR(ENOSPC);

	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)

	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));

		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)

	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
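
/*
 * Usage sketch (hypothetical, guarded out): setting a 1 GiB local quota;
 * the check/sync pair above runs as a single sync task.
 */
#if 0
static int
example_set_quota_1g(const char *ddname)
{
	return (dsl_dir_set_quota(ddname, ZPROP_SRC_LOCAL, 1ULL << 30));
}
#endif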
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)

	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	uint64_t newval, used, avail;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);

		dsl_dataset_rele(ds, FTAG);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,

		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;

	if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dsl_dir_phys(dd)->dd_reserved);

		if (delta > avail ||
		    (dsl_dir_phys(dd)->dd_quota > 0 &&
		    newval > dsl_dir_phys(dd)->dd_quota))
			error = SET_ERROR(ENOSPC);

	dsl_dataset_rele(ds, FTAG);
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dsl_dir_phys(dd)->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
	dsl_dir_phys(dd)->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,

	mutex_exit(&dd->dd_lock);
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)

	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));

		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)

	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)

	for (; ds1; ds1 = ds1->dd_parent) {

		for (dd = ds2; dd; dd = dd->dd_parent) {
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor? Syncing context only.
 */
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
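
/*
 * Worked example (illustrative): freeing 30 bytes (delta = -30) on a dir
 * with used = 110 and dd_reserved = 100 propagates
 * parent_delta(dd, 110, -30) = MAX(80, 100) - MAX(110, 100) = -10 to the
 * next level up, since the reservation absorbs the remaining 20 bytes.
 */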
typedef struct dsl_dir_rename_arg {
	const char *ddra_oldname;
	const char *ddra_newname;
	cred_t *ddra_cred;
} dsl_dir_rename_arg_t;
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)

	char namebuf[ZFS_MAX_DATASET_NAME_LEN];

	dsl_dataset_name(ds, namebuf);

	if (strlen(namebuf) + *deltap >= ZFS_MAX_DATASET_NAME_LEN)
		return (SET_ERROR(ENAMETOOLONG));
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)

	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;

	int delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname);

	/* target dir should exist */
	error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);

	/* new parent should exist */
	error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
	    &newparent, &mynewname);

		dsl_dir_rele(dd, FTAG);

	/* can't rename to different pool */
	if (dd->dd_pool != newparent->dd_pool) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EXDEV));

	/* new name should not already exist */
	if (mynewname == NULL) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EEXIST));

	/* if the name length is growing, validate child name lengths */

		error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
		    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);

	if (dmu_tx_is_syncing(tx)) {
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			/*
			 * Although this is the check function and we don't
			 * normally make on-disk changes in check functions,
			 * we need to do that here.
			 *
			 * Ensure this portion of the tree's counts have been
			 * initialized in case the new parent has limits set.
			 */
			dsl_dir_init_fs_ss_count(dd, tx);

	if (newparent != dd->dd_parent) {
		/* is there enough space? */

		    MAX(dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_reserved);
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		if (dsl_dir_is_zapified(dd)) {

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,

			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);

			/*
			 * have to add 1 for the filesystem itself that we're
			 * moving
			 */

			err = zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,

			if (err != ENOENT && err != 0) {
				dsl_dir_rele(newparent, FTAG);
				dsl_dir_rele(dd, FTAG);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, newparent) == dd) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EINVAL));

		error = dsl_dir_transfer_possible(dd->dd_parent,
		    newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred);

			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)

	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;

	objset_t *mos = dp->dp_meta_objset;

	VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
	VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,

	/* Log this before we change the name. */
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s", ddra->ddra_newname);

	if (newparent != dd->dd_parent) {
		objset_t *os = dd->dd_pool->dp_meta_objset;
		uint64_t fs_cnt = 0;
		uint64_t ss_cnt = 0;

		/*
		 * We already made sure the dd counts were initialized in the
		 * rename check function.
		 */
		if (spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_FS_SS_LIMIT)) {
			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,

			/* add 1 for the filesystem itself that we're moving */

			VERIFY0(zap_lookup(os, dd->dd_object,
			    DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,

		dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, fs_cnt,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

		dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);
		dsl_fs_ss_count_adjust(newparent, ss_cnt,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dsl_dir_phys(dd)->dd_used_bytes,
		    -dsl_dir_phys(dd)->dd_compressed_bytes,
		    -dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(newparent, DD_USED_CHILD,
		    dsl_dir_phys(dd)->dd_used_bytes,
		    dsl_dir_phys(dd)->dd_compressed_bytes,
		    dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);

		if (dsl_dir_phys(dd)->dd_reserved >
		    dsl_dir_phys(dd)->dd_used_bytes) {
			uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
			    dsl_dir_phys(dd)->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	error = zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,

	(void) strcpy(dd->dd_myname, mynewname);
	dsl_dir_rele(dd->dd_parent, dd);
	dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
	VERIFY0(dsl_dir_hold_obj(dp,
	    newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx));

	zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
	    ddra->ddra_newname, B_TRUE);

	dsl_prop_notify_all(dd);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
dsl_dir_rename(const char *oldname, const char *newname)

	dsl_dir_rename_arg_t ddra;

	ddra.ddra_oldname = oldname;
	ddra.ddra_newname = newname;
	ddra.ddra_cred = CRED();

	return (dsl_sync_task(oldname,
	    dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
	    3, ZFS_SPACE_CHECK_RESERVED));
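
/*
 * Usage sketch (hypothetical names, guarded out): moving a filesystem to a
 * new parent; the space and limit checks against the common ancestor run
 * in dsl_dir_rename_check() above.
 */
#if 0
static int
example_rename(void)
{
	return (dsl_dir_rename("pool/a/fs", "pool/b/fs"));
}
#endif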
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
    uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *cr)

	dsl_dir_t *ancestor;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);

		return (SET_ERROR(ENOSPC));

	err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,

	err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
dsl_dir_snap_cmtime(dsl_dir_t *dd)

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

dsl_dir_snap_cmtime_update(dsl_dir_t *dd)

	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);

dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)

	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);

dsl_dir_is_zapified(dsl_dir_t *dd)

	dmu_object_info_t doi;

	dmu_object_info_from_db(dd->dd_dbuf, &doi);
	return (doi.doi_type == DMU_OTN_ZAP_METADATA);
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);