/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfs_vfsops.h>
#include "zfs_namecheck.h"
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
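/*
 * Buffer-eviction callback: tear down the in-core dsl_dir_t once its
 * bonus buffer is evicted.  By this point all dirty state and temporary
 * reservations for this dir must already have been cleared.
 */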
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
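/*
 * Hold the dsl_dir_t for object "ddobj" in pool "dp", instantiating the
 * in-core structure from the MOS if necessary.  "tail" is this dir's name
 * in its parent's child-dir ZAP (NULL to look the name up by value).
 */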
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err != 0)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_hold_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err != 0)
				goto errout;
			if (tail) {
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err != 0)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err != 0)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_rele(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_rele(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}
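/*
 * Release a hold taken by dsl_dir_hold_obj() or dsl_dir_hold(), dropping
 * the corresponding spa and bonus-buffer references.
 */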
void
dsl_dir_rele(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}
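/*
 * Copy the first path component of "path" into "component" and set *nextp
 * to the remainder of the path (NULL if nothing remains).  Fails with
 * EINVAL for malformed separators and ENAMETOOLONG if the component does
 * not fit in MAXNAMELEN.
 */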
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (SET_ERROR(ENOENT));
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (SET_ERROR(EINVAL));
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (SET_ERROR(EINVAL));
		if (strlen(path) >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p - path >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (SET_ERROR(EINVAL));
		if (p - path >= MAXNAMELEN)
			return (SET_ERROR(ENAMETOOLONG));
		(void) strncpy(component, path, p - path);
		component[p - path] = '\0';
	} else {
		panic("invalid p=%p", (void *)p);
	}
	*nextp = p;
	return (0);
}
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  The name must be in the specified dsl_pool_t.  This
 * thread must hold the dp_config_rwlock for the pool.  Returns an error if
 * the path is bogus, or if tail==NULL and we couldn't parse the whole name.
 * (*tail)[0] == '@' means that the last component is a snapshot.
 */
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *spaname, *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	uint64_t ddobj;

	err = getcomponent(name, buf, &next);
	if (err != 0)
		return (err);

	/* Make sure the name is in the specified pool. */
	spaname = spa_name(dp->dp_spa);
	if (strcmp(buf, spaname) != 0)
		return (SET_ERROR(EINVAL));

	ASSERT(dsl_pool_config_held(dp));

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err != 0)
		return (err);

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err != 0)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err != 0) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_ds);
		if (err != 0)
			break;
		dsl_dir_rele(dd, tag);
		dd = child_ds;
		next = nextnext;
	}

	if (err != 0) {
		dsl_dir_rele(dd, tag);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_rele(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = SET_ERROR(ENOENT);
	}
	if (tailp != NULL)
		*tailp = next;
	*ddp = dd;
	return (err);
}
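/*
 * Create a new dsl_dir object named "name" under parent "pds" (or as the
 * pool's root dataset entry if pds is NULL) and return its object number.
 */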
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}
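/*
 * A dir is a clone if it has an origin snapshot other than the pool-wide
 * $ORIGIN snapshot.
 */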
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}
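/*
 * Fill in "nv" with this dir's space-accounting properties (used, quota,
 * reservation, compressratio, logicalused, the used-breakdown values, and
 * origin for clones) derived from its on-disk state.
 */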
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dd->dd_phys->dd_uncompressed_bytes);
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
}
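/*
 * Add this dir to the per-txg dirty list so dsl_dir_sync() will process it,
 * taking an extra dbuf hold until the sync happens.
 */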
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}
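/*
 * Given the current accounted usage "used" and a change "delta", return how
 * much of that change is visible to our parent, treating our reservation as
 * a floor on what the parent has already charged us for.
 */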
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
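/*
 * Called from syncing context: clear this txg's in-flight space estimate
 * and drop the hold taken by dsl_dir_dirty().
 */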
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}
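/*
 * Sum the pending (not yet synced) space estimates across all open txgs.
 * The caller must hold dd_lock.
 */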
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}
/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

struct tempreserve {
	list_node_t	tr_node;
	dsl_dir_t	*tr_ds;
	uint64_t	tr_size;
};
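/*
 * Reserve "asize" bytes against this dir's quota for the given txg,
 * recursing up to the root so that every ancestor's quota is checked.
 * On success a tempreserve record is appended to tr_list so the caller
 * can undo the reservation later.
 */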
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (SET_ERROR(retval));
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);
	} else if (err == EAGAIN) {
		/*
		 * If arc_memory_throttle() detected that pageout
		 * is running and we are low on memory, we delay new
		 * non-pageout transactions to give pageout an
		 * advantage.
		 *
		 * It is unfortunate to be delaying while the caller's
		 * locks are held.
		 */
		txg_delay(dd->dd_pool, tx->tx_txg,
		    MSEC2NSEC(10), MSEC2NSEC(10));
		err = SET_ERROR(ERESTART);
	}

	if (err == 0) {
		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err != 0)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while ((tr = list_head(tr_list)) != NULL) {
		if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
/*
 * This should be called from open context when we think we're going to write
 * or free space, for example when dirtying data.  Be conservative; it's okay
 * to write less space or free more, but we don't want to write more or free
 * less than the amount specified.
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space(dd->dd_parent, parent_space, tx);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;

	/*
	 * dsl_dataset_set_refreservation_sync_impl() calls this with
	 * dd_lock held, so that it can atomically update
	 * ds->ds_reserved and the dsl_dir accounting, so that
	 * dsl_dataset_check_quota() can see dataset and dir accounting
	 * consistently.
	 */
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}
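/*
 * Move "delta" bytes of the used-space breakdown from "oldtype" to
 * "newtype" without changing the total space used.
 */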
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	mutex_exit(&dd->dd_lock);
}
typedef struct dsl_dir_set_qr_arg {
	const char *ddsqra_name;
	zprop_source_t ddsqra_source;
	uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
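/*
 * Check callback for the "set quota" sync task: predict the new quota value
 * and fail with ENOSPC if it would fall below the space already used (plus
 * pending writes) or reserved.
 */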
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;
	uint64_t towrite, newval;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_prop_predict(ds->ds_dir, "quota",
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (newval == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	mutex_enter(&ds->ds_dir->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(ds->ds_dir);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (newval < ds->ds_dir->dd_phys->dd_reserved ||
	    newval < ds->ds_dir->dd_phys->dd_used_bytes + towrite)) {
		error = SET_ERROR(ENOSPC);
	}
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
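/*
 * Sync callback for the "set quota" task: record the property (or log it on
 * older pools) and apply the new quota to the dir's on-disk state.
 */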
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
	}

	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	mutex_enter(&ds->ds_dir->dd_lock);
	ds->ds_dir->dd_phys->dd_quota = newval;
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = quota;

	return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, &ddsqra, 0));
}
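/*
 * Check callback for the "set reservation" sync task: make sure the new
 * reservation fits within the parent's available space and our own quota.
 */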
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	uint64_t newval, used, avail;
	int error;

	error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
	if (error != 0)
		return (error);
	dd = ds->ds_dir;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		dsl_dataset_rele(ds, FTAG);
		return (0);
	}

	error = dsl_prop_predict(ds->ds_dir,
	    zfs_prop_to_name(ZFS_PROP_RESERVATION),
	    ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, newval) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, newval) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail ||
		    (dd->dd_phys->dd_quota > 0 &&
		    newval > dd->dd_phys->dd_quota))
			error = SET_ERROR(ENOSPC);
	}

	dsl_dataset_rele(ds, FTAG);
	return (error);
}
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_set_qr_arg_t *ddsqra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	uint64_t newval;

	VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));

	if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
		dsl_prop_set_sync_impl(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
		    &ddsqra->ddsqra_value, tx);

		VERIFY0(dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
	} else {
		newval = ddsqra->ddsqra_value;
		spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
		    zfs_prop_to_name(ZFS_PROP_RESERVATION),
		    (longlong_t)newval);
	}

	dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_set_qr_arg_t ddsqra;

	ddsqra.ddsqra_name = ddname;
	ddsqra.ddsqra_source = source;
	ddsqra.ddsqra_value = reservation;

	return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, &ddsqra, 0));
}
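/*
 * Walk up from both dirs and return the deepest dsl_dir_t that is an
 * ancestor of (or equal to) both ds1 and ds2.
 */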
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
typedef struct dsl_dir_rename_arg {
	const char *ddra_oldname;
	const char *ddra_newname;
} dsl_dir_rename_arg_t;
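/*
 * dmu_objset_find_dp() callback used by rename: fail with ENAMETOOLONG if
 * the dataset's name plus the rename's length delta would overflow
 * MAXNAMELEN.
 */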
/* ARGSUSED */
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	int *deltap = arg;
	char namebuf[MAXNAMELEN];

	dsl_dataset_name(ds, namebuf);

	if (strlen(namebuf) + *deltap >= MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}
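/*
 * Check callback for the rename sync task: validate the new parent, the
 * resulting name lengths, and (for a new parent) that enough space can be
 * transferred.
 */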
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	int error;
	int delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname);

	/* target dir should exist */
	error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	/* new parent should exist */
	error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
	    &newparent, &mynewname);
	if (error != 0) {
		dsl_dir_rele(dd, FTAG);
		return (error);
	}

	/* can't rename to different pool */
	if (dd->dd_pool != newparent->dd_pool) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(ENXIO));
	}

	/* new name should not already exist */
	if (mynewname == NULL) {
		dsl_dir_rele(newparent, FTAG);
		dsl_dir_rele(dd, FTAG);
		return (SET_ERROR(EEXIST));
	}

	/* if the name length is growing, validate child name lengths */
	if (delta > 0) {
		error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
		    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	if (newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, newparent) == dd) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = dsl_dir_transfer_possible(dd->dd_parent,
		    newparent, myspace);
		if (error != 0) {
			dsl_dir_rele(newparent, FTAG);
			dsl_dir_rele(dd, FTAG);
			return (error);
		}
	}

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
	return (0);
}
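/*
 * Sync callback for rename: move space accounting to the new parent if it
 * changed, update the child-dir ZAP entries, and fix up the in-core state.
 */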
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dir_rename_arg_t *ddra = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd, *newparent;
	const char *mynewname;
	int error;
	objset_t *mos = dp->dp_meta_objset;

	VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
	VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
	    &mynewname));

	/* Log this before we change the name. */
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s", ddra->ddra_newname);

	if (newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	error = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT0(error);

	(void) strcpy(dd->dd_myname, mynewname);
	dsl_dir_rele(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = newparent->dd_object;
	VERIFY0(dsl_dir_hold_obj(dp,
	    newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	VERIFY0(zap_add(mos, newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx));

	zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
	zvol_rename_minors(ddra->ddra_oldname, ddra->ddra_newname);

	dsl_prop_notify_all(dd);

	dsl_dir_rele(newparent, FTAG);
	dsl_dir_rele(dd, FTAG);
}
int
dsl_dir_rename(const char *oldname, const char *newname)
{
	dsl_dir_rename_arg_t ddra;

	ddra.ddra_oldname = oldname;
	ddra.ddra_newname = newname;

	return (dsl_sync_task(oldname,
	    dsl_dir_rename_check, dsl_dir_rename_sync, &ddra, 3));
}
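/*
 * Return 0 if "space" bytes could be moved from sdd's subtree to tdd's
 * without exceeding any quota along the way, else ENOSPC.
 */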
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (SET_ERROR(ENOSPC));

	return (0);
}
timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}
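/*
 * Upgrade this dir's MOS object to a ZAP object so that additional per-dir
 * attributes (beyond the fixed dsl_dir_phys_t bonus) can be stored in it.
 */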
void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}