/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zvol.h>
#include <sys/zfs_vfsops.h>
#include "zfs_namecheck.h"

static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd,
    uint64_t value, dmu_tx_t *tx);

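/*
 * Eviction callback: invoked when the bonus dbuf backing this dsl_dir_t
 * is evicted from the dbuf cache.  At that point no dirty state or
 * temporary reservations may remain; tear down the in-core structure.
 */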
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	dsl_pool_t *dp = dd->dd_pool;
	int t;

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}

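/*
 * Hold the dsl_dir with object number "ddobj", instantiating the in-core
 * dsl_dir_t from the MOS if this is its first hold.  The caller drops the
 * hold with dsl_dir_close().
 */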
int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}

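/* Release a hold obtained with dsl_dir_open{,_obj,_spa}(). */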
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}

/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}

/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}

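/*
 * Copy the first component of "path" into "component" (which must be at
 * least MAXNAMELEN bytes) and point *nextp at the remainder.  Components
 * are separated by '/'; an '@' introduces a snapshot name and may only
 * appear in the final component.
 */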
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}

/*
 * same as dsl_open_dir, ignore the first component of name and use the
 * spa instead
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char buf[MAXNAMELEN];
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	err = getcomponent(name, buf, &next);
	if (err)
		return (err);
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			return (err);
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		return (err);
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
	return (err);
}

/*
 * Return the dsl_dir_t in *ddp, and possibly the last component which
 * couldn't be found in *tail.  Return an error if the path is bogus, or
 * if tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}

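/*
 * Example usage (illustrative only; not called from anywhere):
 *
 *	dsl_dir_t *dd;
 *	const char *tail;
 *	int err = dsl_dir_open("tank/fs/child", FTAG, &dd, &tail);
 *	if (err == 0) {
 *		...use dd; tail is NULL if the whole name was parsed,
 *		...or points at the last ("@snap" or missing) component
 *		dsl_dir_close(dd, FTAG);
 *	}
 */

/*
 * Create a new dsl_dir in the MOS as a child of "pds" (or as the root
 * dir when pds is NULL); returns the new directory's object number.
 */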
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}

/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  If there are more holds, then a concurrent thread is
	 * performing a lookup inside this dir while we're trying to destroy
	 * it.  To minimize this possibility, we perform this check only
	 * in syncing context and fail the operation if we encounter
	 * additional holds.  The dp_config_rwlock ensures that nobody else
	 * opens it after we check.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dd->dd_phys->dd_used_bytes);
	ASSERT0(dd->dd_phys->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dd->dd_phys->dd_used_breakdown[t]);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}

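/*
 * Add this dir's space-accounting properties (used, quota, reservation,
 * compressratio, logicalused, the usedby* breakdown, and origin for
 * clones) to the nvlist "nv".
 */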
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
	    dd->dd_phys->dd_uncompressed_bytes);
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}

void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}

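/*
 * By how much would the parent's accounting change if this dir's "used"
 * changed by "delta"?  Space up to dd_reserved is already charged to the
 * parent, so only the portion beyond the reservation propagates up.
 * For example (hypothetical numbers): with used=10M, dd_reserved=15M,
 * and delta=+8M, the old charge is MAX(10M, 15M) = 15M and the new
 * charge is MAX(18M, 15M) = 18M, so the parent sees a delta of +3M.
 */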
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}

void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	mutex_enter(&dd->dd_lock);
	ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}

static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}

/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent provided.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}

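/*
 * A tempreserve records one layer of a temporary space reservation:
 * against the ARC (neither tr_dp nor tr_ds set), against the pool
 * (tr_dp set), or against a dsl_dir (tr_ds set), so that
 * dsl_dir_tempreserve_clear() can unwind exactly what was reserved.
 */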
struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}

/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}

/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while (tr = list_head(tr_list)) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}

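/*
 * Typical use of the tempreserve interfaces (illustrative only; the real
 * caller is the dmu_tx assign path, and the size variables shown are
 * hypothetical):
 *
 *	void *tr_cookie;
 *	err = dsl_dir_tempreserve_space(dd, lsize, asize, fsize, usize,
 *	    &tr_cookie, tx);
 *	if (err == 0) {
 *		...dirty the data, calling dsl_dir_willuse_space()...
 *		dsl_dir_tempreserve_clear(tr_cookie, tx);
 *	}
 */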
static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}

/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}

/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		dd_used_t t;
		uint64_t u = 0;
		for (t = 0; t < DD_USED_NUM; t++)
			u += dd->dd_phys->dd_used_breakdown[t];
		ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}

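/*
 * Move "delta" bytes of this dir's used-space breakdown from oldtype to
 * newtype (e.g. from DD_USED_CHILD_RSRV to DD_USED_CHILD as reserved
 * space is actually consumed); dd_used_bytes itself is unchanged.
 */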
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}

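/*
 * Sync-task check for setting the quota: the new value must not be less
 * than the space already used or reserved below this dir (0 disables
 * the quota and is always allowed).
 */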
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	int err;
	uint64_t towrite;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}

extern dsl_syncfunc_t dsl_prop_set_sync;

static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = effective_value;
	mutex_exit(&dd->dd_lock);
}

int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "quota", source, &quota);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	/*
	 * If someone removes a file, then tries to set the quota, we want to
	 * make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t used, avail;
	int err;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, effective_value) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail)
			return (ENOSPC);
		if (dd->dd_phys->dd_quota > 0 &&
		    effective_value > dd->dd_phys->dd_quota)
			return (ENOSPC);
	}

	return (0);
}

static void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
	uint64_t used;
	int64_t delta;

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, value) - MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);
}

static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dsl_dir_set_reservation_sync_impl(dd, value, tx);

	spa_history_log_internal_dd(dd, "set reservation", tx,
	    "reservation=%lld", (longlong_t)value);
}

int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

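/* Find the closest common ancestor of two dsl_dirs by walking up both. */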
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}

/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}

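/* Arguments passed from dsl_dir_rename() to the check/sync callbacks. */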
struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
	boolean_t allowmounted;
};

static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/*
	 * There should only be one reference, from dmu_objset_rename().
	 * Fleeting holds are also possible (eg, from "zfs list" getting
	 * stats), but any that are present in open context will likely
	 * be gone by syncing context, so only fail from syncing
	 * context.
	 * Don't perform this check if renaming a busy (mounted) dataset
	 * is allowed.
	 */
	if (!ra->allowmounted && dmu_tx_is_syncing(tx) &&
	    dmu_buf_refcount(dd->dd_dbuf) > 1) {
		return (EBUSY);
	}

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if (err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace))
			return (err);
	}

	return (0);
}

static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	char oldname[MAXPATHLEN], newname[MAXPATHLEN];
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	char namebuf[MAXNAMELEN];

	ASSERT(ra->allowmounted || dmu_buf_refcount(dd->dd_dbuf) <= 2);

	/* Log this before we change the name. */
	dsl_dir_name(ra->newparent, namebuf);
	spa_history_log_internal_dd(dd, "rename", tx,
	    "-> %s/%s", namebuf, ra->mynewname);

	if (ra->newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	dsl_dir_name(dd, oldname);
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT0(err);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT0(err);

	dsl_dir_name(dd, newname);
	zfsvfs_update_fromname(oldname, newname);
	zvol_rename_minors(oldname, newname);
}

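/*
 * Rename dd to the path given by newname, via a sync task.  The new
 * parent must already exist and the target name must not; flags may
 * include ZFS_RENAME_ALLOW_MOUNTED to permit renaming a busy dataset.
 */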
int
dsl_dir_rename(dsl_dir_t *dd, const char *newname, int flags)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	ra.allowmounted = !!(flags & ZFS_RENAME_ALLOW_MOUNTED);

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}

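/*
 * Would moving "space" bytes from sdd to tdd still fit within tdd's
 * quotas and available space?  Returns ENOSPC if not.
 */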
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}

timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}

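/* Stamp dd's snapshot-change time (dd_snap_cmtime) with the current time. */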
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}