4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
24 * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
25 * All rights reserved.
26 * Portions Copyright (c) 2011 Martin Matuska <mm@FreeBSD.org>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_dir.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/dmu_traverse.h>
35 #include <sys/dmu_tx.h>
39 #include <sys/unique.h>
40 #include <sys/zfs_context.h>
41 #include <sys/zfs_ioctl.h>
43 #include <sys/zfs_znode.h>
44 #include <sys/zfs_onexit.h>
46 #include <sys/dsl_scan.h>
47 #include <sys/dsl_deadlist.h>
49 static char *dsl_reaper = "the grim reaper";
51 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
52 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
53 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
55 #define SWITCH64(x, y) \
57 uint64_t __tmp = (x); \
62 #define DS_REF_MAX (1ULL << 62)
64 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE
66 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper)
70 * Figure out how much of this delta should be propagated to the dsl_dir
71 * layer. If there's a refreservation, that space has already been
72 * partially accounted for in our ancestors.
75 parent_delta(dsl_dataset_t *ds, int64_t delta)
77 uint64_t old_bytes, new_bytes;
79 if (ds->ds_reserved == 0)
82 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
83 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
85 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
86 return (new_bytes - old_bytes);
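/*
 * Worked example (editor's addition, illustrative only): with
 * ds_reserved = 10M and ds_unique_bytes = 8M, a delta of +3M gives
 * old_bytes = MAX(8M, 10M) = 10M and new_bytes = MAX(11M, 10M) = 11M,
 * so only 1M of the 3M is propagated to the dsl_dir layer; the other
 * 2M was already covered by the refreservation.
 */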
90 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
92 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
93 int compressed = BP_GET_PSIZE(bp);
94 int uncompressed = BP_GET_UCSIZE(bp);
97 dprintf_bp(bp, "ds=%p", ds);
99 ASSERT(dmu_tx_is_syncing(tx));
100 /* It could have been compressed away to nothing */
103 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
104 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
107 * Account for the meta-objset space in its placeholder
110 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
111 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
112 used, compressed, uncompressed, tx);
113 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
116 dmu_buf_will_dirty(ds->ds_dbuf, tx);
118 mutex_enter(&ds->ds_dir->dd_lock);
119 mutex_enter(&ds->ds_lock);
120 delta = parent_delta(ds, used);
121 ds->ds_phys->ds_used_bytes += used;
122 ds->ds_phys->ds_compressed_bytes += compressed;
123 ds->ds_phys->ds_uncompressed_bytes += uncompressed;
124 ds->ds_phys->ds_unique_bytes += used;
125 mutex_exit(&ds->ds_lock);
126 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
127 compressed, uncompressed, tx);
128 dsl_dir_transfer_space(ds->ds_dir, used - delta,
129 DD_USED_REFRSRV, DD_USED_HEAD, tx);
130 mutex_exit(&ds->ds_dir->dd_lock);
134 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
140 ASSERT(dmu_tx_is_syncing(tx));
141 ASSERT(bp->blk_birth <= tx->tx_txg);
143 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
144 int compressed = BP_GET_PSIZE(bp);
145 int uncompressed = BP_GET_UCSIZE(bp);
150 * Account for the meta-objset space in its placeholder
153 dsl_free(tx->tx_pool, tx->tx_txg, bp);
155 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
156 -used, -compressed, -uncompressed, tx);
157 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
160 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
162 ASSERT(!dsl_dataset_is_snapshot(ds));
163 dmu_buf_will_dirty(ds->ds_dbuf, tx);
165 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
168 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
169 dsl_free(tx->tx_pool, tx->tx_txg, bp);
171 mutex_enter(&ds->ds_dir->dd_lock);
172 mutex_enter(&ds->ds_lock);
173 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
174 !DS_UNIQUE_IS_ACCURATE(ds));
175 delta = parent_delta(ds, -used);
176 ds->ds_phys->ds_unique_bytes -= used;
177 mutex_exit(&ds->ds_lock);
178 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
179 delta, -compressed, -uncompressed, tx);
180 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
181 DD_USED_REFRSRV, DD_USED_HEAD, tx);
182 mutex_exit(&ds->ds_dir->dd_lock);
184 dprintf_bp(bp, "putting on dead list: %s", "");
187 * We are here as part of zio's write done callback,
188 * which means we're a zio interrupt thread. We can't
189 * call dsl_deadlist_insert() now because it may block
190 * waiting for I/O. Instead, put bp on the deferred
191 * queue and let dsl_pool_sync() finish the job.
193 bplist_append(&ds->ds_pending_deadlist, bp);
195 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
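/*
 * Editor's note (assumption, names hedged): when the deferred path
 * above is taken (bplist_append), the pending entries are expected to
 * be drained later from syncing context by dsl_pool_sync(), roughly
 * along these lines:
 *
 *	bplist_iterate(&ds->ds_pending_deadlist,
 *	    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
 *
 * where the callback simply calls dsl_deadlist_insert() once it is
 * safe to block on I/O.
 */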
197 ASSERT3U(ds->ds_prev->ds_object, ==,
198 ds->ds_phys->ds_prev_snap_obj);
199 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
200 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
201 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
202 ds->ds_object && bp->blk_birth >
203 ds->ds_prev->ds_phys->ds_prev_snap_txg) {
204 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
205 mutex_enter(&ds->ds_prev->ds_lock);
206 ds->ds_prev->ds_phys->ds_unique_bytes += used;
207 mutex_exit(&ds->ds_prev->ds_lock);
209 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
210 dsl_dir_transfer_space(ds->ds_dir, used,
211 DD_USED_HEAD, DD_USED_SNAP, tx);
214 mutex_enter(&ds->ds_lock);
215 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
216 ds->ds_phys->ds_used_bytes -= used;
217 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
218 ds->ds_phys->ds_compressed_bytes -= compressed;
219 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
220 ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
221 mutex_exit(&ds->ds_lock);
227 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
229 uint64_t trysnap = 0;
234 * The snapshot creation could fail, but that would cause an
235 * incorrect FALSE return, which would only result in an
236 * overestimation of the amount of space that an operation would
237 * consume, which is OK.
239 * There's also a small window where we could miss a pending
240 * snapshot, because we could set the sync task in the quiescing
241 * phase. So this should only be used as a guess.
243 if (ds->ds_trysnap_txg >
244 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
245 trysnap = ds->ds_trysnap_txg;
246 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
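/*
 * Illustrative example (editor's addition): if a snapshot sync task was
 * registered with ds_trysnap_txg = 105 while spa_last_synced_txg() is
 * still 103, the pending snapshot has not synced yet, so 105 is folded
 * into the result; callers then conservatively treat blocks born at or
 * before txg 105 as potentially referenced by a snapshot.
 */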
250 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
253 if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
256 ddt_prefetch(dsl_dataset_get_spa(ds), bp);
263 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
265 dsl_dataset_t *ds = dsv;
267 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
269 unique_remove(ds->ds_fsid_guid);
271 if (ds->ds_objset != NULL)
272 dmu_objset_evict(ds->ds_objset);
275 dsl_dataset_drop_ref(ds->ds_prev, ds);
279 bplist_destroy(&ds->ds_pending_deadlist);
281 dsl_deadlist_close(&ds->ds_deadlist);
283 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
284 ASSERT(!ds->ds_deadlist.dl_oldfmt);
287 dsl_dir_close(ds->ds_dir, ds);
289 ASSERT(!list_link_active(&ds->ds_synced_link));
291 if (mutex_owned(&ds->ds_lock))
292 mutex_exit(&ds->ds_lock);
293 mutex_destroy(&ds->ds_lock);
294 mutex_destroy(&ds->ds_recvlock);
295 if (mutex_owned(&ds->ds_opening_lock))
296 mutex_exit(&ds->ds_opening_lock);
297 mutex_destroy(&ds->ds_opening_lock);
298 rw_destroy(&ds->ds_rwlock);
299 cv_destroy(&ds->ds_exclusive_cv);
301 kmem_free(ds, sizeof (dsl_dataset_t));
305 dsl_dataset_get_snapname(dsl_dataset_t *ds)
307 dsl_dataset_phys_t *headphys;
310 dsl_pool_t *dp = ds->ds_dir->dd_pool;
311 objset_t *mos = dp->dp_meta_objset;
313 if (ds->ds_snapname[0])
315 if (ds->ds_phys->ds_next_snap_obj == 0)
318 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
322 headphys = headdbuf->db_data;
323 err = zap_value_search(dp->dp_meta_objset,
324 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
325 dmu_buf_rele(headdbuf, FTAG);
330 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
332 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
333 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
337 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
342 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
343 value, mt, NULL, 0, NULL);
344 if (err == ENOTSUP && mt == MT_FIRST)
345 err = zap_lookup(mos, snapobj, name, 8, 1, value);
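/*
 * Minimal usage sketch (editor's addition, not part of the original
 * file):
 *
 *	uint64_t snapobj;
 *	if (dsl_dataset_snap_lookup(ds, "mysnap", &snapobj) == 0) {
 *		... snapobj is the snapshot's dataset object number ...
 *	}
 *
 * For case-insensitive datasets the first lookup is normalizing
 * (MT_FIRST); if the ZAP does not support normalization (ENOTSUP),
 * the code falls back to an exact zap_lookup(), as above.
 */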
350 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
352 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
353 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
357 dsl_dir_snap_cmtime_update(ds->ds_dir);
359 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
364 err = zap_remove_norm(mos, snapobj, name, mt, tx);
365 if (err == ENOTSUP && mt == MT_FIRST)
366 err = zap_remove(mos, snapobj, name, tx);
371 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
374 objset_t *mos = dp->dp_meta_objset;
378 dmu_object_info_t doi;
380 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
381 dsl_pool_sync_context(dp));
383 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
387 /* Make sure dsobj has the correct object type. */
388 dmu_object_info_from_db(dbuf, &doi);
389 if (doi.doi_type != DMU_OT_DSL_DATASET)
392 ds = dmu_buf_get_user(dbuf);
394 dsl_dataset_t *winner;
396 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
398 ds->ds_object = dsobj;
399 ds->ds_phys = dbuf->db_data;
401 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
402 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
403 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
404 rw_init(&ds->ds_rwlock, 0, 0, 0);
405 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
407 bplist_create(&ds->ds_pending_deadlist);
408 dsl_deadlist_open(&ds->ds_deadlist,
409 mos, ds->ds_phys->ds_deadlist_obj);
412 err = dsl_dir_open_obj(dp,
413 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
416 mutex_destroy(&ds->ds_lock);
417 mutex_destroy(&ds->ds_recvlock);
418 mutex_destroy(&ds->ds_opening_lock);
419 rw_destroy(&ds->ds_rwlock);
420 cv_destroy(&ds->ds_exclusive_cv);
421 bplist_destroy(&ds->ds_pending_deadlist);
422 dsl_deadlist_close(&ds->ds_deadlist);
423 kmem_free(ds, sizeof (dsl_dataset_t));
424 dmu_buf_rele(dbuf, tag);
428 if (!dsl_dataset_is_snapshot(ds)) {
429 ds->ds_snapname[0] = '\0';
430 if (ds->ds_phys->ds_prev_snap_obj) {
431 err = dsl_dataset_get_ref(dp,
432 ds->ds_phys->ds_prev_snap_obj,
436 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
437 err = dsl_dataset_get_snapname(ds);
438 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
440 ds->ds_dir->dd_pool->dp_meta_objset,
441 ds->ds_phys->ds_userrefs_obj,
446 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
448 * In sync context, we're called with either no lock
449 * or with the write lock. If we're not syncing,
450 * we're always called with the read lock held.
452 boolean_t need_lock =
453 !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
454 dsl_pool_sync_context(dp);
457 rw_enter(&dp->dp_config_rwlock, RW_READER);
459 err = dsl_prop_get_ds(ds,
460 "refreservation", sizeof (uint64_t), 1,
461 &ds->ds_reserved, NULL);
463 err = dsl_prop_get_ds(ds,
464 "refquota", sizeof (uint64_t), 1,
465 &ds->ds_quota, NULL);
469 rw_exit(&dp->dp_config_rwlock);
471 ds->ds_reserved = ds->ds_quota = 0;
475 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
479 bplist_destroy(&ds->ds_pending_deadlist);
480 dsl_deadlist_close(&ds->ds_deadlist);
482 dsl_dataset_drop_ref(ds->ds_prev, ds);
483 dsl_dir_close(ds->ds_dir, ds);
484 mutex_destroy(&ds->ds_lock);
485 mutex_destroy(&ds->ds_recvlock);
486 mutex_destroy(&ds->ds_opening_lock);
487 rw_destroy(&ds->ds_rwlock);
488 cv_destroy(&ds->ds_exclusive_cv);
489 kmem_free(ds, sizeof (dsl_dataset_t));
491 dmu_buf_rele(dbuf, tag);
497 unique_insert(ds->ds_phys->ds_fsid_guid);
500 ASSERT3P(ds->ds_dbuf, ==, dbuf);
501 ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
502 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
503 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
504 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
505 mutex_enter(&ds->ds_lock);
506 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
507 mutex_exit(&ds->ds_lock);
508 dmu_buf_rele(ds->ds_dbuf, tag);
511 mutex_exit(&ds->ds_lock);
517 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
519 dsl_pool_t *dp = ds->ds_dir->dd_pool;
522 * In syncing context we don't want the rwlock: there
523 * may be an existing writer waiting for sync phase to
524 * finish. We don't need to worry about such writers, since
525 * sync phase is single-threaded, so the writer can't be
526 * doing anything while we are active.
528 if (dsl_pool_sync_context(dp)) {
529 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
534 * Normal users will hold the ds_rwlock as a READER until they
535 * are finished (i.e., call dsl_dataset_rele()). "Owners" will
536 * drop their READER lock after they set the ds_owner field.
538 * If the dataset is being destroyed, the destroy thread will
539 * obtain a WRITER lock for exclusive access after it's done its
540 * open-context work and then change the ds_owner to
541 * dsl_reaper once destruction is assured. So threads
542 * may block here temporarily, until the "destructability" of
543 * the dataset is determined.
545 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
546 mutex_enter(&ds->ds_lock);
547 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
548 rw_exit(&dp->dp_config_rwlock);
549 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
550 if (DSL_DATASET_IS_DESTROYED(ds)) {
551 mutex_exit(&ds->ds_lock);
552 dsl_dataset_drop_ref(ds, tag);
553 rw_enter(&dp->dp_config_rwlock, RW_READER);
557 * The dp_config_rwlock lives above the ds_lock. And
558 * we need to check DSL_DATASET_IS_DESTROYED() while
559 * holding the ds_lock, so we have to drop and reacquire
562 mutex_exit(&ds->ds_lock);
563 rw_enter(&dp->dp_config_rwlock, RW_READER);
564 mutex_enter(&ds->ds_lock);
566 mutex_exit(&ds->ds_lock);
571 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
574 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
578 return (dsl_dataset_hold_ref(*dsp, tag));
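/*
 * Editor's usage sketch (illustrative, assuming the caller already
 * holds dp_config_rwlock or is in syncing context, as asserted by
 * dsl_dataset_get_ref()):
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
 *	if (err == 0) {
 *		... use ds as a reader ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * Consumers that must prevent the dataset from being destroyed use
 * dsl_dataset_own_obj()/dsl_dataset_disown() instead.
 */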
582 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
583 void *tag, dsl_dataset_t **dsp)
585 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
588 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
589 dsl_dataset_rele(*dsp, tag);
597 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
601 const char *snapname;
605 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
610 obj = dd->dd_phys->dd_head_dataset_obj;
611 rw_enter(&dp->dp_config_rwlock, RW_READER);
613 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
619 err = dsl_dataset_hold_ref(*dsp, tag);
621 /* we may be looking for a snapshot */
622 if (err == 0 && snapname != NULL) {
623 dsl_dataset_t *ds = NULL;
625 if (*snapname++ != '@') {
626 dsl_dataset_rele(*dsp, tag);
631 dprintf("looking for snapshot '%s'\n", snapname);
632 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
634 err = dsl_dataset_get_ref(dp, obj, tag, &ds);
635 dsl_dataset_rele(*dsp, tag);
637 ASSERT3U((err == 0), ==, (ds != NULL));
640 mutex_enter(&ds->ds_lock);
641 if (ds->ds_snapname[0] == 0)
642 (void) strlcpy(ds->ds_snapname, snapname,
643 sizeof (ds->ds_snapname));
644 mutex_exit(&ds->ds_lock);
645 err = dsl_dataset_hold_ref(ds, tag);
646 *dsp = err ? NULL : ds;
650 rw_exit(&dp->dp_config_rwlock);
651 dsl_dir_close(dd, FTAG);
656 dsl_dataset_own(const char *name, boolean_t inconsistentok,
657 void *tag, dsl_dataset_t **dsp)
659 int err = dsl_dataset_hold(name, tag, dsp);
662 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
663 dsl_dataset_rele(*dsp, tag);
670 dsl_dataset_name(dsl_dataset_t *ds, char *name)
673 (void) strcpy(name, "mos");
675 dsl_dir_name(ds->ds_dir, name);
676 VERIFY(0 == dsl_dataset_get_snapname(ds));
677 if (ds->ds_snapname[0]) {
678 (void) strcat(name, "@");
680 * We use a "recursive" mutex so that we
681 * can call dprintf_ds() with ds_lock held.
683 if (!MUTEX_HELD(&ds->ds_lock)) {
684 mutex_enter(&ds->ds_lock);
685 (void) strcat(name, ds->ds_snapname);
686 mutex_exit(&ds->ds_lock);
688 (void) strcat(name, ds->ds_snapname);
695 dsl_dataset_namelen(dsl_dataset_t *ds)
700 result = 3; /* "mos" */
702 result = dsl_dir_namelen(ds->ds_dir);
703 VERIFY(0 == dsl_dataset_get_snapname(ds));
704 if (ds->ds_snapname[0]) {
705 ++result; /* adding one for the @-sign */
706 if (!MUTEX_HELD(&ds->ds_lock)) {
707 mutex_enter(&ds->ds_lock);
708 result += strlen(ds->ds_snapname);
709 mutex_exit(&ds->ds_lock);
711 result += strlen(ds->ds_snapname);
720 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
722 dmu_buf_rele(ds->ds_dbuf, tag);
726 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
728 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
729 rw_exit(&ds->ds_rwlock);
731 dsl_dataset_drop_ref(ds, tag);
735 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
737 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
738 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
740 mutex_enter(&ds->ds_lock);
742 if (RW_WRITE_HELD(&ds->ds_rwlock)) {
743 rw_exit(&ds->ds_rwlock);
744 cv_broadcast(&ds->ds_exclusive_cv);
746 mutex_exit(&ds->ds_lock);
748 dsl_dataset_drop_ref(ds, tag);
750 dsl_dataset_evict(NULL, ds);
754 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
756 boolean_t gotit = FALSE;
758 mutex_enter(&ds->ds_lock);
759 if (ds->ds_owner == NULL &&
760 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
762 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
763 rw_exit(&ds->ds_rwlock);
766 mutex_exit(&ds->ds_lock);
771 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
773 ASSERT3P(owner, ==, ds->ds_owner);
774 if (!RW_WRITE_HELD(&ds->ds_rwlock))
775 rw_enter(&ds->ds_rwlock, RW_WRITER);
779 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
780 uint64_t flags, dmu_tx_t *tx)
782 dsl_pool_t *dp = dd->dd_pool;
784 dsl_dataset_phys_t *dsphys;
786 objset_t *mos = dp->dp_meta_objset;
789 origin = dp->dp_origin_snap;
791 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
792 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
793 ASSERT(dmu_tx_is_syncing(tx));
794 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
796 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
797 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
798 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
799 dmu_buf_will_dirty(dbuf, tx);
800 dsphys = dbuf->db_data;
801 bzero(dsphys, sizeof (dsl_dataset_phys_t));
802 dsphys->ds_dir_obj = dd->dd_object;
803 dsphys->ds_flags = flags;
804 dsphys->ds_fsid_guid = unique_create();
805 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
806 sizeof (dsphys->ds_guid));
807 dsphys->ds_snapnames_zapobj =
808 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
810 dsphys->ds_creation_time = gethrestime_sec();
811 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
813 if (origin == NULL) {
814 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
818 dsphys->ds_prev_snap_obj = origin->ds_object;
819 dsphys->ds_prev_snap_txg =
820 origin->ds_phys->ds_creation_txg;
821 dsphys->ds_used_bytes =
822 origin->ds_phys->ds_used_bytes;
823 dsphys->ds_compressed_bytes =
824 origin->ds_phys->ds_compressed_bytes;
825 dsphys->ds_uncompressed_bytes =
826 origin->ds_phys->ds_uncompressed_bytes;
827 dsphys->ds_bp = origin->ds_phys->ds_bp;
828 dsphys->ds_flags |= origin->ds_phys->ds_flags;
830 dmu_buf_will_dirty(origin->ds_dbuf, tx);
831 origin->ds_phys->ds_num_children++;
833 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
834 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
835 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
836 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
837 dsl_dataset_rele(ohds, FTAG);
839 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
840 if (origin->ds_phys->ds_next_clones_obj == 0) {
841 origin->ds_phys->ds_next_clones_obj =
843 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
845 VERIFY(0 == zap_add_int(mos,
846 origin->ds_phys->ds_next_clones_obj,
850 dmu_buf_will_dirty(dd->dd_dbuf, tx);
851 dd->dd_phys->dd_origin_obj = origin->ds_object;
852 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
853 if (origin->ds_dir->dd_phys->dd_clones == 0) {
854 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
855 origin->ds_dir->dd_phys->dd_clones =
857 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
859 VERIFY3U(0, ==, zap_add_int(mos,
860 origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
864 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
865 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
867 dmu_buf_rele(dbuf, FTAG);
869 dmu_buf_will_dirty(dd->dd_dbuf, tx);
870 dd->dd_phys->dd_head_dataset_obj = dsobj;
876 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
877 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
879 dsl_pool_t *dp = pdd->dd_pool;
880 uint64_t dsobj, ddobj;
883 ASSERT(lastname[0] != '@');
885 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
886 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
888 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
890 dsl_deleg_set_create_perms(dd, tx, cr);
892 dsl_dir_close(dd, FTAG);
895 * If we are creating a clone, make sure we zero out any stale
896 * data from the origin snapshot's zil header.
898 if (origin != NULL) {
902 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
903 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
904 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
905 dsl_dataset_dirty(ds, tx);
906 dsl_dataset_rele(ds, FTAG);
913 /* FreeBSD ioctl compat begin */
916 const char *snapname;
920 dsl_check_snap_cb(const char *name, void *arg)
922 struct destroyarg *da = arg;
926 dsname = kmem_asprintf("%s@%s", name, da->snapname);
927 VERIFY(nvlist_add_boolean(da->nvl, dsname) == 0);
933 dmu_get_recursive_snaps_nvl(const char *fsname, const char *snapname,
936 struct destroyarg *da;
939 da = kmem_zalloc(sizeof (struct destroyarg), KM_SLEEP);
941 da->snapname = snapname;
942 err = dmu_objset_find(fsname, dsl_check_snap_cb, da,
944 kmem_free(da, sizeof (struct destroyarg));
948 /* FreeBSD ioctl compat end */
949 #endif /* __FreeBSD__ */
952 * The snapshots must all be in the same pool.
955 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
958 dsl_sync_task_t *dst;
961 dsl_sync_task_group_t *dstg;
963 pair = nvlist_next_nvpair(snaps, NULL);
967 err = spa_open(nvpair_name(pair), &spa, FTAG);
970 dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
972 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
973 pair = nvlist_next_nvpair(snaps, pair)) {
977 err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
979 struct dsl_ds_destroyarg *dsda;
981 dsl_dataset_make_exclusive(ds, dstg);
982 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
986 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
987 dsl_dataset_destroy_sync, dsda, dstg, 0);
988 } else if (err == ENOENT) {
991 (void) strcpy(failed, nvpair_name(pair));
997 err = dsl_sync_task_group_wait(dstg);
999 for (dst = list_head(&dstg->dstg_tasks); dst;
1000 dst = list_next(&dstg->dstg_tasks, dst)) {
1001 struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
1002 dsl_dataset_t *ds = dsda->ds;
1005 * Return the file system name that triggered the error
1008 dsl_dataset_name(ds, failed);
1010 ASSERT3P(dsda->rm_origin, ==, NULL);
1011 dsl_dataset_disown(ds, dstg);
1012 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
1015 dsl_sync_task_group_destroy(dstg);
1016 spa_close(spa, FTAG);
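/*
 * Editor's sketch of the expected call pattern (illustrative; the local
 * variable names are hypothetical):
 *
 *	nvlist_t *snaps;
 *	char failed[MAXPATHLEN];
 *	VERIFY(nvlist_alloc(&snaps, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "pool/fs@snap1") == 0);
 *	VERIFY(nvlist_add_boolean(snaps, "pool/fs@snap2") == 0);
 *	err = dmu_snapshots_destroy_nvl(snaps, B_FALSE, failed);
 *	nvlist_free(snaps);
 *
 * All snapshots must belong to the same pool; on error, "failed" is
 * filled in with the name of the snapshot that triggered it.
 */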
1022 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
1024 boolean_t might_destroy = B_FALSE;
1026 mutex_enter(&ds->ds_lock);
1027 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
1028 DS_IS_DEFER_DESTROY(ds))
1029 might_destroy = B_TRUE;
1030 mutex_exit(&ds->ds_lock);
1032 return (might_destroy);
1036 * If we're removing a clone, and these three conditions are true:
1037 * 1) the clone's origin has no other children
1038 * 2) the clone's origin has no user references
1039 * 3) the clone's origin has been marked for deferred destruction
1040 * Then, prepare to remove the origin as part of this sync task group.
1043 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
1045 dsl_dataset_t *ds = dsda->ds;
1046 dsl_dataset_t *origin = ds->ds_prev;
1048 if (dsl_dataset_might_destroy_origin(origin)) {
1053 namelen = dsl_dataset_namelen(origin) + 1;
1054 name = kmem_alloc(namelen, KM_SLEEP);
1055 dsl_dataset_name(origin, name);
1057 error = zfs_unmount_snap(name, NULL);
1059 kmem_free(name, namelen);
1063 error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1064 kmem_free(name, namelen);
1067 dsda->rm_origin = origin;
1068 dsl_dataset_make_exclusive(origin, tag);
1075 * ds must be opened as OWNER. On return (whether successful or not),
1076 * ds will be closed and caller can no longer dereference it.
1079 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1082 dsl_sync_task_group_t *dstg;
1086 struct dsl_ds_destroyarg dsda = { 0 };
1087 dsl_dataset_t dummy_ds = { 0 };
1091 if (dsl_dataset_is_snapshot(ds)) {
1092 /* Destroying a snapshot is simpler */
1093 dsl_dataset_make_exclusive(ds, tag);
1096 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1097 dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1099 ASSERT3P(dsda.rm_origin, ==, NULL);
1107 dummy_ds.ds_dir = dd;
1108 dummy_ds.ds_object = ds->ds_object;
1111 * Check for errors and mark this ds as inconsistent, in
1112 * case we crash while freeing the objects.
1114 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1115 dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1119 err = dmu_objset_from_ds(ds, &os);
1124 * remove the objects in open context, so that we won't
1125 * have too much to do in syncing context.
1127 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1128 ds->ds_phys->ds_prev_snap_txg)) {
1130 * Ignore errors; if there is not enough disk space,
1131 * we will deal with it in dsl_dataset_destroy_sync().
1133 (void) dmu_free_object(os, obj);
1139 * Only the ZIL knows how to free log blocks.
1141 zil_destroy(dmu_objset_zil(os), B_FALSE);
1144 * Sync out all in-flight IO.
1146 txg_wait_synced(dd->dd_pool, 0);
1149 * If we managed to free all the objects in open
1150 * context, the user space accounting should be zero.
1152 if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1153 dmu_objset_userused_enabled(os)) {
1156 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1158 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1162 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1163 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1164 rw_exit(&dd->dd_pool->dp_config_rwlock);
1170 * Blow away the dsl_dir + head dataset.
1172 dsl_dataset_make_exclusive(ds, tag);
1174 * If we're removing a clone, we might also need to remove its
1178 dsda.need_prep = B_FALSE;
1179 if (dsl_dir_is_clone(dd)) {
1180 err = dsl_dataset_origin_rm_prep(&dsda, tag);
1182 dsl_dir_close(dd, FTAG);
1187 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1188 dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1189 dsl_dataset_destroy_sync, &dsda, tag, 0);
1190 dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1191 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1192 err = dsl_sync_task_group_wait(dstg);
1193 dsl_sync_task_group_destroy(dstg);
1196 * We could be racing against 'zfs release' or 'zfs destroy -d'
1197 * on the origin snap, in which case we can get EBUSY if we
1198 * needed to destroy the origin snap but were not ready to
1201 if (dsda.need_prep) {
1202 ASSERT(err == EBUSY);
1203 ASSERT(dsl_dir_is_clone(dd));
1204 ASSERT(dsda.rm_origin == NULL);
1206 } while (dsda.need_prep);
1208 if (dsda.rm_origin != NULL)
1209 dsl_dataset_disown(dsda.rm_origin, tag);
1211 /* if it is successful, dsl_dir_destroy_sync will close the dd */
1213 dsl_dir_close(dd, FTAG);
1215 dsl_dataset_disown(ds, tag);
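/*
 * Editor's usage sketch (illustrative): the caller must hold the
 * dataset as an owner before destroying it, e.g.
 *
 *	dsl_dataset_t *ds;
 *	err = dsl_dataset_own(name, B_FALSE, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG, B_FALSE);
 *
 * Whether or not the destroy succeeds, ds is closed on return and must
 * not be dereferenced afterwards.
 */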
1220 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1222 return (&ds->ds_phys->ds_bp);
1226 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1228 ASSERT(dmu_tx_is_syncing(tx));
1229 /* If it's the meta-objset, set dp_meta_rootbp */
1231 tx->tx_pool->dp_meta_rootbp = *bp;
1233 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1234 ds->ds_phys->ds_bp = *bp;
1239 dsl_dataset_get_spa(dsl_dataset_t *ds)
1241 return (ds->ds_dir->dd_pool->dp_spa);
1245 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1249 if (ds == NULL) /* this is the meta-objset */
1252 ASSERT(ds->ds_objset != NULL);
1254 if (ds->ds_phys->ds_next_snap_obj != 0)
1255 panic("dirtying snapshot!");
1257 dp = ds->ds_dir->dd_pool;
1259 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1260 /* up the hold count until we can be written out */
1261 dmu_buf_add_ref(ds->ds_dbuf, ds);
1266 * The unique space in the head dataset can be calculated by subtracting
1267 * the space used in the most recent snapshot, that is still being used
1268 * in this file system, from the space currently in use. To figure out
1269 * the space in the most recent snapshot still in use, we need to take
1270 * the total space used in the snapshot and subtract out the space that
1271 * has been freed up since the snapshot was taken.
1274 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1277 uint64_t dlused, dlcomp, dluncomp;
1279 ASSERT(!dsl_dataset_is_snapshot(ds));
1281 if (ds->ds_phys->ds_prev_snap_obj != 0)
1282 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1286 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1288 ASSERT3U(dlused, <=, mrs_used);
1289 ds->ds_phys->ds_unique_bytes =
1290 ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1292 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1293 SPA_VERSION_UNIQUE_ACCURATE)
1294 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
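/*
 * Worked example (editor's addition): if the head currently uses 100G,
 * the most recent snapshot used 80G when it was taken, and 30G of that
 * snapshot's blocks have since been freed (and are therefore on the
 * deadlist), then the snapshot still pins 80G - 30G = 50G of the head's
 * data, and ds_unique_bytes = 100G - 50G = 50G.
 */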
1304 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1305 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1307 struct killarg *ka = arg;
1308 dmu_tx_t *tx = ka->tx;
1313 if (zb->zb_level == ZB_ZIL_LEVEL) {
1314 ASSERT(zilog != NULL);
1316 * It's a block in the intent log. It has no
1317 * accounting, so just free it.
1319 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1321 ASSERT(zilog == NULL);
1322 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1323 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1331 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1333 dsl_dataset_t *ds = arg1;
1334 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1339 * Can't delete a head dataset if there are snapshots of it.
1340 * (Except if the only snapshots are from the branch we cloned
1343 if (ds->ds_prev != NULL &&
1344 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1348 * This is really a dsl_dir thing, but check it here so that
1349 * we'll be less likely to leave this dataset inconsistent &
1352 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1363 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1365 dsl_dataset_t *ds = arg1;
1366 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1368 /* Mark it as inconsistent on-disk, in case we crash */
1369 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1370 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1372 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1373 "dataset = %llu", ds->ds_object);
1377 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1380 dsl_dataset_t *ds = dsda->ds;
1381 dsl_dataset_t *ds_prev = ds->ds_prev;
1383 if (dsl_dataset_might_destroy_origin(ds_prev)) {
1384 struct dsl_ds_destroyarg ndsda = {0};
1387 * If we're not prepared to remove the origin, don't remove
1390 if (dsda->rm_origin == NULL) {
1391 dsda->need_prep = B_TRUE;
1396 ndsda.is_origin_rm = B_TRUE;
1397 return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1401 * If we're not going to remove the origin after all,
1402 * undo the open context setup.
1404 if (dsda->rm_origin != NULL) {
1405 dsl_dataset_disown(dsda->rm_origin, tag);
1406 dsda->rm_origin = NULL;
1413 * If you add new checks here, you may need to add
1414 * additional checks to the "temporary" case in
1415 * snapshot_check() in dmu_objset.c.
1419 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1421 struct dsl_ds_destroyarg *dsda = arg1;
1422 dsl_dataset_t *ds = dsda->ds;
1424 /* we have an owner hold, so no one else can destroy us */
1425 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1428 * Only allow deferred destroy on pools that support it.
1429 * NOTE: deferred destroy is only supported on snapshots.
1432 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1433 SPA_VERSION_USERREFS)
1435 ASSERT(dsl_dataset_is_snapshot(ds));
1440 * Can't delete a head dataset if there are snapshots of it.
1441 * (Except if the only snapshots are from the branch we cloned
1444 if (ds->ds_prev != NULL &&
1445 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1449 * If we made changes this txg, traverse_dsl_dataset won't find
1452 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1455 if (dsl_dataset_is_snapshot(ds)) {
1457 * If this snapshot has an elevated user reference count,
1458 * we can't destroy it yet.
1460 if (ds->ds_userrefs > 0 && !dsda->releasing)
1463 mutex_enter(&ds->ds_lock);
1465 * Can't delete a branch point. However, if we're destroying
1466 * a clone and removing its origin due to it having a user
1467 * hold count of 0 and having been marked for deferred destroy,
1468 * it's OK for the origin to have a single clone.
1470 if (ds->ds_phys->ds_num_children >
1471 (dsda->is_origin_rm ? 2 : 1)) {
1472 mutex_exit(&ds->ds_lock);
1475 mutex_exit(&ds->ds_lock);
1476 } else if (dsl_dir_is_clone(ds->ds_dir)) {
1477 return (dsl_dataset_origin_check(dsda, arg2, tx));
1480 /* XXX we should do some i/o error checking... */
1492 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1494 struct refsarg *arg = argv;
1496 mutex_enter(&arg->lock);
1498 cv_signal(&arg->cv);
1499 mutex_exit(&arg->lock);
1503 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1507 bzero(&arg, sizeof (arg));
1508 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1509 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1511 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1512 dsl_dataset_refs_gone);
1513 dmu_buf_rele(ds->ds_dbuf, tag);
1514 mutex_enter(&arg.lock);
1516 cv_wait(&arg.cv, &arg.lock);
1518 mutex_exit(&arg.lock);
1521 mutex_destroy(&arg.lock);
1522 cv_destroy(&arg.cv);
1526 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1528 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1532 ASSERT(ds->ds_phys->ds_num_children >= 2);
1533 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1535 * The err should not be ENOENT, but a bug in a previous version
1536 * of the code could cause upgrade_clones_cb() to not set
1537 * ds_next_snap_obj when it should, leading to a missing entry.
1538 * If we knew that the pool was created after
1539 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1540 * ENOENT. However, at least we can check that we don't have
1541 * too many entries in the next_clones_obj even after failing to
1544 if (err != ENOENT) {
1545 VERIFY3U(err, ==, 0);
1547 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1549 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1553 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1555 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1560 * If it is the old version, dd_clones doesn't exist so we can't
1561 * find the clones, but deadlist_remove_key() is a no-op so it
1564 if (ds->ds_dir->dd_phys->dd_clones == 0)
1567 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1568 zap_cursor_retrieve(&zc, &za) == 0;
1569 zap_cursor_advance(&zc)) {
1570 dsl_dataset_t *clone;
1572 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1573 za.za_first_integer, FTAG, &clone));
1574 if (clone->ds_dir->dd_origin_txg > mintxg) {
1575 dsl_deadlist_remove_key(&clone->ds_deadlist,
1577 dsl_dataset_remove_clones_key(clone, mintxg, tx);
1579 dsl_dataset_rele(clone, FTAG);
1581 zap_cursor_fini(&zc);
1584 struct process_old_arg {
1586 dsl_dataset_t *ds_prev;
1587 boolean_t after_branch_point;
1589 uint64_t used, comp, uncomp;
1593 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1595 struct process_old_arg *poa = arg;
1596 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1598 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1599 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1600 if (poa->ds_prev && !poa->after_branch_point &&
1602 poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1603 poa->ds_prev->ds_phys->ds_unique_bytes +=
1604 bp_get_dsize_sync(dp->dp_spa, bp);
1607 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1608 poa->comp += BP_GET_PSIZE(bp);
1609 poa->uncomp += BP_GET_UCSIZE(bp);
1610 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1616 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1617 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1619 struct process_old_arg poa = { 0 };
1620 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1621 objset_t *mos = dp->dp_meta_objset;
1623 ASSERT(ds->ds_deadlist.dl_oldfmt);
1624 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1627 poa.ds_prev = ds_prev;
1628 poa.after_branch_point = after_branch_point;
1629 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1630 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1631 process_old_cb, &poa, tx));
1632 VERIFY3U(zio_wait(poa.pio), ==, 0);
1633 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1635 /* change snapused */
1636 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1637 -poa.used, -poa.comp, -poa.uncomp, tx);
1639 /* swap next's deadlist to our deadlist */
1640 dsl_deadlist_close(&ds->ds_deadlist);
1641 dsl_deadlist_close(&ds_next->ds_deadlist);
1642 SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1643 ds->ds_phys->ds_deadlist_obj);
1644 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1645 dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1646 ds_next->ds_phys->ds_deadlist_obj);
1650 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1652 struct dsl_ds_destroyarg *dsda = arg1;
1653 dsl_dataset_t *ds = dsda->ds;
1655 int after_branch_point = FALSE;
1656 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1657 objset_t *mos = dp->dp_meta_objset;
1658 dsl_dataset_t *ds_prev = NULL;
1659 boolean_t wont_destroy;
1662 wont_destroy = (dsda->defer &&
1663 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1665 ASSERT(ds->ds_owner || wont_destroy);
1666 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1667 ASSERT(ds->ds_prev == NULL ||
1668 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1669 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1672 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1673 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1674 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1678 /* signal any waiters that this dataset is going away */
1679 mutex_enter(&ds->ds_lock);
1680 ds->ds_owner = dsl_reaper;
1681 cv_broadcast(&ds->ds_exclusive_cv);
1682 mutex_exit(&ds->ds_lock);
1684 /* Remove our reservation */
1685 if (ds->ds_reserved != 0) {
1686 dsl_prop_setarg_t psa;
1689 dsl_prop_setarg_init_uint64(&psa, "refreservation",
1690 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1692 psa.psa_effective_value = 0; /* predict default value */
1694 dsl_dataset_set_reservation_sync(ds, &psa, tx);
1695 ASSERT3U(ds->ds_reserved, ==, 0);
1698 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1700 dsl_scan_ds_destroyed(ds, tx);
1702 obj = ds->ds_object;
1704 if (ds->ds_phys->ds_prev_snap_obj != 0) {
1706 ds_prev = ds->ds_prev;
1708 VERIFY(0 == dsl_dataset_hold_obj(dp,
1709 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1711 after_branch_point =
1712 (ds_prev->ds_phys->ds_next_snap_obj != obj);
1714 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1715 if (after_branch_point &&
1716 ds_prev->ds_phys->ds_next_clones_obj != 0) {
1717 remove_from_next_clones(ds_prev, obj, tx);
1718 if (ds->ds_phys->ds_next_snap_obj != 0) {
1719 VERIFY(0 == zap_add_int(mos,
1720 ds_prev->ds_phys->ds_next_clones_obj,
1721 ds->ds_phys->ds_next_snap_obj, tx));
1724 if (after_branch_point &&
1725 ds->ds_phys->ds_next_snap_obj == 0) {
1726 /* This clone is toast. */
1727 ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1728 ds_prev->ds_phys->ds_num_children--;
1731 * If the clone's origin has no other clones, no
1732 * user holds, and has been marked for deferred
1733 * deletion, then we should have done the necessary
1734 * destroy setup for it.
1736 if (ds_prev->ds_phys->ds_num_children == 1 &&
1737 ds_prev->ds_userrefs == 0 &&
1738 DS_IS_DEFER_DESTROY(ds_prev)) {
1739 ASSERT3P(dsda->rm_origin, !=, NULL);
1741 ASSERT3P(dsda->rm_origin, ==, NULL);
1743 } else if (!after_branch_point) {
1744 ds_prev->ds_phys->ds_next_snap_obj =
1745 ds->ds_phys->ds_next_snap_obj;
1749 if (dsl_dataset_is_snapshot(ds)) {
1750 dsl_dataset_t *ds_next;
1751 uint64_t old_unique;
1752 uint64_t used = 0, comp = 0, uncomp = 0;
1754 VERIFY(0 == dsl_dataset_hold_obj(dp,
1755 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1756 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1758 old_unique = ds_next->ds_phys->ds_unique_bytes;
1760 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1761 ds_next->ds_phys->ds_prev_snap_obj =
1762 ds->ds_phys->ds_prev_snap_obj;
1763 ds_next->ds_phys->ds_prev_snap_txg =
1764 ds->ds_phys->ds_prev_snap_txg;
1765 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1766 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1769 if (ds_next->ds_deadlist.dl_oldfmt) {
1770 process_old_deadlist(ds, ds_prev, ds_next,
1771 after_branch_point, tx);
1773 /* Adjust prev's unique space. */
1774 if (ds_prev && !after_branch_point) {
1775 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1776 ds_prev->ds_phys->ds_prev_snap_txg,
1777 ds->ds_phys->ds_prev_snap_txg,
1778 &used, &comp, &uncomp);
1779 ds_prev->ds_phys->ds_unique_bytes += used;
1782 /* Adjust snapused. */
1783 dsl_deadlist_space_range(&ds_next->ds_deadlist,
1784 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1785 &used, &comp, &uncomp);
1786 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1787 -used, -comp, -uncomp, tx);
1789 /* Move blocks to be freed to pool's free list. */
1790 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1791 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1793 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1794 DD_USED_HEAD, used, comp, uncomp, tx);
1795 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx);
1797 /* Merge our deadlist into next's and free it. */
1798 dsl_deadlist_merge(&ds_next->ds_deadlist,
1799 ds->ds_phys->ds_deadlist_obj, tx);
1801 dsl_deadlist_close(&ds->ds_deadlist);
1802 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1804 /* Collapse range in clone heads */
1805 dsl_dataset_remove_clones_key(ds,
1806 ds->ds_phys->ds_creation_txg, tx);
1808 if (dsl_dataset_is_snapshot(ds_next)) {
1809 dsl_dataset_t *ds_nextnext;
1812 * Update next's unique to include blocks which
1813 * were previously shared by only this snapshot
1814 * and it. Those blocks will be born after the
1815 * prev snap and before this snap, and will have
1816 * died after the next snap and before the one
1817 * after that (ie. be on the snap after next's
1820 VERIFY(0 == dsl_dataset_hold_obj(dp,
1821 ds_next->ds_phys->ds_next_snap_obj,
1822 FTAG, &ds_nextnext));
1823 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1824 ds->ds_phys->ds_prev_snap_txg,
1825 ds->ds_phys->ds_creation_txg,
1826 &used, &comp, &uncomp);
1827 ds_next->ds_phys->ds_unique_bytes += used;
1828 dsl_dataset_rele(ds_nextnext, FTAG);
1829 ASSERT3P(ds_next->ds_prev, ==, NULL);
1831 /* Collapse range in this head. */
1833 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1834 ds->ds_dir->dd_phys->dd_head_dataset_obj,
1836 dsl_deadlist_remove_key(&hds->ds_deadlist,
1837 ds->ds_phys->ds_creation_txg, tx);
1838 dsl_dataset_rele(hds, FTAG);
1841 ASSERT3P(ds_next->ds_prev, ==, ds);
1842 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1843 ds_next->ds_prev = NULL;
1845 VERIFY(0 == dsl_dataset_get_ref(dp,
1846 ds->ds_phys->ds_prev_snap_obj,
1847 ds_next, &ds_next->ds_prev));
1850 dsl_dataset_recalc_head_uniq(ds_next);
1853 * Reduce the amount of our unconsumed refreservation
1854 * being charged to our parent by the amount of
1855 * new unique data we have gained.
1857 if (old_unique < ds_next->ds_reserved) {
1859 uint64_t new_unique =
1860 ds_next->ds_phys->ds_unique_bytes;
1862 ASSERT(old_unique <= new_unique);
1863 mrsdelta = MIN(new_unique - old_unique,
1864 ds_next->ds_reserved - old_unique);
1865 dsl_dir_diduse_space(ds->ds_dir,
1866 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
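/*
 * Worked example (editor's addition): if old_unique was 2G, ds_next's
 * refreservation (ds_reserved) is 5G, and destroying this snapshot
 * raised ds_next's ds_unique_bytes to 4G, then
 * mrsdelta = MIN(4G - 2G, 5G - 2G) = 2G, and 2G of refreservation
 * charge is returned to the parent dsl_dir.
 */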
1869 dsl_dataset_rele(ds_next, FTAG);
1872 * There's no next snapshot, so this is a head dataset.
1873 * Destroy the deadlist. Unless it's a clone, the
1874 * deadlist should be empty. (If it's a clone, it's
1875 * safe to ignore the deadlist contents.)
1879 dsl_deadlist_close(&ds->ds_deadlist);
1880 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1881 ds->ds_phys->ds_deadlist_obj = 0;
1884 * Free everything that we point to (that's born after
1885 * the previous snapshot, if we are a clone)
1887 * NB: this should be very quick, because we already
1888 * freed all the objects in open context.
1892 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1893 TRAVERSE_POST, kill_blkptr, &ka);
1894 ASSERT3U(err, ==, 0);
1895 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1896 ds->ds_phys->ds_unique_bytes == 0);
1898 if (ds->ds_prev != NULL) {
1899 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1900 VERIFY3U(0, ==, zap_remove_int(mos,
1901 ds->ds_prev->ds_dir->dd_phys->dd_clones,
1902 ds->ds_object, tx));
1904 dsl_dataset_rele(ds->ds_prev, ds);
1905 ds->ds_prev = ds_prev = NULL;
1910 * This must be done after the traverse_dataset() call, because it will
1911 * re-open the objset.
1913 if (ds->ds_objset) {
1914 dmu_objset_evict(ds->ds_objset);
1915 ds->ds_objset = NULL;
1918 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1919 /* Erase the link in the dir */
1920 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1921 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1922 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1923 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1926 /* remove from snapshot namespace */
1927 dsl_dataset_t *ds_head;
1928 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1929 VERIFY(0 == dsl_dataset_hold_obj(dp,
1930 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1931 VERIFY(0 == dsl_dataset_get_snapname(ds));
1936 err = dsl_dataset_snap_lookup(ds_head,
1937 ds->ds_snapname, &val);
1938 ASSERT3U(err, ==, 0);
1939 ASSERT3U(val, ==, obj);
1942 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1944 dsl_dataset_rele(ds_head, FTAG);
1947 if (ds_prev && ds->ds_prev != ds_prev)
1948 dsl_dataset_rele(ds_prev, FTAG);
1950 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1951 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1952 "dataset = %llu", ds->ds_object);
1954 if (ds->ds_phys->ds_next_clones_obj != 0) {
1956 ASSERT(0 == zap_count(mos,
1957 ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1958 VERIFY(0 == dmu_object_free(mos,
1959 ds->ds_phys->ds_next_clones_obj, tx));
1961 if (ds->ds_phys->ds_props_obj != 0)
1962 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1963 if (ds->ds_phys->ds_userrefs_obj != 0)
1964 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1965 dsl_dir_close(ds->ds_dir, ds);
1967 dsl_dataset_drain_refs(ds, tag);
1968 VERIFY(0 == dmu_object_free(mos, obj, tx));
1970 if (dsda->rm_origin) {
1972 * Remove the origin of the clone we just destroyed.
1974 struct dsl_ds_destroyarg ndsda = {0};
1976 ndsda.ds = dsda->rm_origin;
1977 dsl_dataset_destroy_sync(&ndsda, tag, tx);
1982 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1986 if (!dmu_tx_is_syncing(tx))
1990 * If there's an fs-only reservation, any blocks that might become
1991 * owned by the snapshot dataset must be accommodated by space
1992 * outside of the reservation.
1994 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1995 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1996 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2000 * Propagate any reserved space for this snapshot to other
2001 * snapshot checks in this sync group.
2004 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
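/*
 * Worked example (editor's addition): with ds_unique_bytes = 3G and a
 * refreservation of 5G, asize = MIN(3G, 5G) = 3G; the snapshot can only
 * be taken if at least 3G is available outside the reservation, since
 * those unique blocks will become owned by the snapshot.
 */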
2010 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
2012 dsl_dataset_t *ds = arg1;
2013 const char *snapname = arg2;
2018 * We don't allow multiple snapshots of the same txg. If there
2019 * is already one, try again.
2021 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2025 * Check for a conflicting snapshot name.
2027 err = dsl_dataset_snap_lookup(ds, snapname, &value);
2034 * Check that the dataset's name is not too long. Name consists
2035 * of the dataset's length + 1 for the @-sign + snapshot name's length
2037 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2038 return (ENAMETOOLONG);
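/*
 * Example (editor's addition): snapshotting "tank/home/user" as "daily"
 * yields "tank/home/user@daily", i.e. 14 + 1 + 5 = 20 characters, which
 * must stay below MAXNAMELEN.
 */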
2040 err = dsl_dataset_snapshot_reserve_space(ds, tx);
2044 ds->ds_trysnap_txg = tx->tx_txg;
2049 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2051 dsl_dataset_t *ds = arg1;
2052 const char *snapname = arg2;
2053 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2055 dsl_dataset_phys_t *dsphys;
2056 uint64_t dsobj, crtxg;
2057 objset_t *mos = dp->dp_meta_objset;
2060 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2063 * The origin's ds_creation_txg has to be < TXG_INITIAL
2065 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2070 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2071 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2072 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2073 dmu_buf_will_dirty(dbuf, tx);
2074 dsphys = dbuf->db_data;
2075 bzero(dsphys, sizeof (dsl_dataset_phys_t));
2076 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2077 dsphys->ds_fsid_guid = unique_create();
2078 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2079 sizeof (dsphys->ds_guid));
2080 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2081 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2082 dsphys->ds_next_snap_obj = ds->ds_object;
2083 dsphys->ds_num_children = 1;
2084 dsphys->ds_creation_time = gethrestime_sec();
2085 dsphys->ds_creation_txg = crtxg;
2086 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2087 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
2088 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2089 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2090 dsphys->ds_flags = ds->ds_phys->ds_flags;
2091 dsphys->ds_bp = ds->ds_phys->ds_bp;
2092 dmu_buf_rele(dbuf, FTAG);
2094 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2096 uint64_t next_clones_obj =
2097 ds->ds_prev->ds_phys->ds_next_clones_obj;
2098 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2100 ds->ds_prev->ds_phys->ds_num_children > 1);
2101 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2102 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2103 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2104 ds->ds_prev->ds_phys->ds_creation_txg);
2105 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2106 } else if (next_clones_obj != 0) {
2107 remove_from_next_clones(ds->ds_prev,
2108 dsphys->ds_next_snap_obj, tx);
2109 VERIFY3U(0, ==, zap_add_int(mos,
2110 next_clones_obj, dsobj, tx));
2115 * If we have a reference-reservation on this dataset, we will
2116 * need to increase the amount of refreservation being charged
2117 * since our unique space is going to zero.
2119 if (ds->ds_reserved) {
2121 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2122 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2123 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2127 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2128 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2129 ds->ds_dir->dd_myname, snapname, dsobj,
2130 ds->ds_phys->ds_prev_snap_txg);
2131 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2132 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2133 dsl_deadlist_close(&ds->ds_deadlist);
2134 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2135 dsl_deadlist_add_key(&ds->ds_deadlist,
2136 ds->ds_phys->ds_prev_snap_txg, tx);
2138 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2139 ds->ds_phys->ds_prev_snap_obj = dsobj;
2140 ds->ds_phys->ds_prev_snap_txg = crtxg;
2141 ds->ds_phys->ds_unique_bytes = 0;
2142 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2143 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2145 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2146 snapname, 8, 1, &dsobj, tx);
2150 dsl_dataset_drop_ref(ds->ds_prev, ds);
2151 VERIFY(0 == dsl_dataset_get_ref(dp,
2152 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2154 dsl_scan_ds_snapshotted(ds, tx);
2156 dsl_dir_snap_cmtime_update(ds->ds_dir);
2158 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
2159 "dataset = %llu", dsobj);
2163 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2165 ASSERT(dmu_tx_is_syncing(tx));
2166 ASSERT(ds->ds_objset != NULL);
2167 ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2170 * in case we had to change ds_fsid_guid when we opened it,
2173 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2174 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2176 dsl_dir_dirty(ds->ds_dir, tx);
2177 dmu_objset_sync(ds->ds_objset, zio, tx);
2181 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2184 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2190 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2191 VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2192 VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2195 * There may be missing entries in ds_next_clones_obj
2196 * due to a bug in a previous version of the code.
2197 * Only trust it if it has the right number of entries.
2199 if (ds->ds_phys->ds_next_clones_obj != 0) {
2200 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2203 if (count != ds->ds_phys->ds_num_children - 1) {
2206 for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2207 zap_cursor_retrieve(&zc, &za) == 0;
2208 zap_cursor_advance(&zc)) {
2209 dsl_dataset_t *clone;
2210 char buf[ZFS_MAXNAMELEN];
2211 if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2212 za.za_first_integer, FTAG, &clone) != 0) {
2215 dsl_dir_name(clone->ds_dir, buf);
2216 VERIFY(nvlist_add_boolean(val, buf) == 0);
2217 dsl_dataset_rele(clone, FTAG);
2219 zap_cursor_fini(&zc);
2220 VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2221 VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2225 nvlist_free(propval);
2226 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2230 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2232 uint64_t refd, avail, uobjs, aobjs, ratio;
2234 dsl_dir_stats(ds->ds_dir, nv);
2236 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2237 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2238 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2240 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2241 ds->ds_phys->ds_creation_time);
2242 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2243 ds->ds_phys->ds_creation_txg);
2244 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2246 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2248 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2249 ds->ds_phys->ds_guid);
2250 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2251 ds->ds_phys->ds_unique_bytes);
2252 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2254 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2256 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2257 DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2259 if (ds->ds_phys->ds_prev_snap_obj != 0) {
2260 uint64_t written, comp, uncomp;
2261 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2262 dsl_dataset_t *prev;
2264 rw_enter(&dp->dp_config_rwlock, RW_READER);
2265 int err = dsl_dataset_hold_obj(dp,
2266 ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2267 rw_exit(&dp->dp_config_rwlock);
2269 err = dsl_dataset_space_written(prev, ds, &written,
2271 dsl_dataset_rele(prev, FTAG);
2273 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2279 ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2280 (ds->ds_phys->ds_uncompressed_bytes * 100 /
2281 ds->ds_phys->ds_compressed_bytes);
2282 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
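	/*
	 * Illustrative note (not in the original source): refratio is the
	 * referenced-compression ratio scaled by 100.  For example, 300 GB
	 * of uncompressed data stored as 120 GB compressed yields
	 * 300 * 100 / 120 = 250, which userland tools render as "2.50x".
	 */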
2284 if (ds->ds_phys->ds_next_snap_obj) {
2286 * This is a snapshot; override the dd's space used with
2287 * our unique space and compression ratio.
2289 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2290 ds->ds_phys->ds_unique_bytes);
2291 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2293 get_clones_stat(ds, nv);
2298 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2300 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2301 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2302 stat->dds_guid = ds->ds_phys->ds_guid;
2303 if (ds->ds_phys->ds_next_snap_obj) {
2304 stat->dds_is_snapshot = B_TRUE;
2305 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2307 stat->dds_is_snapshot = B_FALSE;
2308 stat->dds_num_clones = 0;
2311 /* clone origin is really a dsl_dir thing... */
2312 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2313 if (dsl_dir_is_clone(ds->ds_dir)) {
2316 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2317 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2318 dsl_dataset_name(ods, stat->dds_origin);
2319 dsl_dataset_drop_ref(ods, FTAG);
2321 stat->dds_origin[0] = '\0';
2323 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2327 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2329 return (ds->ds_fsid_guid);
2333 dsl_dataset_space(dsl_dataset_t *ds,
2334 uint64_t *refdbytesp, uint64_t *availbytesp,
2335 uint64_t *usedobjsp, uint64_t *availobjsp)
2337 *refdbytesp = ds->ds_phys->ds_used_bytes;
2338 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2339 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2340 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2341 if (ds->ds_quota != 0) {
2343 * Adjust available bytes according to refquota
2345 if (*refdbytesp < ds->ds_quota)
2346 *availbytesp = MIN(*availbytesp,
2347 ds->ds_quota - *refdbytesp);
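	/*
	 * Illustrative example (hypothetical numbers): with a 10 GB refquota
	 * and 6 GB already referenced, available space is clamped to at most
	 * 4 GB here, even if the pool itself has far more free space.
	 */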
2351 *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2352 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2356 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2358 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2360 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2361 dsl_pool_sync_context(dp));
2362 if (ds->ds_prev == NULL)
2364 if (ds->ds_phys->ds_bp.blk_birth >
2365 ds->ds_prev->ds_phys->ds_creation_txg) {
2366 objset_t *os, *os_prev;
2368 * It may be that only the ZIL differs, because it was
2369 * reset in the head. Don't count that as being modified.
2372 if (dmu_objset_from_ds(ds, &os) != 0)
2374 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2376 return (bcmp(&os->os_phys->os_meta_dnode,
2377 &os_prev->os_phys->os_meta_dnode,
2378 sizeof (os->os_phys->os_meta_dnode)) != 0);
2385 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2387 dsl_dataset_t *ds = arg1;
2388 char *newsnapname = arg2;
2389 dsl_dir_t *dd = ds->ds_dir;
2394 err = dsl_dataset_hold_obj(dd->dd_pool,
2395 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2399 /* new name better not be in use */
2400 err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2401 dsl_dataset_rele(hds, FTAG);
2405 else if (err == ENOENT)
2408 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2409 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2416 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2418 char oldname[MAXPATHLEN], newname[MAXPATHLEN];
2419 dsl_dataset_t *ds = arg1;
2420 const char *newsnapname = arg2;
2421 dsl_dir_t *dd = ds->ds_dir;
2422 objset_t *mos = dd->dd_pool->dp_meta_objset;
2426 ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2428 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2429 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2431 VERIFY(0 == dsl_dataset_get_snapname(ds));
2432 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2433 ASSERT3U(err, ==, 0);
2434 dsl_dataset_name(ds, oldname);
2435 mutex_enter(&ds->ds_lock);
2436 (void) strcpy(ds->ds_snapname, newsnapname);
2437 mutex_exit(&ds->ds_lock);
2438 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2439 ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2440 ASSERT3U(err, ==, 0);
2441 dsl_dataset_name(ds, newname);
2443 zvol_rename_minors(oldname, newname);
2446 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2447 "dataset = %llu", ds->ds_object);
2448 dsl_dataset_rele(hds, FTAG);
2451 struct renamesnaparg {
2452 dsl_sync_task_group_t *dstg;
2453 char failed[MAXPATHLEN];
2459 dsl_snapshot_rename_one(const char *name, void *arg)
2461 struct renamesnaparg *ra = arg;
2462 dsl_dataset_t *ds = NULL;
2466 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2467 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2470 * For recursive snapshot renames the parent won't be changing
2471 * so we just pass name for both the to/from argument.
2473 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2476 return (err == ENOENT ? 0 : err);
2481 * For all filesystems undergoing rename, we'll need to unmount them.
2483 (void) zfs_unmount_snap(snapname, NULL);
2485 err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2488 return (err == ENOENT ? 0 : err);
2490 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2491 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2497 dsl_recursive_rename(char *oldname, const char *newname)
2500 struct renamesnaparg *ra;
2501 dsl_sync_task_t *dst;
2503 char *cp, *fsname = spa_strdup(oldname);
2504 int len = strlen(oldname) + 1;
2506 /* truncate the snapshot name to get the fsname */
2507 cp = strchr(fsname, '@');
2510 err = spa_open(fsname, &spa, FTAG);
2512 kmem_free(fsname, len);
2515 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2516 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2518 ra->oldsnap = strchr(oldname, '@') + 1;
2519 ra->newsnap = strchr(newname, '@') + 1;
2522 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2524 kmem_free(fsname, len);
2527 err = dsl_sync_task_group_wait(ra->dstg);
2530 for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2531 dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2532 dsl_dataset_t *ds = dst->dst_arg1;
2534 dsl_dir_name(ds->ds_dir, ra->failed);
2535 (void) strlcat(ra->failed, "@", sizeof (ra->failed));
2536 (void) strlcat(ra->failed, ra->newsnap,
2537 sizeof (ra->failed));
2539 dsl_dataset_rele(ds, ra->dstg);
2543 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2545 dsl_sync_task_group_destroy(ra->dstg);
2546 kmem_free(ra, sizeof (struct renamesnaparg));
2547 spa_close(spa, FTAG);
2552 dsl_valid_rename(const char *oldname, void *arg)
2554 int delta = *(int *)arg;
2556 if (strlen(oldname) + delta >= MAXNAMELEN)
2557 return (ENAMETOOLONG);
2562 #pragma weak dmu_objset_rename = dsl_dataset_rename
2564 dsl_dataset_rename(char *oldname, const char *newname, int flags)
2571 err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2576 int delta = strlen(newname) - strlen(oldname);
2578 /* if we're growing, validate child name lengths */
2580 err = dmu_objset_find(oldname, dsl_valid_rename,
2581 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2584 err = dsl_dir_rename(dd, newname, flags);
2585 dsl_dir_close(dd, FTAG);
2589 if (tail[0] != '@') {
2590 /* the name ended in a nonexistent component */
2591 dsl_dir_close(dd, FTAG);
2595 dsl_dir_close(dd, FTAG);
2597 /* new name must be snapshot in same filesystem */
2598 tail = strchr(newname, '@');
2602 if (strncmp(oldname, newname, tail - newname) != 0)
2605 if (flags & ZFS_RENAME_RECURSIVE) {
2606 err = dsl_recursive_rename(oldname, newname);
2608 err = dsl_dataset_hold(oldname, FTAG, &ds);
2612 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2613 dsl_dataset_snapshot_rename_check,
2614 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2616 dsl_dataset_rele(ds, FTAG);
2622 struct promotenode {
2628 list_t shared_snaps, origin_snaps, clone_snaps;
2629 dsl_dataset_t *origin_origin;
2630 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2634 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2635 static boolean_t snaplist_unstable(list_t *l);
2638 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2640 dsl_dataset_t *hds = arg1;
2641 struct promotearg *pa = arg2;
2642 struct promotenode *snap = list_head(&pa->shared_snaps);
2643 dsl_dataset_t *origin_ds = snap->ds;
2647 /* Check that it is a real clone */
2648 if (!dsl_dir_is_clone(hds->ds_dir))
2651 /* Since this is so expensive, don't do the preliminary check */
2652 if (!dmu_tx_is_syncing(tx))
2655 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2658 /* compute origin's new unique space */
2659 snap = list_tail(&pa->clone_snaps);
2660 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2661 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2662 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2663 &pa->unique, &unused, &unused);
2666 * Walk the snapshots that we are moving
2668 * Compute space to transfer. Consider the incremental changes
2669 * to used for each snapshot:
2670 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2671 * So each snapshot gave birth to:
2672 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2673 * So a sequence would look like:
2674 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2675 * Which simplifies to:
2676 * uN + kN + kN-1 + ... + k1 + k0
2677 * Note however, if we stop before we reach the ORIGIN we get:
2678 * uN + kN + kN-1 + ... + kM - uM-1
2680 pa->used = origin_ds->ds_phys->ds_used_bytes;
2681 pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2682 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
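	/*
	 * Worked example of the telescoping sum above (hypothetical numbers):
	 * with three snapshots where u0=5, u1=8, u2=6 and k0=1, k1=2, k2=3,
	 * the blocks born are (5-0+1) + (8-5+2) + (6-8+3) = 6 + 5 + 1 = 12,
	 * which equals u2 + k2 + k1 + k0 = 6 + 3 + 2 + 1 = 12.
	 */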
2683 for (snap = list_head(&pa->shared_snaps); snap;
2684 snap = list_next(&pa->shared_snaps, snap)) {
2685 uint64_t val, dlused, dlcomp, dluncomp;
2686 dsl_dataset_t *ds = snap->ds;
2688 /* Check that the snapshot name does not conflict */
2689 VERIFY(0 == dsl_dataset_get_snapname(ds));
2690 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2698 /* The very first snapshot does not have a deadlist */
2699 if (ds->ds_phys->ds_prev_snap_obj == 0)
2702 dsl_deadlist_space(&ds->ds_deadlist,
2703 &dlused, &dlcomp, &dluncomp);
2706 pa->uncomp += dluncomp;
2710 * If we are a clone of a clone then we never reached ORIGIN,
2711 * so we need to subtract out the clone origin's used space.
2713 if (pa->origin_origin) {
2714 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2715 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2716 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2719 /* Check that there is enough space here */
2720 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2726 * Compute the amounts of space that will be used by snapshots
2727 * after the promotion (for both origin and clone). For each,
2728 * it is the amount of space that will be on all of their
2729 * deadlists (that was not born before their new origin).
2731 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2735 * Note, typically this will not be a clone of a clone,
2736 * so dd_origin_txg will be < TXG_INITIAL, so
2737 * these snaplist_space() -> dsl_deadlist_space_range()
2738 * calls will be fast because they do not have to
2739 * iterate over all bps.
2741 snap = list_head(&pa->origin_snaps);
2742 err = snaplist_space(&pa->shared_snaps,
2743 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2747 err = snaplist_space(&pa->clone_snaps,
2748 snap->ds->ds_dir->dd_origin_txg, &space);
2751 pa->cloneusedsnap += space;
2753 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2754 err = snaplist_space(&pa->origin_snaps,
2755 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2762 pa->err_ds = snap->ds->ds_snapname;
2767 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2769 dsl_dataset_t *hds = arg1;
2770 struct promotearg *pa = arg2;
2771 struct promotenode *snap = list_head(&pa->shared_snaps);
2772 dsl_dataset_t *origin_ds = snap->ds;
2773 dsl_dataset_t *origin_head;
2774 dsl_dir_t *dd = hds->ds_dir;
2775 dsl_pool_t *dp = hds->ds_dir->dd_pool;
2776 dsl_dir_t *odd = NULL;
2777 uint64_t oldnext_obj;
2780 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2782 snap = list_head(&pa->origin_snaps);
2783 origin_head = snap->ds;
2786 * We need to explicitly open odd, since origin_ds's dd will be changing.
2789 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2792 /* change origin's next snap */
2793 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2794 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2795 snap = list_tail(&pa->clone_snaps);
2796 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2797 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2799 /* change the origin's next clone */
2800 if (origin_ds->ds_phys->ds_next_clones_obj) {
2801 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2802 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2803 origin_ds->ds_phys->ds_next_clones_obj,
2808 dmu_buf_will_dirty(dd->dd_dbuf, tx);
2809 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2810 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2811 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2812 dmu_buf_will_dirty(odd->dd_dbuf, tx);
2813 odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2814 origin_head->ds_dir->dd_origin_txg =
2815 origin_ds->ds_phys->ds_creation_txg;
2817 /* change dd_clone entries */
2818 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2819 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2820 odd->dd_phys->dd_clones, hds->ds_object, tx));
2821 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2822 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2823 hds->ds_object, tx));
2825 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2826 pa->origin_origin->ds_dir->dd_phys->dd_clones,
2827 origin_head->ds_object, tx));
2828 if (dd->dd_phys->dd_clones == 0) {
2829 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2830 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2832 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2833 dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2837 /* move snapshots to this dir */
2838 for (snap = list_head(&pa->shared_snaps); snap;
2839 snap = list_next(&pa->shared_snaps, snap)) {
2840 dsl_dataset_t *ds = snap->ds;
2842 /* unregister props as dsl_dir is changing */
2843 if (ds->ds_objset) {
2844 dmu_objset_evict(ds->ds_objset);
2845 ds->ds_objset = NULL;
2847 /* move snap name entry */
2848 VERIFY(0 == dsl_dataset_get_snapname(ds));
2849 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2850 ds->ds_snapname, tx));
2851 VERIFY(0 == zap_add(dp->dp_meta_objset,
2852 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2853 8, 1, &ds->ds_object, tx));
2855 /* change containing dsl_dir */
2856 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2857 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2858 ds->ds_phys->ds_dir_obj = dd->dd_object;
2859 ASSERT3P(ds->ds_dir, ==, odd);
2860 dsl_dir_close(ds->ds_dir, ds);
2861 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2862 NULL, ds, &ds->ds_dir));
2864 /* move any clone references */
2865 if (ds->ds_phys->ds_next_clones_obj &&
2866 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2870 for (zap_cursor_init(&zc, dp->dp_meta_objset,
2871 ds->ds_phys->ds_next_clones_obj);
2872 zap_cursor_retrieve(&zc, &za) == 0;
2873 zap_cursor_advance(&zc)) {
2874 dsl_dataset_t *cnds;
2877 if (za.za_first_integer == oldnext_obj) {
2879 * We've already moved the
2880 * origin's reference.
2885 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2886 za.za_first_integer, FTAG, &cnds));
2887 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2889 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2890 odd->dd_phys->dd_clones, o, tx), ==, 0);
2891 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2892 dd->dd_phys->dd_clones, o, tx), ==, 0);
2893 dsl_dataset_rele(cnds, FTAG);
2895 zap_cursor_fini(&zc);
2898 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2902 * Change space accounting.
2903 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2904 * both be valid, or both be 0 (resulting in delta == 0). This
2905 * is true for each of {clone,origin} independently.
2908 delta = pa->cloneusedsnap -
2909 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2910 ASSERT3S(delta, >=, 0);
2911 ASSERT3U(pa->used, >=, delta);
2912 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2913 dsl_dir_diduse_space(dd, DD_USED_HEAD,
2914 pa->used - delta, pa->comp, pa->uncomp, tx);
2916 delta = pa->originusedsnap -
2917 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2918 ASSERT3S(delta, <=, 0);
2919 ASSERT3U(pa->used, >=, -delta);
2920 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2921 dsl_dir_diduse_space(odd, DD_USED_HEAD,
2922 -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2924 origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2926 /* log history record */
2927 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2928 "dataset = %llu", hds->ds_object);
2930 dsl_dir_close(odd, FTAG);
2933 static char *snaplist_tag = "snaplist";
2935 * Make a list of dsl_dataset_t's for the snapshots between first_obj
2936 * (exclusive) and last_obj (inclusive). The list will be in reverse
2937 * order (last_obj will be the list_head()). If first_obj == 0, do all
2938 * snapshots back to this dataset's origin.
2941 snaplist_make(dsl_pool_t *dp, boolean_t own,
2942 uint64_t first_obj, uint64_t last_obj, list_t *l)
2944 uint64_t obj = last_obj;
2946 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2948 list_create(l, sizeof (struct promotenode),
2949 offsetof(struct promotenode, link));
2951 while (obj != first_obj) {
2953 struct promotenode *snap;
2957 err = dsl_dataset_own_obj(dp, obj,
2958 0, snaplist_tag, &ds);
2960 dsl_dataset_make_exclusive(ds, snaplist_tag);
2962 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2964 if (err == ENOENT) {
2965 /* lost race with snapshot destroy */
2966 struct promotenode *last = list_tail(l);
2967 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2968 obj = last->ds->ds_phys->ds_prev_snap_obj;
2975 first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2977 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2979 list_insert_tail(l, snap);
2980 obj = ds->ds_phys->ds_prev_snap_obj;
2987 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2989 struct promotenode *snap;
2992 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2993 uint64_t used, comp, uncomp;
2994 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2995 mintxg, UINT64_MAX, &used, &comp, &uncomp);
3002 snaplist_destroy(list_t *l, boolean_t own)
3004 struct promotenode *snap;
3006 if (!l || !list_link_active(&l->list_head))
3009 while ((snap = list_tail(l)) != NULL) {
3010 list_remove(l, snap);
3012 dsl_dataset_disown(snap->ds, snaplist_tag);
3014 dsl_dataset_rele(snap->ds, snaplist_tag);
3015 kmem_free(snap, sizeof (struct promotenode));
3021 * Promote a clone. Nomenclature note:
3022 * "clone" or "cds": the original clone which is being promoted
3023 * "origin" or "ods": the snapshot which is originally the clone's origin
3024 * "origin head" or "ohds": the dataset which is the head
3025 * (filesystem/volume) for the origin
3026 * "origin origin": the origin of the origin's filesystem (typically
3027 * NULL, indicating that the clone is not a clone of a clone).
3030 dsl_dataset_promote(const char *name, char *conflsnap)
3035 dmu_object_info_t doi;
3036 struct promotearg pa = { 0 };
3037 struct promotenode *snap;
3040 err = dsl_dataset_hold(name, FTAG, &ds);
3046 err = dmu_object_info(dp->dp_meta_objset,
3047 ds->ds_phys->ds_snapnames_zapobj, &doi);
3049 dsl_dataset_rele(ds, FTAG);
3053 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3054 dsl_dataset_rele(ds, FTAG);
3059 * We are going to inherit all the snapshots taken before our
3060 * origin (i.e., our new origin will be our parent's origin).
3061 * Take ownership of them so that we can rename them into our namespace.
3064 rw_enter(&dp->dp_config_rwlock, RW_READER);
3066 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3071 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3075 snap = list_head(&pa.shared_snaps);
3076 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3077 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3078 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3082 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3083 err = dsl_dataset_hold_obj(dp,
3084 snap->ds->ds_dir->dd_phys->dd_origin_obj,
3085 FTAG, &pa.origin_origin);
3091 rw_exit(&dp->dp_config_rwlock);
3094 * Add in 128x the snapnames zapobj size, since we will be moving
3095 * a bunch of snapnames to the promoted ds, and dirtying their bonus buffers.
3099 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3100 dsl_dataset_promote_sync, ds, &pa,
3101 2 + 2 * doi.doi_physical_blocks_512);
3102 if (err && pa.err_ds && conflsnap)
3103 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3106 snaplist_destroy(&pa.shared_snaps, B_TRUE);
3107 snaplist_destroy(&pa.clone_snaps, B_FALSE);
3108 snaplist_destroy(&pa.origin_snaps, B_FALSE);
3109 if (pa.origin_origin)
3110 dsl_dataset_rele(pa.origin_origin, FTAG);
3111 dsl_dataset_rele(ds, FTAG);
3115 struct cloneswaparg {
3116 dsl_dataset_t *cds; /* clone dataset */
3117 dsl_dataset_t *ohds; /* origin's head dataset */
3119 int64_t unused_refres_delta; /* change in unconsumed refreservation */
3124 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3126 struct cloneswaparg *csa = arg1;
3128 /* they should both be heads */
3129 if (dsl_dataset_is_snapshot(csa->cds) ||
3130 dsl_dataset_is_snapshot(csa->ohds))
3133 /* the branch point should be just before them */
3134 if (csa->cds->ds_prev != csa->ohds->ds_prev)
3137 /* cds should be the clone (unless they are unrelated) */
3138 if (csa->cds->ds_prev != NULL &&
3139 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3140 csa->ohds->ds_object !=
3141 csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3144 /* the clone should be a child of the origin */
3145 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3148 /* ohds shouldn't be modified unless 'force' */
3149 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3152 /* adjust amount of any unconsumed refreservation */
3153 csa->unused_refres_delta =
3154 (int64_t)MIN(csa->ohds->ds_reserved,
3155 csa->ohds->ds_phys->ds_unique_bytes) -
3156 (int64_t)MIN(csa->ohds->ds_reserved,
3157 csa->cds->ds_phys->ds_unique_bytes);
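	/*
	 * Illustrative example (hypothetical numbers): with a 10 GB
	 * refreservation on ohds, 8 GB unique on ohds and 3 GB unique on the
	 * clone, the delta is MIN(10, 8) - MIN(10, 3) = 5 GB of additional
	 * unconsumed refreservation that must be accounted for after the swap.
	 */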
3159 if (csa->unused_refres_delta > 0 &&
3160 csa->unused_refres_delta >
3161 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3164 if (csa->ohds->ds_quota != 0 &&
3165 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3173 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3175 struct cloneswaparg *csa = arg1;
3176 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3178 ASSERT(csa->cds->ds_reserved == 0);
3179 ASSERT(csa->ohds->ds_quota == 0 ||
3180 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3182 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3183 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3185 if (csa->cds->ds_objset != NULL) {
3186 dmu_objset_evict(csa->cds->ds_objset);
3187 csa->cds->ds_objset = NULL;
3190 if (csa->ohds->ds_objset != NULL) {
3191 dmu_objset_evict(csa->ohds->ds_objset);
3192 csa->ohds->ds_objset = NULL;
3196 * Reset origin's unique bytes, if it exists.
3198 if (csa->cds->ds_prev) {
3199 dsl_dataset_t *origin = csa->cds->ds_prev;
3200 uint64_t comp, uncomp;
3202 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3203 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3204 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3205 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3211 tmp = csa->ohds->ds_phys->ds_bp;
3212 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3213 csa->cds->ds_phys->ds_bp = tmp;
3216 /* set dd_*_bytes */
3218 int64_t dused, dcomp, duncomp;
3219 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3220 uint64_t odl_used, odl_comp, odl_uncomp;
3222 ASSERT3U(csa->cds->ds_dir->dd_phys->
3223 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3225 dsl_deadlist_space(&csa->cds->ds_deadlist,
3226 &cdl_used, &cdl_comp, &cdl_uncomp);
3227 dsl_deadlist_space(&csa->ohds->ds_deadlist,
3228 &odl_used, &odl_comp, &odl_uncomp);
3230 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
3231 (csa->ohds->ds_phys->ds_used_bytes + odl_used);
3232 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3233 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3234 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3236 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3238 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3239 dused, dcomp, duncomp, tx);
3240 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3241 -dused, -dcomp, -duncomp, tx);
3244 * The difference in the space used by snapshots is the
3245 * difference in snapshot space due to the head's
3246 * deadlist (since that's the only thing that's
3247 * changing that affects the snapused).
3249 dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3250 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3251 &cdl_used, &cdl_comp, &cdl_uncomp);
3252 dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3253 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3254 &odl_used, &odl_comp, &odl_uncomp);
3255 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3256 DD_USED_HEAD, DD_USED_SNAP, tx);
3259 /* swap ds_*_bytes */
3260 SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
3261 csa->cds->ds_phys->ds_used_bytes);
3262 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3263 csa->cds->ds_phys->ds_compressed_bytes);
3264 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3265 csa->cds->ds_phys->ds_uncompressed_bytes);
3266 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3267 csa->cds->ds_phys->ds_unique_bytes);
3269 /* apply any parent delta for change in unconsumed refreservation */
3270 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3271 csa->unused_refres_delta, 0, 0, tx);
3276 dsl_deadlist_close(&csa->cds->ds_deadlist);
3277 dsl_deadlist_close(&csa->ohds->ds_deadlist);
3278 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3279 csa->cds->ds_phys->ds_deadlist_obj);
3280 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3281 csa->cds->ds_phys->ds_deadlist_obj);
3282 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3283 csa->ohds->ds_phys->ds_deadlist_obj);
3285 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3289 * Swap 'clone' with its origin head dataset. Used at the end of "zfs
3290 * recv" into an existing fs to swizzle the file system to the new
3291 * version, and by "zfs rollback". Can also be used to swap two
3292 * independent head datasets if neither has any snapshots.
3295 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3298 struct cloneswaparg csa;
3301 ASSERT(clone->ds_owner);
3302 ASSERT(origin_head->ds_owner);
3305 * Need exclusive access for the swap. If we're swapping these
3306 * datasets back after an error, we already hold the locks.
3308 if (!RW_WRITE_HELD(&clone->ds_rwlock))
3309 rw_enter(&clone->ds_rwlock, RW_WRITER);
3310 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3311 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3312 rw_exit(&clone->ds_rwlock);
3313 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3314 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3315 rw_exit(&origin_head->ds_rwlock);
3320 csa.ohds = origin_head;
3322 error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3323 dsl_dataset_clone_swap_check,
3324 dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3329 * Given a pool name and a dataset object number in that pool,
3330 * return the name of that dataset.
3333 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3340 if ((error = spa_open(pname, &spa, FTAG)) != 0)
3342 dp = spa_get_dsl(spa);
3343 rw_enter(&dp->dp_config_rwlock, RW_READER);
3344 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3345 dsl_dataset_name(ds, buf);
3346 dsl_dataset_rele(ds, FTAG);
3348 rw_exit(&dp->dp_config_rwlock);
3349 spa_close(spa, FTAG);
3355 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3356 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3360 ASSERT3S(asize, >, 0);
3363 * *ref_rsrv is the portion of asize that will come from any
3364 * unconsumed refreservation space.
3368 mutex_enter(&ds->ds_lock);
3370 * Make a space adjustment for reserved bytes.
3372 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3374 ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3375 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3377 asize - MIN(asize, parent_delta(ds, asize + inflight));
3380 if (!check_quota || ds->ds_quota == 0) {
3381 mutex_exit(&ds->ds_lock);
3385 * If they are requesting more space, and our current estimate
3386 * is over quota, they get to try again unless the actual
3387 * on-disk is over quota and there are no pending changes (which
3388 * may free up space for us).
3390 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3391 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3396 mutex_exit(&ds->ds_lock);
3403 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3405 dsl_dataset_t *ds = arg1;
3406 dsl_prop_setarg_t *psa = arg2;
3409 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3412 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3415 if (psa->psa_effective_value == 0)
3418 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3419 psa->psa_effective_value < ds->ds_reserved)
3425 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3428 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3430 dsl_dataset_t *ds = arg1;
3431 dsl_prop_setarg_t *psa = arg2;
3432 uint64_t effective_value = psa->psa_effective_value;
3434 dsl_prop_set_sync(ds, psa, tx);
3435 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3437 if (ds->ds_quota != effective_value) {
3438 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3439 ds->ds_quota = effective_value;
3441 spa_history_log_internal(LOG_DS_REFQUOTA,
3442 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3443 (longlong_t)ds->ds_quota, ds->ds_object);
3448 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3451 dsl_prop_setarg_t psa;
3454 dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3456 err = dsl_dataset_hold(dsname, FTAG, &ds);
3461 * If someone removes a file, then tries to set the quota, we
3462 * want to make sure the file freeing takes effect.
3464 txg_wait_open(ds->ds_dir->dd_pool, 0);
3466 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3467 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3470 dsl_dataset_rele(ds, FTAG);
3475 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3477 dsl_dataset_t *ds = arg1;
3478 dsl_prop_setarg_t *psa = arg2;
3479 uint64_t effective_value;
3483 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3484 SPA_VERSION_REFRESERVATION)
3487 if (dsl_dataset_is_snapshot(ds))
3490 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3493 effective_value = psa->psa_effective_value;
3496 * If we are doing the preliminary check in open context, the
3497 * space estimates may be inaccurate.
3499 if (!dmu_tx_is_syncing(tx))
3502 mutex_enter(&ds->ds_lock);
3503 if (!DS_UNIQUE_IS_ACCURATE(ds))
3504 dsl_dataset_recalc_head_uniq(ds);
3505 unique = ds->ds_phys->ds_unique_bytes;
3506 mutex_exit(&ds->ds_lock);
3508 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3509 uint64_t delta = MAX(unique, effective_value) -
3510 MAX(unique, ds->ds_reserved);
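		/*
		 * Illustrative example (hypothetical numbers): with 10 GB
		 * unique, a current refreservation of 5 GB and a requested
		 * value of 20 GB, delta = MAX(10, 20) - MAX(10, 5) = 10 GB
		 * of extra space that must be available in the dsl_dir.
		 */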
3512 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3514 if (ds->ds_quota > 0 &&
3515 effective_value > ds->ds_quota)
3523 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3525 dsl_dataset_t *ds = arg1;
3526 dsl_prop_setarg_t *psa = arg2;
3527 uint64_t effective_value = psa->psa_effective_value;
3531 dsl_prop_set_sync(ds, psa, tx);
3532 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3534 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3536 mutex_enter(&ds->ds_dir->dd_lock);
3537 mutex_enter(&ds->ds_lock);
3538 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3539 unique = ds->ds_phys->ds_unique_bytes;
3540 delta = MAX(0, (int64_t)(effective_value - unique)) -
3541 MAX(0, (int64_t)(ds->ds_reserved - unique));
3542 ds->ds_reserved = effective_value;
3543 mutex_exit(&ds->ds_lock);
3545 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3546 mutex_exit(&ds->ds_dir->dd_lock);
3548 spa_history_log_internal(LOG_DS_REFRESERV,
3549 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3550 (longlong_t)effective_value, ds->ds_object);
3554 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3555 uint64_t reservation)
3558 dsl_prop_setarg_t psa;
3561 dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3564 err = dsl_dataset_hold(dsname, FTAG, &ds);
3568 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3569 dsl_dataset_set_reservation_check,
3570 dsl_dataset_set_reservation_sync, ds, &psa, 0);
3572 dsl_dataset_rele(ds, FTAG);
3576 typedef struct zfs_hold_cleanup_arg {
3579 char htag[MAXNAMELEN];
3580 } zfs_hold_cleanup_arg_t;
3583 dsl_dataset_user_release_onexit(void *arg)
3585 zfs_hold_cleanup_arg_t *ca = arg;
3587 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3589 kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3593 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3596 zfs_hold_cleanup_arg_t *ca;
3598 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3599 ca->dp = ds->ds_dir->dd_pool;
3600 ca->dsobj = ds->ds_object;
3601 (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3602 VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3603 dsl_dataset_user_release_onexit, ca, NULL));
3607 * If you add new checks here, you may need to add
3608 * additional checks to the "temporary" case in
3609 * snapshot_check() in dmu_objset.c.
3612 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3614 dsl_dataset_t *ds = arg1;
3615 struct dsl_ds_holdarg *ha = arg2;
3616 char *htag = ha->htag;
3617 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3620 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3623 if (!dsl_dataset_is_snapshot(ds))
3626 /* tags must be unique */
3627 mutex_enter(&ds->ds_lock);
3628 if (ds->ds_phys->ds_userrefs_obj) {
3629 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3633 else if (error == ENOENT)
3636 mutex_exit(&ds->ds_lock);
3638 if (error == 0 && ha->temphold &&
3639 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3646 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3648 dsl_dataset_t *ds = arg1;
3649 struct dsl_ds_holdarg *ha = arg2;
3650 char *htag = ha->htag;
3651 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3652 objset_t *mos = dp->dp_meta_objset;
3653 uint64_t now = gethrestime_sec();
3656 mutex_enter(&ds->ds_lock);
3657 if (ds->ds_phys->ds_userrefs_obj == 0) {
3659 * This is the first user hold for this dataset. Create
3660 * the userrefs zap object.
3662 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3663 zapobj = ds->ds_phys->ds_userrefs_obj =
3664 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3666 zapobj = ds->ds_phys->ds_userrefs_obj;
3669 mutex_exit(&ds->ds_lock);
3671 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3674 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3678 spa_history_log_internal(LOG_DS_USER_HOLD,
3679 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3680 (int)ha->temphold, ds->ds_object);
3684 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3686 struct dsl_ds_holdarg *ha = arg;
3691 /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3692 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3693 error = dsl_dataset_hold(name, ha->dstg, &ds);
3696 ha->gotone = B_TRUE;
3697 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3698 dsl_dataset_user_hold_sync, ds, ha, 0);
3699 } else if (error == ENOENT && ha->recursive) {
3702 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3708 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3711 struct dsl_ds_holdarg *ha;
3714 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3716 ha->temphold = temphold;
3717 error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3718 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3720 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3726 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3727 boolean_t recursive, boolean_t temphold, int cleanup_fd)
3729 struct dsl_ds_holdarg *ha;
3730 dsl_sync_task_t *dst;
3735 if (cleanup_fd != -1) {
3736 /* Currently we only support cleanup-on-exit of tempholds. */
3739 error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3744 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3746 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3748 error = spa_open(dsname, &spa, FTAG);
3750 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3751 if (cleanup_fd != -1)
3752 zfs_onexit_fd_rele(cleanup_fd);
3756 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3758 ha->snapname = snapname;
3759 ha->recursive = recursive;
3760 ha->temphold = temphold;
3763 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3764 ha, DS_FIND_CHILDREN);
3766 error = dsl_dataset_user_hold_one(dsname, ha);
3769 error = dsl_sync_task_group_wait(ha->dstg);
3771 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3772 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3773 dsl_dataset_t *ds = dst->dst_arg1;
3776 dsl_dataset_name(ds, ha->failed);
3777 *strchr(ha->failed, '@') = '\0';
3778 } else if (error == 0 && minor != 0 && temphold) {
3780 * If this hold is to be released upon process exit,
3781 * register that action now.
3783 dsl_register_onexit_hold_cleanup(ds, htag, minor);
3785 dsl_dataset_rele(ds, ha->dstg);
3788 if (error == 0 && recursive && !ha->gotone)
3792 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3794 dsl_sync_task_group_destroy(ha->dstg);
3796 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3797 spa_close(spa, FTAG);
3798 if (cleanup_fd != -1)
3799 zfs_onexit_fd_rele(cleanup_fd);
3803 struct dsl_ds_releasearg {
3806 boolean_t own; /* do we own or just hold ds? */
3810 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3811 boolean_t *might_destroy)
3813 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3818 *might_destroy = B_FALSE;
3820 mutex_enter(&ds->ds_lock);
3821 zapobj = ds->ds_phys->ds_userrefs_obj;
3823 /* The tag can't possibly exist */
3824 mutex_exit(&ds->ds_lock);
3828 /* Make sure the tag exists */
3829 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3831 mutex_exit(&ds->ds_lock);
3832 if (error == ENOENT)
3837 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3838 DS_IS_DEFER_DESTROY(ds))
3839 *might_destroy = B_TRUE;
3841 mutex_exit(&ds->ds_lock);
3846 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3848 struct dsl_ds_releasearg *ra = arg1;
3849 dsl_dataset_t *ds = ra->ds;
3850 boolean_t might_destroy;
3853 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3856 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3860 if (might_destroy) {
3861 struct dsl_ds_destroyarg dsda = {0};
3863 if (dmu_tx_is_syncing(tx)) {
3865 * If we're not prepared to remove the snapshot,
3866 * we can't allow the release to happen right now.
3872 dsda.releasing = B_TRUE;
3873 return (dsl_dataset_destroy_check(&dsda, tag, tx));
3880 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3882 struct dsl_ds_releasearg *ra = arg1;
3883 dsl_dataset_t *ds = ra->ds;
3884 dsl_pool_t *dp = ds->ds_dir->dd_pool;
3885 objset_t *mos = dp->dp_meta_objset;
3887 uint64_t dsobj = ds->ds_object;
3891 mutex_enter(&ds->ds_lock);
3893 refs = ds->ds_userrefs;
3894 mutex_exit(&ds->ds_lock);
3895 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3896 VERIFY(error == 0 || error == ENOENT);
3897 zapobj = ds->ds_phys->ds_userrefs_obj;
3898 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3899 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3900 DS_IS_DEFER_DESTROY(ds)) {
3901 struct dsl_ds_destroyarg dsda = {0};
3905 dsda.releasing = B_TRUE;
3906 /* We already did the destroy_check */
3907 dsl_dataset_destroy_sync(&dsda, tag, tx);
3910 spa_history_log_internal(LOG_DS_USER_RELEASE,
3911 dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3912 ra->htag, (longlong_t)refs, dsobj);
3916 dsl_dataset_user_release_one(const char *dsname, void *arg)
3918 struct dsl_ds_holdarg *ha = arg;
3919 struct dsl_ds_releasearg *ra;
3922 void *dtag = ha->dstg;
3924 boolean_t own = B_FALSE;
3925 boolean_t might_destroy;
3927 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3928 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3929 error = dsl_dataset_hold(name, dtag, &ds);
3931 if (error == ENOENT && ha->recursive)
3933 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3937 ha->gotone = B_TRUE;
3939 ASSERT(dsl_dataset_is_snapshot(ds));
3941 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3943 dsl_dataset_rele(ds, dtag);
3947 if (might_destroy) {
3949 name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3950 error = zfs_unmount_snap(name, NULL);
3953 dsl_dataset_rele(ds, dtag);
3957 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3958 dsl_dataset_rele(ds, dtag);
3962 dsl_dataset_make_exclusive(ds, dtag);
3966 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3968 ra->htag = ha->htag;
3970 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3971 dsl_dataset_user_release_sync, ra, dtag, 0);
3977 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3978 boolean_t recursive)
3980 struct dsl_ds_holdarg *ha;
3981 dsl_sync_task_t *dst;
3986 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3988 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3990 error = spa_open(dsname, &spa, FTAG);
3992 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3996 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3998 ha->snapname = snapname;
3999 ha->recursive = recursive;
4001 error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
4002 ha, DS_FIND_CHILDREN);
4004 error = dsl_dataset_user_release_one(dsname, ha);
4007 error = dsl_sync_task_group_wait(ha->dstg);
4009 for (dst = list_head(&ha->dstg->dstg_tasks); dst;
4010 dst = list_next(&ha->dstg->dstg_tasks, dst)) {
4011 struct dsl_ds_releasearg *ra = dst->dst_arg1;
4012 dsl_dataset_t *ds = ra->ds;
4015 dsl_dataset_name(ds, ha->failed);
4018 dsl_dataset_disown(ds, ha->dstg);
4020 dsl_dataset_rele(ds, ha->dstg);
4022 kmem_free(ra, sizeof (struct dsl_ds_releasearg));
4025 if (error == 0 && recursive && !ha->gotone)
4028 if (error && error != EBUSY)
4029 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4031 dsl_sync_task_group_destroy(ha->dstg);
4032 kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4033 spa_close(spa, FTAG);
4036 * We can get EBUSY if we were racing with deferred destroy and
4037 * dsl_dataset_user_release_check() hadn't done the necessary
4038 * open context setup. We can also get EBUSY if we're racing
4039 * with destroy and that thread is the ds_owner. Either way
4040 * the busy condition should be transient, and we should retry
4041 * the release operation.
4050 * Called at spa_load time (with retry == B_FALSE) to release a stale
4051 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4054 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4064 rw_enter(&dp->dp_config_rwlock, RW_READER);
4065 error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4066 rw_exit(&dp->dp_config_rwlock);
4069 namelen = dsl_dataset_namelen(ds)+1;
4070 name = kmem_alloc(namelen, KM_SLEEP);
4071 dsl_dataset_name(ds, name);
4072 dsl_dataset_rele(ds, FTAG);
4074 snap = strchr(name, '@');
4077 error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4078 kmem_free(name, namelen);
4081 * The object can't have been destroyed because we have a hold,
4082 * but it might have been renamed, resulting in ENOENT. Retry
4083 * if we've been requested to do so.
4085 * It would be nice if we could use the dsobj all the way
4086 * through and avoid ENOENT entirely. But we might need to
4087 * unmount the snapshot, and there's currently no way to lookup
4088 * a vfsp using a ZFS object id.
4090 } while ((error == ENOENT) && retry);
4096 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4101 err = dsl_dataset_hold(dsname, FTAG, &ds);
4105 VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4106 if (ds->ds_phys->ds_userrefs_obj != 0) {
4107 zap_attribute_t *za;
4110 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4111 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4112 ds->ds_phys->ds_userrefs_obj);
4113 zap_cursor_retrieve(&zc, za) == 0;
4114 zap_cursor_advance(&zc)) {
4115 VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4116 za->za_first_integer));
4118 zap_cursor_fini(&zc);
4119 kmem_free(za, sizeof (zap_attribute_t));
4121 dsl_dataset_rele(ds, FTAG);
4126 * Note, this function is used as the callback for dmu_objset_find(). We
4127 * always return 0 so that we will continue to find and process
4128 * inconsistent datasets, even if we encounter an error trying to
4129 * process one of them.
4133 dsl_destroy_inconsistent(const char *dsname, void *arg)
4137 if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4138 if (DS_IS_INCONSISTENT(ds))
4139 (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4141 dsl_dataset_disown(ds, FTAG);
4147 * Return (in *usedp) the amount of space written in new that is not
4148 * present in oldsnap. New may be a snapshot or the head. Old must be
4149 * a snapshot before new, in new's filesystem (or its origin). If not then
4150 * fail and return EINVAL.
4152 * The written space is calculated by considering two components: First, we
4153 * ignore any freed space, and calculate the written as new's used space
4154 * minus old's used space. Next, we add in the amount of space that was freed
4155 * between the two snapshots, thus reducing new's used space relative to old's.
4156 * Specifically, this is the space that was born before old->ds_creation_txg,
4157 * and freed before new (i.e., on new's deadlist or a previous deadlist).
4159 * space freed [---------------------]
4160 * snapshots ---O-------O--------O-------O------
4164 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4165 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4169 dsl_pool_t *dp = new->ds_dir->dd_pool;
4172 *usedp += new->ds_phys->ds_used_bytes;
4173 *usedp -= oldsnap->ds_phys->ds_used_bytes;
4176 *compp += new->ds_phys->ds_compressed_bytes;
4177 *compp -= oldsnap->ds_phys->ds_compressed_bytes;
4180 *uncompp += new->ds_phys->ds_uncompressed_bytes;
4181 *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
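	/*
	 * Illustrative example (hypothetical numbers): if oldsnap referenced
	 * 100 GB and new references 150 GB, the running total is 50 GB so
	 * far.  If 20 GB of data present in oldsnap was later freed (and so
	 * sits on the intervening deadlists), the loop below adds it back,
	 * for a total of 70 GB written since oldsnap.
	 */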
4183 rw_enter(&dp->dp_config_rwlock, RW_READER);
4184 snapobj = new->ds_object;
4185 while (snapobj != oldsnap->ds_object) {
4186 dsl_dataset_t *snap;
4187 uint64_t used, comp, uncomp;
4189 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4193 if (snap->ds_phys->ds_prev_snap_txg ==
4194 oldsnap->ds_phys->ds_creation_txg) {
4196 * The blocks in the deadlist cannot be born after
4197 * ds_prev_snap_txg, so get the whole deadlist space,
4198 * which is more efficient (especially for old-format
4199 * deadlists). Unfortunately the deadlist code
4200 * doesn't have enough information to make this
4201 * optimization itself.
4203 dsl_deadlist_space(&snap->ds_deadlist,
4204 &used, &comp, &uncomp);
4206 dsl_deadlist_space_range(&snap->ds_deadlist,
4207 0, oldsnap->ds_phys->ds_creation_txg,
4208 &used, &comp, &uncomp);
4215 * If we get to the beginning of the chain of snapshots
4216 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4217 * was not a snapshot of/before new.
4219 snapobj = snap->ds_phys->ds_prev_snap_obj;
4220 dsl_dataset_rele(snap, FTAG);
4227 rw_exit(&dp->dp_config_rwlock);
4232 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4233 * lastsnap, and all snapshots in between are deleted.
4235 * blocks that would be freed [---------------------------]
4236 * snapshots ---O-------O--------O-------O--------O
4237 * firstsnap lastsnap
4239 * This is the set of blocks that were born after the snap before firstsnap,
4240 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4241 * last snap (i.e., is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4242 * We calculate this by iterating over the relevant deadlists (from the snap
4243 * after lastsnap, backward to the snap after firstsnap), summing up the
4244 * space on the deadlist that was born after the snap before firstsnap.
4247 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4248 dsl_dataset_t *lastsnap,
4249 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4253 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4255 ASSERT(dsl_dataset_is_snapshot(firstsnap));
4256 ASSERT(dsl_dataset_is_snapshot(lastsnap));
4259 * Check that the snapshots are in the same dsl_dir, and firstsnap
4260 * is before lastsnap.
4262 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4263 firstsnap->ds_phys->ds_creation_txg >
4264 lastsnap->ds_phys->ds_creation_txg)
4267 *usedp = *compp = *uncompp = 0;
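	/*
	 * Illustrative note (not in the original source): when firstsnap and
	 * lastsnap are the same snapshot, the loop below visits only the
	 * deadlist of the following snapshot, and the space counted (blocks
	 * born after firstsnap's previous snapshot) is exactly that
	 * snapshot's unique bytes -- e.g. 7 GB unique means destroying it
	 * would reclaim 7 GB.
	 */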
4269 rw_enter(&dp->dp_config_rwlock, RW_READER);
4270 snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4271 while (snapobj != firstsnap->ds_object) {
4273 uint64_t used, comp, uncomp;
4275 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4279 dsl_deadlist_space_range(&ds->ds_deadlist,
4280 firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4281 &used, &comp, &uncomp);
4286 snapobj = ds->ds_phys->ds_prev_snap_obj;
4287 ASSERT3U(snapobj, !=, 0);
4288 dsl_dataset_rele(ds, FTAG);
4290 rw_exit(&dp->dp_config_rwlock);