 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright (c) 2011 Martin Matuska <mm@FreeBSD.org>
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 RackTop Systems.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa_impl.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
SYSCTL_DECL(_vfs_zfs);

/*
 * The SPA supports block sizes up to 16MB. However, very large blocks
 * can have an impact on i/o latency (e.g. tying up a spinning disk for
 * ~300ms), and also potentially on the memory allocator. Therefore,
 * we do not allow the recordsize to be set larger than zfs_max_recordsize
 * (default 1MB). Larger blocks can be created by changing this tunable,
 * and pools with larger blocks can always be imported and used, regardless
 * of this setting.
 */
int zfs_max_recordsize = 1 * 1024 * 1024;
SYSCTL_INT(_vfs_zfs, OID_AUTO, max_recordsize, CTLFLAG_RWTUN,
    &zfs_max_recordsize, 0,
    "Maximum block size. Expect dragons when tuning this.");
#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

#define	DS_REF_MAX	(1ULL << 62)
extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);

static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
    uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
    dmu_tx_t *tx);

extern int spa_asize_inflation;

static zil_header_t zero_zil;
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	dsl_dataset_phys_t *ds_phys;
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	ds_phys = dsl_dataset_phys(ds);
	old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
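
/*
 * Worked example (illustrative numbers only): with ds_reserved = 10G and
 * ds_unique_bytes = 8G, a delta of +1G gives old = MAX(8G, 10G) = 10G and
 * new = MAX(9G, 10G) = 10G, so parent_delta() returns 0; the write is
 * absorbed entirely by the refreservation already charged to our
 * ancestors.  Once unique space exceeds the reservation (say 11G -> 12G),
 * the full delta is propagated again.
 */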
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    used, compressed, uncompressed);
		return;
	}

	ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes += used;
	dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
	dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
	dsl_dataset_phys(ds)->ds_unique_bytes += used;

	if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
		ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_BLOCKS] =
		    B_TRUE;
	}

	spa_feature_t f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
	if (f != SPA_FEATURE_NONE)
		ds->ds_feature_activation_needed[f] = B_TRUE;

	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, NULL);
}
/*
 * Called when the specified segment has been remapped, and is thus no
 * longer referenced in the head dataset.  The vdev must be indirect.
 *
 * If the segment is referenced by a snapshot, put it on the remap deadlist.
 * Otherwise, add this segment to the obsolete spacemap.
 */
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
    uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(birth <= tx->tx_txg);
	ASSERT(!ds->ds_is_snapshot);

	if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		blkptr_t fakebp;
		dva_t *dva = &fakebp.blk_dva[0];

		mutex_enter(&ds->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds)) {
			dsl_dataset_create_remap_deadlist(ds, tx);
		}
		mutex_exit(&ds->ds_remap_deadlist_lock);

		BP_ZERO(&fakebp);
		fakebp.blk_birth = birth;
		DVA_SET_VDEV(dva, vdev);
		DVA_SET_OFFSET(dva, offset);
		DVA_SET_ASIZE(dva, size);

		dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, tx);
	}
}
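
/*
 * Note on the fake blkptr above: the remap deadlist stores plain
 * (vdev, offset, asize, birth) tuples, so the code manufactures a minimal
 * blkptr_t whose first DVA carries the segment coordinates and whose
 * blk_birth carries the original birth txg.  For example, a 128K segment
 * at offset 0x1000 on indirect vdev 3, born in txg 100, is recorded as
 * DVA <3:0x1000:0x20000> with blk_birth = 100.  (Numbers illustrative.)
 */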
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	int used = bp_get_dsize_sync(spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	if (ds == NULL) {
		dsl_free(tx->tx_pool, tx->tx_txg, bp);
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    -used, -compressed, -uncompressed);
		return (used);
	}

	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!ds->ds_is_snapshot);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
		int64_t delta;

		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		mutex_enter(&ds->ds_lock);
		ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		dsl_dataset_phys(ds)->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, NULL);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj);
		ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
	dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
	ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
	dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
	ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
	dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
/*
 * We have to release the fsid synchronously or we risk that a subsequent
 * mount of the same dataset will fail to unique_insert the fsid.  This
 * failure would manifest itself as the fsid of this dataset changing
 * between mounts which makes NFS clients quite unhappy.
 */
static void
dsl_dataset_evict_sync(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	unique_remove(ds->ds_fsid_guid);
}
static void
dsl_dataset_evict_async(void *dbu)
{
	dsl_dataset_t *ds = dbu;

	ASSERT(ds->ds_owner == NULL);

	ds->ds_dbuf = NULL;

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (dsl_deadlist_is_open(&ds->ds_deadlist))
		dsl_deadlist_close(&ds->ds_deadlist);
	if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
		dsl_deadlist_close(&ds->ds_remap_deadlist);
	if (ds->ds_dir)
		dsl_dir_async_rele(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	list_destroy(&ds->ds_prop_cbs);
	if (mutex_owned(&ds->ds_lock))
		mutex_exit(&ds->ds_lock);
	mutex_destroy(&ds->ds_lock);
	if (mutex_owned(&ds->ds_opening_lock))
		mutex_exit(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_sendstream_lock);
	mutex_destroy(&ds->ds_remap_deadlist_lock);
	zfs_refcount_destroy(&ds->ds_longholds);
	rrw_destroy(&ds->ds_bp_rwlock);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err != 0)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}
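
/*
 * Example: on a dataset created case-insensitive (DS_FLAG_CI_DATASET set),
 * looking up "Snap1" above matches an existing "snap1" entry via
 * zap_lookup_norm() with MT_NORMALIZE; on a case-sensitive dataset mt is 0
 * and only an exact match succeeds.
 */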
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
    boolean_t adj_cnt)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
	matchtype_t mt = 0;
	int err;

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_NORMALIZE;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && (mt & MT_NORMALIZE))
		err = zap_remove(mos, snapobj, name, tx);

	if (err == 0 && adj_cnt)
		dsl_fs_ss_count_adjust(ds->ds_dir, -1,
		    DD_FIELD_SNAPSHOT_COUNT, tx);

	return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
{
	dmu_buf_t *dbuf = ds->ds_dbuf;
	boolean_t result = B_FALSE;

	if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
	    ds->ds_object, DMU_BONUS_BLKID, tag)) {

		if (ds == dmu_buf_get_user(dbuf))
			result = B_TRUE;
		else
			dmu_buf_rele(dbuf, tag);
	}

	return (result);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;
	dmu_object_info_t doi;

	ASSERT(dsl_pool_config_held(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err != 0)
		return (err);

	/* Make sure dsobj has the correct object type. */
	dmu_object_info_from_db(dbuf, &doi);
	if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
		dmu_buf_rele(dbuf, tag);
		return (SET_ERROR(EINVAL));
	}

	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner = NULL;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;

		err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
		    NULL, ds, &ds->ds_dir);
		if (err != 0) {
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_remap_deadlist_lock,
		    NULL, MUTEX_DEFAULT, NULL);
		rrw_init(&ds->ds_bp_rwlock, B_FALSE);
		zfs_refcount_create(&ds->ds_longholds);

		bplist_create(&ds->ds_pending_deadlist);

		list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
		    offsetof(dmu_sendarg_t, dsa_link));

		list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_ds_node));

		if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
			for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
				if (!(spa_feature_table[f].fi_flags &
				    ZFEATURE_FLAG_PER_DATASET))
					continue;
				err = zap_contains(mos, dsobj,
				    spa_feature_table[f].fi_guid);
				if (err == 0) {
					ds->ds_feature_inuse[f] = B_TRUE;
				} else {
					ASSERT3U(err, ==, ENOENT);
					err = 0;
				}
			}
		}

		if (!ds->ds_is_snapshot) {
			ds->ds_snapname[0] = '\0';
			if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
				err = dsl_dataset_hold_obj(dp,
				    dsl_dataset_phys(ds)->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
			if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
				int zaperr = zap_lookup(mos, ds->ds_object,
				    DS_FIELD_BOOKMARK_NAMES,
				    sizeof (ds->ds_bookmarks), 1,
				    &ds->ds_bookmarks);
				if (zaperr != ENOENT)
					VERIFY0(zaperr);
			}
		} else {
			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
				err = dsl_dataset_get_snapname(ds);
			if (err == 0 &&
			    dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
				err = zap_count(
				    ds->ds_dir->dd_pool->dp_meta_objset,
				    dsl_dataset_phys(ds)->ds_userrefs_obj,
				    &ds->ds_userrefs);
			}
		}

		if (err == 0 && !ds->ds_is_snapshot) {
			err = dsl_prop_get_int_ds(ds,
			    zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
			    &ds->ds_reserved);
			if (err == 0) {
				err = dsl_prop_get_int_ds(ds,
				    zfs_prop_to_name(ZFS_PROP_REFQUOTA),
				    &ds->ds_quota);
			}
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		dsl_deadlist_open(&ds->ds_deadlist,
		    mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
		uint64_t remap_deadlist_obj =
		    dsl_dataset_get_remap_deadlist_object(ds);
		if (remap_deadlist_obj != 0) {
			dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
			    remap_deadlist_obj);
		}

		dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
		    dsl_dataset_evict_async, &ds->ds_dbuf);
		if (err == 0)
			winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);

		if (err != 0 || winner != NULL) {
			bplist_destroy(&ds->ds_pending_deadlist);
			dsl_deadlist_close(&ds->ds_deadlist);
			if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
				dsl_deadlist_close(&ds->ds_remap_deadlist);
			if (ds->ds_prev)
				dsl_dataset_rele(ds->ds_prev, ds);
			dsl_dir_rele(ds->ds_dir, ds);
			list_destroy(&ds->ds_prop_cbs);
			list_destroy(&ds->ds_sendstreams);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_sendstream_lock);
			mutex_destroy(&ds->ds_remap_deadlist_lock);
			zfs_refcount_destroy(&ds->ds_longholds);
			rrw_destroy(&ds->ds_bp_rwlock);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err != 0) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
			if (ds->ds_fsid_guid !=
			    dsl_dataset_phys(ds)->ds_fsid_guid) {
				zfs_dbgmsg("ds_fsid_guid changed from "
				    "%llx to %llx for pool %s dataset id %llu",
				    (long long)
				    dsl_dataset_phys(ds)->ds_fsid_guid,
				    (long long)ds->ds_fsid_guid,
				    spa_name(dp->dp_spa),
				    (long long)dsobj);
			}
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	*dsp = ds;
	return (0);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name,
    void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	const char *snapname;
	uint64_t obj;
	int err = 0;
	dsl_dataset_t *ds;

	err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
	if (err != 0)
		return (err);

	ASSERT(dsl_pool_config_held(dp));
	obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
	if (obj != 0)
		err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
	else
		err = SET_ERROR(ENOENT);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *snap_ds;

		if (*snapname++ != '@') {
			dsl_dataset_rele(ds, tag);
			dsl_dir_rele(dd, FTAG);
			return (SET_ERROR(ENOENT));
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(ds, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_hold_obj(dp, obj, tag, &snap_ds);
		dsl_dataset_rele(ds, tag);

		if (err == 0) {
			mutex_enter(&snap_ds->ds_lock);
			if (snap_ds->ds_snapname[0] == 0)
				(void) strlcpy(snap_ds->ds_snapname, snapname,
				    sizeof (snap_ds->ds_snapname));
			mutex_exit(&snap_ds->ds_lock);
			ds = snap_ds;
		}
	}
	if (err == 0)
		*dsp = ds;
	dsl_dir_rele(dd, FTAG);
	return (err);
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
	if (err != 0)
		return (err);
	if (!dsl_dataset_tryown(*dsp, tag)) {
		dsl_dataset_rele(*dsp, tag);
		*dsp = NULL;
		return (SET_ERROR(EBUSY));
	}
	return (0);
}

int
dsl_dataset_own(dsl_pool_t *dp, const char *name,
    void *tag, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(dp, name, tag, dsp);
	if (err != 0)
		return (err);
	if (!dsl_dataset_tryown(*dsp, tag)) {
		dsl_dataset_rele(*dsp, tag);
		return (SET_ERROR(EBUSY));
	}
	return (0);
}
/*
 * See the comment above dsl_pool_hold() for details.  In summary, a long
 * hold is used to prevent destruction of a dataset while the pool hold
 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
 *
 * The dataset and pool must be held when this function is called.  After it
 * is called, the pool hold may be released while the dataset is still held
 * and accessed.
 */
void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	(void) zfs_refcount_add(&ds->ds_longholds, tag);
}

void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
	(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}

/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
	return (!zfs_refcount_is_zero(&ds->ds_longholds));
}
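
/*
 * Typical usage sketch (hypothetical caller; pool and dataset names are
 * illustrative):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *
 *	VERIFY0(dsl_pool_hold("tank", FTAG, &dp));
 *	VERIFY0(dsl_dataset_hold(dp, "tank/fs", FTAG, &ds));
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);	(pool hold dropped; ds still safe)
 *	...
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);
 */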
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY0(dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
			    <, ZFS_MAX_DATASET_NAME_LEN);
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				VERIFY3U(strlcat(name, ds->ds_snapname,
				    ZFS_MAX_DATASET_NAME_LEN), <,
				    ZFS_MAX_DATASET_NAME_LEN);
				mutex_exit(&ds->ds_lock);
			} else {
				VERIFY3U(strlcat(name, ds->ds_snapname,
				    ZFS_MAX_DATASET_NAME_LEN), <,
				    ZFS_MAX_DATASET_NAME_LEN);
			}
		}
	}
}

int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	VERIFY0(dsl_dataset_get_snapname(ds));
	mutex_enter(&ds->ds_lock);
	int len = dsl_dir_namelen(ds->ds_dir) + 1 + strlen(ds->ds_snapname);
	mutex_exit(&ds->ds_lock);
	return (len);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT3P(ds->ds_owner, ==, tag);
	ASSERT(ds->ds_dbuf != NULL);

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	mutex_exit(&ds->ds_lock);
	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
}

boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, void *tag)
{
	boolean_t gotit = FALSE;

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL && !DS_IS_INCONSISTENT(ds)) {
		ds->ds_owner = tag;
		dsl_dataset_long_hold(ds, tag);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}

boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
	boolean_t rv;
	mutex_enter(&ds->ds_lock);
	rv = (ds->ds_owner != NULL);
	mutex_exit(&ds->ds_lock);
	return (rv);
}
static void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
	uint64_t zero = 0;

	VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

	spa_feature_incr(spa, f, tx);
	dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);

	VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
	    sizeof (zero), 1, &zero, tx));
}

void
dsl_dataset_deactivate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;

	VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);

	VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
	spa_feature_decr(spa, f, tx);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	do {
		(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
		    sizeof (dsphys->ds_guid));
	} while (dsphys->ds_guid == 0);
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds; /* head of the origin snapshot */

		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    dsl_dataset_phys(origin)->ds_creation_txg;
		dsphys->ds_referenced_bytes =
		    dsl_dataset_phys(origin)->ds_referenced_bytes;
		dsphys->ds_compressed_bytes =
		    dsl_dataset_phys(origin)->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    dsl_dataset_phys(origin)->ds_uncompressed_bytes;
		rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
		dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
		rrw_exit(&origin->ds_bp_rwlock, FTAG);

		/*
		 * Inherit flags that describe the dataset's contents
		 * (INCONSISTENT) or properties (Case Insensitive).
		 */
		dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
		    (DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);

		for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
			if (origin->ds_feature_inuse[f])
				dsl_dataset_activate_feature(dsobj, f, tx);
		}

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		dsl_dataset_phys(origin)->ds_num_children++;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
		    FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
				dsl_dataset_phys(origin)->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY0(zap_add_int(mos,
			    dsl_dataset_phys(origin)->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				dsl_dir_phys(origin->ds_dir)->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY0(zap_add_int(mos,
			    dsl_dir_phys(origin->ds_dir)->dd_clones,
			    dd->dd_object, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *os;

	VERIFY0(dmu_objset_from_ds(ds, &os));
	if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
		dsl_pool_t *dp = ds->ds_dir->dd_pool;
		zio_t *zio;

		bzero(&os->os_zil_header, sizeof (os->os_zil_header));

		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dsl_dataset_sync(ds, zio, tx);
		VERIFY0(zio_wait(zio));

		/* dsl_dataset_sync_done will drop this reference. */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
		dsl_dataset_sync_done(ds, tx);
	}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin,
	    flags & ~DS_CREATE_FLAG_NODIRTY, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	/*
	 * Since we're creating a new node we know it's a leaf, so we can
	 * initialize the counts if the limit feature is active.
	 */
	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
		uint64_t cnt = 0;
		objset_t *os = dd->dd_pool->dp_meta_objset;

		dsl_dir_zapify(dd, tx);
		VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
		    sizeof (cnt), 1, &cnt, tx));
		VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
		    sizeof (cnt), 1, &cnt, tx));
	}

	dsl_dir_rele(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshot's zil header.
	 */
	if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		dsl_dataset_zero_zil(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
#ifdef __FreeBSD__
/* FreeBSD ioctl compat begin */
struct destroyarg {
	nvlist_t *nvl;
	const char *snapname;
};

static int
dsl_check_snap_cb(const char *name, void *arg)
{
	struct destroyarg *da = arg;
	char *dsname;

	dsname = kmem_asprintf("%s@%s", name, da->snapname);
	fnvlist_add_boolean(da->nvl, dsname);
	kmem_free(dsname, strlen(dsname) + 1);

	return (0);
}

int
dmu_get_recursive_snaps_nvl(char *fsname, const char *snapname,
    nvlist_t *snaps)
{
	struct destroyarg *da;
	int err;

	da = kmem_zalloc(sizeof (struct destroyarg), KM_SLEEP);
	da->nvl = snaps;
	da->snapname = snapname;
	err = dmu_objset_find(fsname, dsl_check_snap_cb, da,
	    DS_FIND_CHILDREN);
	kmem_free(da, sizeof (struct destroyarg));

	return (err);
}
/* FreeBSD ioctl compat end */
#endif /* __FreeBSD__ */
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(!ds->ds_is_snapshot);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
		mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
	else
		mrs_used = 0;

	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);

	ASSERT3U(dlused, <=, mrs_used);
	dsl_dataset_phys(ds)->ds_unique_bytes =
	    dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
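
/*
 * Worked example (illustrative numbers): if the most recent snapshot
 * references 10G (mrs_used), 2G of that has since been freed from the head
 * and landed on the head's deadlist (dlused), and the head now references
 * 12G, then the snapshot still "owns" 10G - 2G = 8G of the head's data and
 * ds_unique_bytes = 12G - 8G = 4G.
 */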
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
    dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
	err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
	    obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT)
		VERIFY0(err);
	ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&dsl_dataset_phys(ds)->ds_bp);
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	/* Must not dirty a dataset in the same txg where it got snapshotted. */
	ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);

	dp = ds->ds_dir->dd_pool;
	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}

boolean_t
dsl_dataset_is_dirty(dsl_dataset_t *ds)
{
	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
		    ds, t))
			return (B_TRUE);
	}
	return (B_FALSE);
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
	asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (SET_ERROR(ENOSPC));

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr)
{
	int error;
	uint64_t value;

	ds->ds_trysnap_txg = tx->tx_txg;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
		return (SET_ERROR(EAGAIN));

	/*
	 * Check for conflicting snapshot name.
	 */
	error = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (error == 0)
		return (SET_ERROR(EEXIST));
	if (error != ENOENT)
		return (error);

	/*
	 * We don't allow taking snapshots of inconsistent datasets, such as
	 * those into which we are currently receiving.  However, if we are
	 * creating this snapshot as part of a receive, this check will be
	 * executed atomically with respect to the completion of the receive
	 * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
	 * case we ignore this, knowing it will be fixed up for us shortly in
	 * dmu_recv_end_sync().
	 */
	if (!recv && DS_IS_INCONSISTENT(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Skip the check for temporary snapshots or if we have already checked
	 * the counts in dsl_dataset_snapshot_check. This means we really only
	 * check the count here when we're receiving a stream.
	 */
	if (cnt != 0 && cr != NULL) {
		error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr);
		if (error != 0)
			return (error);
	}

	error = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (error != 0)
		return (error);

	return (0);
}
static int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_arg_t *ddsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int rv = 0;

	/*
	 * Pre-compute how many total new snapshots will be created for each
	 * level in the tree and below. This is needed for validating the
	 * snapshot limit when either taking a recursive snapshot or when
	 * taking multiple snapshots.
	 *
	 * The problem is that the counts are not actually adjusted when
	 * we are checking, only when we finally sync. For a single snapshot,
	 * this is easy, the count will increase by 1 at each node up the tree,
	 * but it's more complicated for the recursive/multiple snapshot case.
	 *
	 * The dsl_fs_ss_limit_check function does recursively check the count
	 * at each level up the tree but since it is validating each snapshot
	 * independently we need to be sure that we are validating the complete
	 * count for the entire set of snapshots. We do this by rolling up the
	 * counts for each component of the name into an nvlist and then
	 * checking each of those cases with the aggregated count.
	 *
	 * This approach properly handles not only the recursive snapshot
	 * case (where we get all of those on the ddsa_snaps list) but also
	 * the sibling case (e.g. snapshot a/b and a/c so that we will also
	 * validate the limit on 'a' using a count of 2).
	 *
	 * We validate the snapshot names in the third loop and only report
	 * name errors once.
	 */
	if (dmu_tx_is_syncing(tx)) {
		nvlist_t *cnt_track = NULL;
		cnt_track = fnvlist_alloc();

		/* Rollup aggregated counts into the cnt_track list */
		for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
		    pair != NULL;
		    pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
			char *pdelim;
			uint64_t val;
			char nm[MAXPATHLEN];

			(void) strlcpy(nm, nvpair_name(pair), sizeof (nm));
			pdelim = strchr(nm, '@');
			if (pdelim == NULL)
				continue;
			*pdelim = '\0';

			do {
				if (nvlist_lookup_uint64(cnt_track, nm,
				    &val) == 0) {
					/* update existing entry */
					fnvlist_add_uint64(cnt_track, nm,
					    val + 1);
				} else {
					/* add to list */
					fnvlist_add_uint64(cnt_track, nm, 1);
				}

				pdelim = strrchr(nm, '/');
				if (pdelim != NULL)
					*pdelim = '\0';
			} while (pdelim != NULL);
		}
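
		/*
		 * Example of the rollup: snapshotting pool/a/b@s1 and
		 * pool/a/c@s1 in one request yields cnt_track =
		 * { "pool/a/b" = 1, "pool/a/c" = 1, "pool/a" = 2,
		 * "pool" = 2 }, so the snapshot limit on 'pool/a' is
		 * checked once against the full count of 2 rather than
		 * twice against 1.
		 */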
		/* Check aggregated counts at each level */
		for (pair = nvlist_next_nvpair(cnt_track, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
			int error = 0;
			char *name;
			uint64_t cnt = 0;
			dsl_dataset_t *ds;

			name = nvpair_name(pair);
			cnt = fnvpair_value_uint64(pair);
			ASSERT(cnt > 0);

			error = dsl_dataset_hold(dp, name, FTAG, &ds);
			if (error == 0) {
				error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
				    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
				    ddsa->ddsa_cr);
				dsl_dataset_rele(ds, FTAG);
			}

			if (error != 0) {
				if (ddsa->ddsa_errors != NULL)
					fnvlist_add_int32(ddsa->ddsa_errors,
					    name, error);
				rv = error;
				/* only report one error for this check */
				break;
			}
		}
		nvlist_free(cnt_track);
	}
	for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
		int error = 0;
		dsl_dataset_t *ds;
		char *name, *atp;
		char dsname[ZFS_MAX_DATASET_NAME_LEN];

		name = nvpair_name(pair);
		if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
			error = SET_ERROR(ENAMETOOLONG);
		if (error == 0) {
			atp = strchr(name, '@');
			if (atp == NULL)
				error = SET_ERROR(EINVAL);
			if (error == 0)
				(void) strlcpy(dsname, name, atp - name + 1);
		}
		if (error == 0)
			error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
		if (error == 0) {
			/* passing 0/NULL skips dsl_fs_ss_limit_check */
			error = dsl_dataset_snapshot_check_impl(ds,
			    atp + 1, tx, B_FALSE, 0, NULL);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error != 0) {
			if (ddsa->ddsa_errors != NULL) {
				fnvlist_add_int32(ddsa->ddsa_errors,
				    name, error);
			}
			rv = error;
		}
	}

	return (rv);
}
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	objset_t *os;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * If we are on an old pool, the zil must not be active, in which
	 * case it will be zeroed.  Usually zil_suspend() accomplishes this.
	 */
	ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
	    dmu_objset_from_ds(ds, &os) != 0 ||
	    bcmp(&os->os_phys->os_zil_header, &zero_zil,
	    sizeof (zero_zil)) == 0);

	/* Should not snapshot a dirty dataset. */
	ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
	    ds, tx->tx_txg));

	dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	do {
		(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
		    sizeof (dsphys->ds_guid));
	} while (dsphys->ds_guid == 0);
	dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
	dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes =
	    dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	dmu_buf_rele(dbuf, FTAG);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f])
			dsl_dataset_activate_feature(dsobj, f, tx);
	}

	ASSERT3U(ds->ds_prev != 0, ==,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
		ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object ||
		    dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
		    ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
			    dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
			dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY0(zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
		    ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);

	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_obj =
		    dsl_dataset_get_remap_deadlist_object(ds);
		/*
		 * Move the remap_deadlist to the snapshot.  The head
		 * will create a new remap deadlist on demand, from
		 * dsl_dataset_block_remapped().
		 */
		dsl_dataset_unset_remap_deadlist_object(ds, tx);
		dsl_deadlist_close(&ds->ds_remap_deadlist);

		dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
		    sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
	dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
	dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
	dsl_dataset_phys(ds)->ds_unique_bytes = 0;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx));

	if (ds->ds_prev)
		dsl_dataset_rele(ds->ds_prev, ds);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, "");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_arg_t *ddsa = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
		dsl_dataset_t *ds;
		char *name, *atp;
		char dsname[ZFS_MAX_DATASET_NAME_LEN];

		name = nvpair_name(pair);
		atp = strchr(name, '@');
		(void) strlcpy(dsname, name, atp - name + 1);
		VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));

		dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
		if (ddsa->ddsa_props != NULL) {
			dsl_props_set_sync_impl(ds->ds_prev,
			    ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
		}
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * The snapshots must all be in the same pool.
 * All-or-nothing: if there are any failures, nothing will be modified.
 */
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
	dsl_dataset_snapshot_arg_t ddsa;
	nvpair_t *pair;
	boolean_t needsuspend;
	int error;
	spa_t *spa;
	char *firstname;
	nvlist_t *suspended = NULL;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);
	firstname = nvpair_name(pair);

	error = spa_open(firstname, &spa, FTAG);
	if (error != 0)
		return (error);
	needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	spa_close(spa, FTAG);

	if (needsuspend) {
		suspended = fnvlist_alloc();
		for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(snaps, pair)) {
			char fsname[ZFS_MAX_DATASET_NAME_LEN];
			char *snapname = nvpair_name(pair);
			char *atp;
			void *cookie;

			atp = strchr(snapname, '@');
			if (atp == NULL) {
				error = SET_ERROR(EINVAL);
				break;
			}
			(void) strlcpy(fsname, snapname, atp - snapname + 1);

			error = zil_suspend(fsname, &cookie);
			if (error != 0)
				break;
			fnvlist_add_uint64(suspended, fsname,
			    (uintptr_t)cookie);
		}
	}

	ddsa.ddsa_snaps = snaps;
	ddsa.ddsa_props = props;
	ddsa.ddsa_errors = errors;
	ddsa.ddsa_cr = CRED();

	if (error == 0) {
		error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
		    dsl_dataset_snapshot_sync, &ddsa,
		    fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
	}

	if (suspended != NULL) {
		for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(suspended, pair)) {
			zil_resume((void *)(uintptr_t)
			    fnvpair_value_uint64(pair));
		}
		fnvlist_free(suspended);
	}

#ifdef __FreeBSD__
#ifdef _KERNEL
	if (error == 0) {
		for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(snaps, pair)) {
			char *snapname = nvpair_name(pair);
			zvol_create_minors(snapname);
		}
	}
#endif
#endif
	return (error);
}
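
/*
 * Usage sketch (hypothetical caller; dataset names are illustrative):
 * snapshot two filesystems in one atomic sync task -- on failure neither
 * snapshot exists, and per-name errors land in 'errors'.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *
 *	fnvlist_add_boolean(snaps, "tank/fs1@backup");
 *	fnvlist_add_boolean(snaps, "tank/fs2@backup");
 *	int err = dsl_dataset_snapshot(snaps, NULL, errors);
 *	fnvlist_free(snaps);
 *	fnvlist_free(errors);
 */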
typedef struct dsl_dataset_snapshot_tmp_arg {
	const char *ddsta_fsname;
	const char *ddsta_snapname;
	minor_t ddsta_cleanup_minor;
	const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* NULL cred means no limit check for tmp snapshot */
	error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
	    tx, B_FALSE, 0, NULL);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(ENOTSUP));
	}
	error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
	    B_TRUE, tx);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));

	dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
	dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
	    ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
	dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);

	dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
    minor_t cleanup_minor, const char *htag)
{
	dsl_dataset_snapshot_tmp_arg_t ddsta;
	int error;
	spa_t *spa;
	boolean_t needsuspend;
	void *cookie;

	ddsta.ddsta_fsname = fsname;
	ddsta.ddsta_snapname = snapname;
	ddsta.ddsta_cleanup_minor = cleanup_minor;
	ddsta.ddsta_htag = htag;

	error = spa_open(fsname, &spa, FTAG);
	if (error != 0)
		return (error);
	needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
	spa_close(spa, FTAG);

	if (needsuspend) {
		error = zil_suspend(fsname, &cookie);
		if (error != 0)
			return (error);
	}

	error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
	    dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);

	if (needsuspend)
		zil_resume(cookie);
	return (error);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;

	if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
		    &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
		    &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
		VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
		    ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
		    &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
		ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
		ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
		ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
	}

	dmu_objset_sync(ds->ds_objset, zio, tx);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_activation_needed[f]) {
			if (ds->ds_feature_inuse[f])
				continue;
			dsl_dataset_activate_feature(ds->ds_object, f, tx);
			ds->ds_feature_inuse[f] = B_TRUE;
		}
	}
}
static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	objset_t *os = ds->ds_objset;

	bplist_iterate(&ds->ds_pending_deadlist,
	    deadlist_enqueue_cb, &ds->ds_deadlist, tx);

	if (os->os_synced_dnodes != NULL) {
		multilist_destroy(os->os_synced_dnodes);
		os->os_synced_dnodes = NULL;
	}

	ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));

	dmu_buf_rele(ds->ds_dbuf, ds);
}
int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
	uint64_t count = 0;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

	/*
	 * There may be missing entries in ds_next_clones_obj
	 * due to a bug in a previous version of the code.
	 * Only trust it if it has the right number of entries.
	 */
	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
		    &count));
	}
	if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
		return (ENOENT);
	}
	for (zap_cursor_init(&zc, mos,
	    dsl_dataset_phys(ds)->ds_next_clones_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		dsl_dir_name(clone->ds_dir, buf);
		fnvlist_add_boolean(val, buf);
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
	return (0);
}
static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
	nvlist_t *propval = fnvlist_alloc();
	nvlist_t *val;

	/*
	 * We use nvlist_alloc() instead of fnvlist_alloc() because the
	 * latter would allocate the list with NV_UNIQUE_NAME flag.
	 * As a result, every time a clone name is appended to the list
	 * it would be (linearly) searched for a duplicate name.
	 * We already know that all clone names must be unique and we
	 * want to avoid the quadratic complexity of double-checking that
	 * because we can have a large number of clones.
	 */
	VERIFY0(nvlist_alloc(&val, 0, KM_SLEEP));

	if (get_clones_stat_impl(ds, val) == 0) {
		fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
		fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
		    propval);
	}

	nvlist_free(val);
	nvlist_free(propval);
}
/*
 * Returns a string that represents the receive resume stats token. It should
 * be freed with strfree().
 */
char *
get_receive_resume_stats_impl(dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dsl_dataset_has_resume_receive_state(ds)) {
		char *str;
		void *packed;
		uint8_t *compressed;
		uint64_t val;
		nvlist_t *token_nv = fnvlist_alloc();
		size_t packed_size, compressed_size;

		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
			fnvlist_add_uint64(token_nv, "fromguid", val);
		}
		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
			fnvlist_add_uint64(token_nv, "object", val);
		}
		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
			fnvlist_add_uint64(token_nv, "offset", val);
		}
		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
			fnvlist_add_uint64(token_nv, "bytes", val);
		}
		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
			fnvlist_add_uint64(token_nv, "toguid", val);
		}
		char buf[256];
		if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
			fnvlist_add_string(token_nv, "toname", buf);
		}
		if (zap_contains(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_LARGEBLOCK) == 0) {
			fnvlist_add_boolean(token_nv, "largeblockok");
		}
		if (zap_contains(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_EMBEDOK) == 0) {
			fnvlist_add_boolean(token_nv, "embedok");
		}
		if (zap_contains(dp->dp_meta_objset, ds->ds_object,
		    DS_FIELD_RESUME_COMPRESSOK) == 0) {
			fnvlist_add_boolean(token_nv, "compressok");
		}
		packed = fnvlist_pack(token_nv, &packed_size);
		fnvlist_free(token_nv);
		compressed = kmem_alloc(packed_size, KM_SLEEP);

		compressed_size = gzip_compress(packed, compressed,
		    packed_size, packed_size, 6);

		zio_cksum_t cksum;
		fletcher_4_native(compressed, compressed_size, NULL, &cksum);

		str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP);
		for (int i = 0; i < compressed_size; i++) {
			(void) sprintf(str + i * 2, "%02x", compressed[i]);
		}
		str[compressed_size * 2] = '\0';
		char *propval = kmem_asprintf("%u-%llx-%llx-%s",
		    ZFS_SEND_RESUME_TOKEN_VERSION,
		    (longlong_t)cksum.zc_word[0],
		    (longlong_t)packed_size, str);
		kmem_free(packed, packed_size);
		kmem_free(str, compressed_size * 2 + 1);
		kmem_free(compressed, packed_size);
		return (propval);
	}
	return (spa_strdup(""));
}
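
/*
 * The resulting token has the form
 * "<version>-<cksum word>-<packed size>-<hex payload>": a gzip-compressed
 * packed nvlist, hex-encoded, preceded by the first fletcher-4 checksum
 * word and the uncompressed (packed) nvlist size, both in hex.  Userland
 * parses this back when resuming a receive (e.g. "zfs send -t <token>").
 */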
/*
 * Returns a string that represents the receive resume stats token of the
 * dataset's child. It should be freed with strfree().
 */
char *
get_child_receive_stats(dsl_dataset_t *ds)
{
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
	dsl_dataset_t *recv_ds;
	dsl_dataset_name(ds, recvname);
	if (strlcat(recvname, "/", sizeof (recvname)) <
	    sizeof (recvname) &&
	    strlcat(recvname, recv_clone_name, sizeof (recvname)) <
	    sizeof (recvname) &&
	    dsl_dataset_hold(ds->ds_dir->dd_pool, recvname, FTAG,
	    &recv_ds) == 0) {
		char *propval = get_receive_resume_stats_impl(recv_ds);
		dsl_dataset_rele(recv_ds, FTAG);
		return (propval);
	}
	return (spa_strdup(""));
}
static void
get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	char *propval = get_receive_resume_stats_impl(ds);
	if (strcmp(propval, "") != 0) {
		dsl_prop_nvlist_add_string(nv,
		    ZFS_PROP_RECEIVE_RESUME_TOKEN, propval);
	} else {
		char *childval = get_child_receive_stats(ds);
		if (strcmp(childval, "") != 0) {
			dsl_prop_nvlist_add_string(nv,
			    ZFS_PROP_RECEIVE_RESUME_TOKEN, childval);
		}
		strfree(childval);
	}
	strfree(propval);
}
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
	uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
	    (dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
	    dsl_dataset_phys(ds)->ds_compressed_bytes);
	return (ratio);
}
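
/*
 * The ratio is a x100 fixed-point value: e.g. 300G logically referenced
 * that compresses to 120G on disk yields 300 * 100 / 120 = 250, which
 * userland prints as "2.50x".  (Numbers illustrative.)
 */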
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}

uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
	if (ds->ds_is_snapshot) {
		return (dsl_get_refratio(ds));
	} else {
		dsl_dir_t *dd = ds->ds_dir;
		mutex_enter(&dd->dd_lock);
		uint64_t val = dsl_dir_get_compressratio(dd);
		mutex_exit(&dd->dd_lock);
		return (val);
	}
}

uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
	if (ds->ds_is_snapshot) {
		return (dsl_dataset_phys(ds)->ds_unique_bytes);
	} else {
		dsl_dir_t *dd = ds->ds_dir;
		mutex_enter(&dd->dd_lock);
		uint64_t val = dsl_dir_get_used(dd);
		mutex_exit(&dd->dd_lock);
		return (val);
	}
}

uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_creation_time);
}

uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_creation_txg);
}

uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
	return (ds->ds_quota);
}

uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
	return (ds->ds_reserved);
}

uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_guid);
}

uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_unique_bytes);
}

uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
	return (ds->ds_object);
}

uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
	return (ds->ds_userrefs);
}

uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
	return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}

uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
	return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}

uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
	ASSERT(ds->ds_is_snapshot);
	return (dsl_dataset_phys(ds)->ds_num_children - 1);
}
uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
	return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
	    1 : 0);
}

uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
	uint64_t refdbytes = dsl_get_referenced(ds);
	uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
	    NULL, 0, TRUE);
	if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
		availbytes +=
		    ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
	}
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (refdbytes < ds->ds_quota) {
			availbytes = MIN(availbytes,
			    ds->ds_quota - refdbytes);
		} else {
			availbytes = 0;
		}
	}
	return (availbytes);
}
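
/*
 * Worked example (illustrative numbers): with 50G of dir space available,
 * refreservation = 10G, unique = 4G, refquota = 20G and 15G referenced:
 * availbytes = 50G + (10G - 4G) = 56G, then clamped by the refquota to
 * MIN(56G, 20G - 15G) = 5G.  With referenced >= refquota it would be 0.
 */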
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_dataset_t *prev;
	int err = dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
	if (err == 0) {
		uint64_t comp, uncomp;
		err = dsl_dataset_space_written(prev, ds, written,
		    &comp, &uncomp);
		dsl_dataset_rele(prev, FTAG);
	}
	return (err);
}

/*
 * 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
 */
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
		dsl_dataset_name(ds->ds_prev, snap);
		return (0);
	} else {
		return (ENOENT);
	}
}
2184 * Returns the mountpoint property and source for the given dataset in the value
2185 * and source buffers. The value buffer must be at least as large as MAXPATHLEN
2186 * and the source buffer as least as large a ZFS_MAX_DATASET_NAME_LEN.
2187 * Returns 0 on success and an error on failure.
2190 dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
2194 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2196 /* Retrieve the mountpoint value stored in the zap opbject */
2197 error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
2198 ZAP_MAXVALUELEN, value, source);
2204 * Process the dsname and source to find the full mountpoint string.
2205 * Can be skipped for 'legacy' or 'none'.
2207 if (value[0] == '/') {
2208 char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
2210 const char *relpath;
2213 * If we inherit the mountpoint, even from a dataset
2214 * with a received value, the source will be the path of
2215 * the dataset we inherit from. If source is
2216 * ZPROP_SOURCE_VAL_RECVD, the received value is not inherited.
2219 if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
2222 ASSERT0(strncmp(dsname, source, strlen(source)));
2223 relpath = dsname + strlen(source);
2224 if (relpath[0] == '/')
2228 spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
2231 * Special case an alternate root of '/'. This will
2232 * avoid having multiple leading slashes in the mountpoint path.
2235 if (strcmp(root, "/") == 0)
2239 * If the mountpoint is '/' then skip over this
2240 * if we are obtaining either an alternate root or
2241 * an inherited mountpoint.
2244 if (value[1] == '\0' && (root[0] != '\0' ||
2245 relpath[0] != '\0'))
2248 if (relpath[0] == '\0') {
2249 (void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
2252 (void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
2253 root, mnt, relpath[0] == '@' ? "" : "/",
2256 kmem_free(buf, ZAP_MAXVALUELEN);
2263 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2265 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2267 ASSERT(dsl_pool_config_held(dp));
2269 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
2270 dsl_get_refratio(ds));
2271 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
2272 dsl_get_logicalreferenced(ds));
2273 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2274 dsl_get_compressratio(ds));
2275 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2278 if (ds->ds_is_snapshot) {
2279 get_clones_stat(ds, nv);
2281 char buf[ZFS_MAX_DATASET_NAME_LEN];
2282 if (dsl_get_prev_snap(ds, buf) == 0)
2283 dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
2285 dsl_dir_stats(ds->ds_dir, nv);
2288 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
2289 dsl_get_available(ds));
2290 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
2291 dsl_get_referenced(ds));
2292 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2293 dsl_get_creation(ds));
2294 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2295 dsl_get_creationtxg(ds));
2296 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2297 dsl_get_refquota(ds));
2298 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2299 dsl_get_refreservation(ds));
2300 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2302 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2303 dsl_get_unique(ds));
2304 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2305 dsl_get_objsetid(ds));
2306 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2307 dsl_get_userrefs(ds));
2308 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2309 dsl_get_defer_destroy(ds));
2311 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2313 if (dsl_get_written(ds, &written) == 0) {
2314 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2319 if (!dsl_dataset_is_snapshot(ds)) {
2321 * A failed "newfs" (e.g. full) resumable receive leaves
2322 * the stats set on this dataset. Check here for the prop.
2324 get_receive_resume_stats(ds, nv);
2327 * A failed incremental resumable receive leaves the
2328 * stats set on our child named "%recv". Check the child for the prop.
2331 /* 6 extra bytes for /%recv */
2332 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
2333 dsl_dataset_t *recv_ds;
2334 dsl_dataset_name(ds, recvname);
2335 if (strlcat(recvname, "/", sizeof (recvname)) <
2336 sizeof (recvname) &&
2337 strlcat(recvname, recv_clone_name, sizeof (recvname)) <
2338 sizeof (recvname) &&
2339 dsl_dataset_hold(dp, recvname, FTAG, &recv_ds) == 0) {
2340 get_receive_resume_stats(recv_ds, nv);
2341 dsl_dataset_rele(recv_ds, FTAG);
2347 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2349 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2350 ASSERT(dsl_pool_config_held(dp));
2352 stat->dds_creation_txg = dsl_get_creationtxg(ds);
2353 stat->dds_inconsistent = dsl_get_inconsistent(ds);
2354 stat->dds_guid = dsl_get_guid(ds);
2355 stat->dds_origin[0] = '\0';
2356 if (ds->ds_is_snapshot) {
2357 stat->dds_is_snapshot = B_TRUE;
2358 stat->dds_num_clones = dsl_get_numclones(ds);
2360 stat->dds_is_snapshot = B_FALSE;
2361 stat->dds_num_clones = 0;
2363 if (dsl_dir_is_clone(ds->ds_dir)) {
2364 dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
2370 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2372 return (ds->ds_fsid_guid);
2376 dsl_dataset_space(dsl_dataset_t *ds,
2377 uint64_t *refdbytesp, uint64_t *availbytesp,
2378 uint64_t *usedobjsp, uint64_t *availobjsp)
2380 *refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
2381 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2382 if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
2384 ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
2385 if (ds->ds_quota != 0) {
2387 * Adjust available bytes according to refquota
2389 if (*refdbytesp < ds->ds_quota)
2390 *availbytesp = MIN(*availbytesp,
2391 ds->ds_quota - *refdbytesp);
2395 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2396 *usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
2397 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2398 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2402 dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
2404 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2407 ASSERT(dsl_pool_config_held(dp));
2410 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2411 birth = dsl_dataset_get_blkptr(ds)->blk_birth;
2412 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2413 if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
2414 objset_t *os, *os_snap;
2416 * It may be that only the ZIL differs, because it was
2417 * reset in the head. Don't count that as being modified.
2420 if (dmu_objset_from_ds(ds, &os) != 0)
2422 if (dmu_objset_from_ds(snap, &os_snap) != 0)
2424 return (bcmp(&os->os_phys->os_meta_dnode,
2425 &os_snap->os_phys->os_meta_dnode,
2426 sizeof (os->os_phys->os_meta_dnode)) != 0);
2431 typedef struct dsl_dataset_rename_snapshot_arg {
2432 const char *ddrsa_fsname;
2433 const char *ddrsa_oldsnapname;
2434 const char *ddrsa_newsnapname;
2435 boolean_t ddrsa_recursive;
2437 } dsl_dataset_rename_snapshot_arg_t;
2441 dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
2442 dsl_dataset_t *hds, void *arg)
2444 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2448 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
2450 /* ignore nonexistent snapshots */
2451 return (error == ENOENT ? 0 : error);
2454 /* new name should not exist */
2455 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
2457 error = SET_ERROR(EEXIST);
2458 else if (error == ENOENT)
2461 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2462 if (dsl_dir_namelen(hds->ds_dir) + 1 +
2463 strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
2464 error = SET_ERROR(ENAMETOOLONG);
2470 dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
2472 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2473 dsl_pool_t *dp = dmu_tx_pool(tx);
2477 error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
2481 if (ddrsa->ddrsa_recursive) {
2482 error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
2483 dsl_dataset_rename_snapshot_check_impl, ddrsa,
2486 error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
2488 dsl_dataset_rele(hds, FTAG);
2493 dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
2494 dsl_dataset_t *hds, void *arg)
2498 char *oldname, *newname;
2501 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2504 dmu_tx_t *tx = ddrsa->ddrsa_tx;
2507 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
2508 ASSERT(error == 0 || error == ENOENT);
2509 if (error == ENOENT) {
2510 /* ignore nonexistent snapshots */
2514 VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
2516 /* log before we change the name */
2517 spa_history_log_internal_ds(ds, "rename", tx,
2518 "-> @%s", ddrsa->ddrsa_newsnapname);
2520 VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
2522 mutex_enter(&ds->ds_lock);
2523 (void) strcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname);
2524 mutex_exit(&ds->ds_lock);
2525 VERIFY0(zap_add(dp->dp_meta_objset,
2526 dsl_dataset_phys(hds)->ds_snapnames_zapobj,
2527 ds->ds_snapname, 8, 1, &ds->ds_object, tx));
2531 oldname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2532 newname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
2533 snprintf(oldname, MAXPATHLEN, "%s@%s", ddrsa->ddrsa_fsname,
2534 ddrsa->ddrsa_oldsnapname);
2535 snprintf(newname, MAXPATHLEN, "%s@%s", ddrsa->ddrsa_fsname,
2536 ddrsa->ddrsa_newsnapname);
2537 zfsvfs_update_fromname(oldname, newname);
2538 zvol_rename_minors(oldname, newname);
2539 kmem_free(newname, MAXPATHLEN);
2540 kmem_free(oldname, MAXPATHLEN);
2543 dsl_dataset_rele(ds, FTAG);
2549 dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
2551 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2552 dsl_pool_t *dp = dmu_tx_pool(tx);
2555 VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
2556 ddrsa->ddrsa_tx = tx;
2557 if (ddrsa->ddrsa_recursive) {
2558 VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
2559 dsl_dataset_rename_snapshot_sync_impl, ddrsa,
2562 VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
2564 dsl_dataset_rele(hds, FTAG);
2568 dsl_dataset_rename_snapshot(const char *fsname,
2569 const char *oldsnapname, const char *newsnapname, boolean_t recursive)
2571 dsl_dataset_rename_snapshot_arg_t ddrsa;
2573 ddrsa.ddrsa_fsname = fsname;
2574 ddrsa.ddrsa_oldsnapname = oldsnapname;
2575 ddrsa.ddrsa_newsnapname = newsnapname;
2576 ddrsa.ddrsa_recursive = recursive;
2578 return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
2579 dsl_dataset_rename_snapshot_sync, &ddrsa,
2580 1, ZFS_SPACE_CHECK_RESERVED));
2584 * If we're doing an ownership handoff, we need to make sure that there is
2585 * only one long hold on the dataset. We're not allowed to change anything here
2586 * so we don't permanently release the long hold or regular hold here. We want
2587 * to do this only when syncing to avoid the dataset unexpectedly going away
2588 * when we release the long hold.
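 *
 * Informal sketch of the flow (added for clarity): in open context the
 * check passes tentatively; only when syncing do we temporarily drop
 * the owner's long hold, verify that no other long holds remain,
 * re-take the hold, and return EBUSY if another long hold was found.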
2591 dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
2595 if (!dmu_tx_is_syncing(tx))
2598 if (owner != NULL) {
2599 VERIFY3P(ds->ds_owner, ==, owner);
2600 dsl_dataset_long_rele(ds, owner);
2603 held = dsl_dataset_long_held(ds);
2606 dsl_dataset_long_hold(ds, owner);
2609 return (SET_ERROR(EBUSY));
2615 dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
2617 dsl_dataset_rollback_arg_t *ddra = arg;
2618 dsl_pool_t *dp = dmu_tx_pool(tx);
2620 int64_t unused_refres_delta;
2623 error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
2627 /* must not be a snapshot */
2628 if (ds->ds_is_snapshot) {
2629 dsl_dataset_rele(ds, FTAG);
2630 return (SET_ERROR(EINVAL));
2633 /* must have a most recent snapshot */
2634 if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
2635 dsl_dataset_rele(ds, FTAG);
2636 return (SET_ERROR(ESRCH));
2640 * No rollback to a snapshot created in the current txg, because
2641 * the rollback may dirty the dataset and create blocks that are
2642 * not reachable from the rootbp while having a birth txg that
2643 * falls into the snapshot's range.
2645 if (dmu_tx_is_syncing(tx) &&
2646 dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
2647 dsl_dataset_rele(ds, FTAG);
2648 return (SET_ERROR(EAGAIN));
2652 * If the expected target snapshot is specified, then check that
2653 * it is indeed the latest snapshot.
2655 if (ddra->ddra_tosnap != NULL) {
2656 dsl_dataset_t *snapds;
2658 /* Check if the target snapshot exists at all. */
2659 error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
2662 * ESRCH is used to signal that the target snapshot does
2663 * not exist, while ENOENT is used to report that
2664 * the rolled back dataset does not exist.
2665 * ESRCH is also used to cover other cases where the
2666 * target snapshot is not related to the dataset being
2667 * rolled back such as being in a different pool.
2669 if (error == ENOENT || error == EXDEV)
2670 error = SET_ERROR(ESRCH);
2671 dsl_dataset_rele(ds, FTAG);
2674 ASSERT(snapds->ds_is_snapshot);
2676 /* Check that the snapshot is indeed the latest one. */
2677 if (snapds != ds->ds_prev) {
2679 * Distinguish between the case where the only problem
2680 * is intervening snapshots (EEXIST) vs the snapshot
2681 * not being a valid target for rollback (ESRCH).
2683 if (snapds->ds_dir == ds->ds_dir ||
2684 (dsl_dir_is_clone(ds->ds_dir) &&
2685 dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
2686 snapds->ds_object)) {
2687 error = SET_ERROR(EEXIST);
2689 error = SET_ERROR(ESRCH);
2691 dsl_dataset_rele(snapds, FTAG);
2692 dsl_dataset_rele(ds, FTAG);
2695 dsl_dataset_rele(snapds, FTAG);
2698 /* must not have any bookmarks after the most recent snapshot */
2699 nvlist_t *proprequest = fnvlist_alloc();
2700 fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG));
2701 nvlist_t *bookmarks = fnvlist_alloc();
2702 error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks);
2703 fnvlist_free(proprequest);
2705 dsl_dataset_rele(ds, FTAG);
2708 for (nvpair_t *pair = nvlist_next_nvpair(bookmarks, NULL);
2709 pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) {
2711 fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair),
2712 zfs_prop_to_name(ZFS_PROP_CREATETXG));
2713 uint64_t createtxg = fnvlist_lookup_uint64(valuenv, "value");
2714 if (createtxg > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
2715 fnvlist_free(bookmarks);
2716 dsl_dataset_rele(ds, FTAG);
2717 return (SET_ERROR(EEXIST));
2720 fnvlist_free(bookmarks);
2722 error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
2724 dsl_dataset_rele(ds, FTAG);
2729 * Check if the snap we are rolling back to uses more than the refquota.
2732 if (ds->ds_quota != 0 &&
2733 dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
2734 dsl_dataset_rele(ds, FTAG);
2735 return (SET_ERROR(EDQUOT));
2739 * When we do the clone swap, we will temporarily use more space
2740 * due to the refreservation (the head will no longer have any
2741 * unique space, so the entire amount of the refreservation will need
2742 * to be free). We will immediately destroy the clone, freeing
2743 * this space, but the freeing happens over many txg's.
2745 unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
2746 dsl_dataset_phys(ds)->ds_unique_bytes);
2748 if (unused_refres_delta > 0 &&
2749 unused_refres_delta >
2750 dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
2751 dsl_dataset_rele(ds, FTAG);
2752 return (SET_ERROR(ENOSPC));
2755 dsl_dataset_rele(ds, FTAG);
2760 dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
2762 dsl_dataset_rollback_arg_t *ddra = arg;
2763 dsl_pool_t *dp = dmu_tx_pool(tx);
2764 dsl_dataset_t *ds, *clone;
2766 char namebuf[ZFS_MAX_DATASET_NAME_LEN];
2768 VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
2770 dsl_dataset_name(ds->ds_prev, namebuf);
2771 fnvlist_add_string(ddra->ddra_result, "target", namebuf);
2773 cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
2774 ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, tx);
2776 VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
2778 dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
2779 dsl_dataset_zero_zil(ds, tx);
2781 dsl_destroy_head_sync_impl(clone, tx);
2783 dsl_dataset_rele(clone, FTAG);
2784 dsl_dataset_rele(ds, FTAG);
2788 * Rolls back the given filesystem or volume to the most recent snapshot.
2789 * The name of the most recent snapshot will be returned under key "target"
2790 * in the result nvlist.
2792 * If owner != NULL:
2793 * - The existing dataset MUST be owned by the specified owner at entry
2794 * - Upon return, dataset will still be held by the same owner, whether we
2795 *   succeed or not.
2797 * This mode is required any time the existing filesystem is mounted. See
2798 * notes above zfs_suspend_fs() for further details.
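 *
 * Illustrative caller sketch (added; not part of the original source).
 * A NULL owner means the dataset is not owned (e.g. not mounted), and
 * a NULL tosnap means "whatever the latest snapshot is":
 *
 *	nvlist_t *result = fnvlist_alloc();
 *	char *target;
 *	int error = dsl_dataset_rollback("tank/fs", NULL, NULL, result);
 *	if (error == 0)
 *		target = fnvlist_lookup_string(result, "target");
 *	fnvlist_free(result);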
2801 dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
2804 dsl_dataset_rollback_arg_t ddra;
2806 ddra.ddra_fsname = fsname;
2807 ddra.ddra_tosnap = tosnap;
2808 ddra.ddra_owner = owner;
2809 ddra.ddra_result = result;
2811 return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
2812 dsl_dataset_rollback_sync, &ddra,
2813 1, ZFS_SPACE_CHECK_RESERVED));
2816 struct promotenode {
2821 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2822 static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
2824 static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
2827 dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
2829 dsl_dataset_promote_arg_t *ddpa = arg;
2830 dsl_pool_t *dp = dmu_tx_pool(tx);
2832 struct promotenode *snap;
2833 dsl_dataset_t *origin_ds;
2837 size_t max_snap_len;
2838 boolean_t conflicting_snaps;
2840 err = promote_hold(ddpa, dp, FTAG);
2844 hds = ddpa->ddpa_clone;
2845 snap = list_head(&ddpa->shared_snaps);
2846 origin_ds = snap->ds;
2847 max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
2849 snap = list_head(&ddpa->origin_snaps);
2851 if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
2852 promote_rele(ddpa, FTAG);
2853 return (SET_ERROR(EXDEV));
2857 * Compute and check the amount of space to transfer. Since this is
2858 * so expensive, only do it when syncing, skipping the preliminary check.
2860 if (!dmu_tx_is_syncing(tx)) {
2861 promote_rele(ddpa, FTAG);
2865 /* compute origin's new unique space */
2866 snap = list_tail(&ddpa->clone_snaps);
2867 ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
2868 origin_ds->ds_object);
2869 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2870 dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
2871 &ddpa->unique, &unused, &unused);
2874 * Walk the snapshots that we are moving
2876 * Compute space to transfer. Consider the incremental changes
2877 * to used by each snapshot:
2878 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2879 * So each snapshot gave birth to:
2880 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2881 * So a sequence would look like:
2882 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2883 * Which simplifies to:
2884 * uN + kN + k(N-1) + ... + k1 + k0
2885 * Note however, if we stop before we reach the ORIGIN we get:
2886 * uN + kN + k(N-1) + ... + kM - u(M-1)
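 *
 * Worked example (added for clarity): with three snapshots where
 * u0 = 10, u1 = 12, u2 = 15 and k0 = 2, k1 = 3, k2 = 1, the births are
 * b0 = 10 - 0 + 2 = 12, b1 = 12 - 10 + 3 = 5, b2 = 15 - 12 + 1 = 4,
 * which sum to 21 = u2 + k2 + k1 + k0 = 15 + 1 + 3 + 2.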
2888 conflicting_snaps = B_FALSE;
2890 ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
2891 ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
2892 ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
2893 for (snap = list_head(&ddpa->shared_snaps); snap;
2894 snap = list_next(&ddpa->shared_snaps, snap)) {
2895 uint64_t val, dlused, dlcomp, dluncomp;
2896 dsl_dataset_t *ds = snap->ds;
2901 * If there are long holds, we won't be able to evict the objset.
2904 if (dsl_dataset_long_held(ds)) {
2905 err = SET_ERROR(EBUSY);
2909 /* Check that the snapshot name does not conflict */
2910 VERIFY0(dsl_dataset_get_snapname(ds));
2911 if (strlen(ds->ds_snapname) >= max_snap_len) {
2912 err = SET_ERROR(ENAMETOOLONG);
2915 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2917 fnvlist_add_boolean(ddpa->err_ds,
2918 snap->ds->ds_snapname);
2919 conflicting_snaps = B_TRUE;
2920 } else if (err != ENOENT) {
2924 /* The very first snapshot does not have a deadlist */
2925 if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
2928 dsl_deadlist_space(&ds->ds_deadlist,
2929 &dlused, &dlcomp, &dluncomp);
2930 ddpa->used += dlused;
2931 ddpa->comp += dlcomp;
2932 ddpa->uncomp += dluncomp;
2936 * In order to return the full list of conflicting snapshots, we check
2937 * whether there was a conflict after traversing all of them.
2939 if (conflicting_snaps) {
2940 err = SET_ERROR(EEXIST);
2945 * If we are a clone of a clone then we never reached ORIGIN,
2946 * so we need to subtract out the clone origin's used space.
2948 if (ddpa->origin_origin) {
2950 dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
2952 dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
2954 dsl_dataset_phys(ddpa->origin_origin)->
2955 ds_uncompressed_bytes;
2958 /* Check that there is enough space and limit headroom here */
2959 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2960 0, ss_mv_cnt, ddpa->used, ddpa->cr);
2965 * Compute the amounts of space that will be used by snapshots
2966 * after the promotion (for both origin and clone). For each,
2967 * it is the amount of space that will be on all of their
2968 * deadlists (that was not born before their new origin).
2970 if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2974 * Note, typically this will not be a clone of a clone,
2975 * so dd_origin_txg will be < TXG_INITIAL, so
2976 * these snaplist_space() -> dsl_deadlist_space_range()
2977 * calls will be fast because they do not have to
2978 * iterate over all bps.
2980 snap = list_head(&ddpa->origin_snaps);
2981 err = snaplist_space(&ddpa->shared_snaps,
2982 snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
2986 err = snaplist_space(&ddpa->clone_snaps,
2987 snap->ds->ds_dir->dd_origin_txg, &space);
2990 ddpa->cloneusedsnap += space;
2992 if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
2993 DD_FLAG_USED_BREAKDOWN) {
2994 err = snaplist_space(&ddpa->origin_snaps,
2995 dsl_dataset_phys(origin_ds)->ds_creation_txg,
2996 &ddpa->originusedsnap);
3002 promote_rele(ddpa, FTAG);
3007 dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
3009 dsl_dataset_promote_arg_t *ddpa = arg;
3010 dsl_pool_t *dp = dmu_tx_pool(tx);
3012 struct promotenode *snap;
3013 dsl_dataset_t *origin_ds;
3014 dsl_dataset_t *origin_head;
3016 dsl_dir_t *odd = NULL;
3017 uint64_t oldnext_obj;
3019 #if defined(__FreeBSD__) && defined(_KERNEL)
3020 char *oldname, *newname;
3023 VERIFY0(promote_hold(ddpa, dp, FTAG));
3024 hds = ddpa->ddpa_clone;
3026 ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
3028 snap = list_head(&ddpa->shared_snaps);
3029 origin_ds = snap->ds;
3032 snap = list_head(&ddpa->origin_snaps);
3033 origin_head = snap->ds;
3036 * We need to explicitly open odd, since origin_ds's dd will be changing.
3039 VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
3042 /* change origin's next snap */
3043 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
3044 oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
3045 snap = list_tail(&ddpa->clone_snaps);
3046 ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
3047 origin_ds->ds_object);
3048 dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
3050 /* change the origin's next clone */
3051 if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
3052 dsl_dataset_remove_from_next_clones(origin_ds,
3053 snap->ds->ds_object, tx);
3054 VERIFY0(zap_add_int(dp->dp_meta_objset,
3055 dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
3060 dmu_buf_will_dirty(dd->dd_dbuf, tx);
3061 ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
3062 dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
3063 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
3064 dmu_buf_will_dirty(odd->dd_dbuf, tx);
3065 dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
3066 origin_head->ds_dir->dd_origin_txg =
3067 dsl_dataset_phys(origin_ds)->ds_creation_txg;
3069 /* change dd_clone entries */
3070 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
3071 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3072 dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
3073 VERIFY0(zap_add_int(dp->dp_meta_objset,
3074 dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
3075 hds->ds_object, tx));
3077 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3078 dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
3079 origin_head->ds_object, tx));
3080 if (dsl_dir_phys(dd)->dd_clones == 0) {
3081 dsl_dir_phys(dd)->dd_clones =
3082 zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
3083 DMU_OT_NONE, 0, tx);
3085 VERIFY0(zap_add_int(dp->dp_meta_objset,
3086 dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
3089 #if defined(__FreeBSD__) && defined(_KERNEL)
3090 /* Take the spa_namespace_lock early so zvol renames don't deadlock. */
3091 mutex_enter(&spa_namespace_lock);
3093 oldname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3094 newname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3097 /* move snapshots to this dir */
3098 for (snap = list_head(&ddpa->shared_snaps); snap;
3099 snap = list_next(&ddpa->shared_snaps, snap)) {
3100 dsl_dataset_t *ds = snap->ds;
3103 * Property callbacks are registered to a particular
3104 * dsl_dir. Since ours is changing, evict the objset
3105 * so that they will be unregistered from the old dsl_dir.
3107 if (ds->ds_objset) {
3108 dmu_objset_evict(ds->ds_objset);
3109 ds->ds_objset = NULL;
3112 /* move snap name entry */
3113 VERIFY0(dsl_dataset_get_snapname(ds));
3114 VERIFY0(dsl_dataset_snap_remove(origin_head,
3115 ds->ds_snapname, tx, B_TRUE));
3116 VERIFY0(zap_add(dp->dp_meta_objset,
3117 dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
3118 8, 1, &ds->ds_object, tx));
3119 dsl_fs_ss_count_adjust(hds->ds_dir, 1,
3120 DD_FIELD_SNAPSHOT_COUNT, tx);
3122 /* change containing dsl_dir */
3123 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3124 ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
3125 dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
3126 ASSERT3P(ds->ds_dir, ==, odd);
3127 dsl_dir_rele(ds->ds_dir, ds);
3128 VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
3129 NULL, ds, &ds->ds_dir));
3131 #if defined(__FreeBSD__) && defined(_KERNEL)
3132 dsl_dataset_name(ds, newname);
3133 zfsvfs_update_fromname(oldname, newname);
3134 zvol_rename_minors(oldname, newname);
3137 /* move any clone references */
3138 if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
3139 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
3143 for (zap_cursor_init(&zc, dp->dp_meta_objset,
3144 dsl_dataset_phys(ds)->ds_next_clones_obj);
3145 zap_cursor_retrieve(&zc, &za) == 0;
3146 zap_cursor_advance(&zc)) {
3147 dsl_dataset_t *cnds;
3150 if (za.za_first_integer == oldnext_obj) {
3152 * We've already moved the
3153 * origin's reference.
3158 VERIFY0(dsl_dataset_hold_obj(dp,
3159 za.za_first_integer, FTAG, &cnds));
3160 o = dsl_dir_phys(cnds->ds_dir)->
3161 dd_head_dataset_obj;
3163 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3164 dsl_dir_phys(odd)->dd_clones, o, tx));
3165 VERIFY0(zap_add_int(dp->dp_meta_objset,
3166 dsl_dir_phys(dd)->dd_clones, o, tx));
3167 dsl_dataset_rele(cnds, FTAG);
3169 zap_cursor_fini(&zc);
3172 ASSERT(!dsl_prop_hascb(ds));
3175 #if defined(__FreeBSD__) && defined(_KERNEL)
3176 mutex_exit(&spa_namespace_lock);
3178 kmem_free(newname, MAXPATHLEN);
3179 kmem_free(oldname, MAXPATHLEN);
3182 * Change space accounting.
3183 * Note, ddpa->*usedsnap and dd_used_breakdown[SNAP] will either
3184 * both be valid, or both be 0 (resulting in delta == 0). This
3185 * is true for each of {clone,origin} independently.
3188 delta = ddpa->cloneusedsnap -
3189 dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
3190 ASSERT3S(delta, >=, 0);
3191 ASSERT3U(ddpa->used, >=, delta);
3192 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
3193 dsl_dir_diduse_space(dd, DD_USED_HEAD,
3194 ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
3196 delta = ddpa->originusedsnap -
3197 dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
3198 ASSERT3S(delta, <=, 0);
3199 ASSERT3U(ddpa->used, >=, -delta);
3200 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
3201 dsl_dir_diduse_space(odd, DD_USED_HEAD,
3202 -ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
3204 dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
3206 /* log history record */
3207 spa_history_log_internal_ds(hds, "promote", tx, "");
3209 dsl_dir_rele(odd, FTAG);
3210 promote_rele(ddpa, FTAG);
3214 * Make a list of dsl_dataset_t's for the snapshots between first_obj
3215 * (exclusive) and last_obj (inclusive). The list will be in reverse
3216 * order (last_obj will be the list_head()). If first_obj == 0, do all
3217 * snapshots back to this dataset's origin.
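 *
 * Example (added for clarity): for a snapshot chain A(obj 10) ->
 * B(obj 20) -> C(obj 30), snaplist_make(dp, 10, 30, l, tag) yields a
 * list whose head is C and whose tail is B; A itself is excluded.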
3220 snaplist_make(dsl_pool_t *dp,
3221 uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
3223 uint64_t obj = last_obj;
3225 list_create(l, sizeof (struct promotenode),
3226 offsetof(struct promotenode, link));
3228 while (obj != first_obj) {
3230 struct promotenode *snap;
3233 err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
3234 ASSERT(err != ENOENT);
3239 first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
3241 snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
3243 list_insert_tail(l, snap);
3244 obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
3251 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3253 struct promotenode *snap;
3256 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3257 uint64_t used, comp, uncomp;
3258 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3259 mintxg, UINT64_MAX, &used, &comp, &uncomp);
3266 snaplist_destroy(list_t *l, void *tag)
3268 struct promotenode *snap;
3270 if (l == NULL || !list_link_active(&l->list_head))
3273 while ((snap = list_tail(l)) != NULL) {
3274 list_remove(l, snap);
3275 dsl_dataset_rele(snap->ds, tag);
3276 kmem_free(snap, sizeof (*snap));
3282 promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
3286 struct promotenode *snap;
3288 error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
3292 dd = ddpa->ddpa_clone->ds_dir;
3294 if (ddpa->ddpa_clone->ds_is_snapshot ||
3295 !dsl_dir_is_clone(dd)) {
3296 dsl_dataset_rele(ddpa->ddpa_clone, tag);
3297 return (SET_ERROR(EINVAL));
3300 error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
3301 &ddpa->shared_snaps, tag);
3305 error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
3306 &ddpa->clone_snaps, tag);
3310 snap = list_head(&ddpa->shared_snaps);
3311 ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
3312 error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
3313 dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
3314 &ddpa->origin_snaps, tag);
3318 if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
3319 error = dsl_dataset_hold_obj(dp,
3320 dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
3321 tag, &ddpa->origin_origin);
3327 promote_rele(ddpa, tag);
3332 promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
3334 snaplist_destroy(&ddpa->shared_snaps, tag);
3335 snaplist_destroy(&ddpa->clone_snaps, tag);
3336 snaplist_destroy(&ddpa->origin_snaps, tag);
3337 if (ddpa->origin_origin != NULL)
3338 dsl_dataset_rele(ddpa->origin_origin, tag);
3339 dsl_dataset_rele(ddpa->ddpa_clone, tag);
3343 * Promote a clone.
3344 *
3345 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
3346 * in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
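 *
 * Illustrative call (added; not part of the original comment):
 *
 *	char conflsnap[ZFS_MAX_DATASET_NAME_LEN];
 *	int error = dsl_dataset_promote("tank/fs/clone", conflsnap);
 *
 * On EEXIST, conflsnap names one of the conflicting snapshots.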
3349 dsl_dataset_promote(const char *name, char *conflsnap)
3351 dsl_dataset_promote_arg_t ddpa = { 0 };
3354 nvpair_t *snap_pair;
3358 * We will modify space proportional to the number of
3359 * snapshots. Compute numsnaps.
3361 error = dmu_objset_hold(name, FTAG, &os);
3364 error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
3365 dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
3367 dmu_objset_rele(os, FTAG);
3371 ddpa.ddpa_clonename = name;
3372 ddpa.err_ds = fnvlist_alloc();
3375 error = dsl_sync_task(name, dsl_dataset_promote_check,
3376 dsl_dataset_promote_sync, &ddpa,
3377 2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
3380 * Return the first conflicting snapshot found.
3382 snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
3383 if (snap_pair != NULL && conflsnap != NULL)
3384 (void) strcpy(conflsnap, nvpair_name(snap_pair));
3386 fnvlist_free(ddpa.err_ds);
3391 dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
3392 dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
3395 * "slack" factor for received datasets with refquota set on them.
3396 * See the bottom of this function for details on its use.
3398 uint64_t refquota_slack = DMU_MAX_ACCESS * spa_asize_inflation;
3399 int64_t unused_refres_delta;
3401 /* they should both be heads */
3402 if (clone->ds_is_snapshot ||
3403 origin_head->ds_is_snapshot)
3404 return (SET_ERROR(EINVAL));
3406 /* if we are not forcing, the branch point should be just before them */
3407 if (!force && clone->ds_prev != origin_head->ds_prev)
3408 return (SET_ERROR(EINVAL));
3410 /* clone should be the clone (unless they are unrelated) */
3411 if (clone->ds_prev != NULL &&
3412 clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
3413 origin_head->ds_dir != clone->ds_prev->ds_dir)
3414 return (SET_ERROR(EINVAL));
3416 /* the clone should be a child of the origin */
3417 if (clone->ds_dir->dd_parent != origin_head->ds_dir)
3418 return (SET_ERROR(EINVAL));
3420 /* origin_head shouldn't be modified unless 'force' */
3422 dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
3423 return (SET_ERROR(ETXTBSY));
3425 /* origin_head should have no long holds (e.g. is not mounted) */
3426 if (dsl_dataset_handoff_check(origin_head, owner, tx))
3427 return (SET_ERROR(EBUSY));
3429 /* check amount of any unconsumed refreservation */
3430 unused_refres_delta =
3431 (int64_t)MIN(origin_head->ds_reserved,
3432 dsl_dataset_phys(origin_head)->ds_unique_bytes) -
3433 (int64_t)MIN(origin_head->ds_reserved,
3434 dsl_dataset_phys(clone)->ds_unique_bytes);
3436 if (unused_refres_delta > 0 &&
3437 unused_refres_delta >
3438 dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
3439 return (SET_ERROR(ENOSPC));
3442 * The clone can't be too much over the head's refquota.
3444 * To ensure that the entire refquota can be used, we allow one
3445 * transaction to exceed the refquota. Therefore, this check
3446 * needs to also allow for the space referenced to be more than the
3447 * refquota. The maximum amount of space that one transaction can use
3448 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
3449 * overage ensures that we are able to receive a filesystem that
3450 * exceeds the refquota on the source system.
3452 * So that overage is the refquota_slack we use below.
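 *
 * In other words (added for clarity), the swap is allowed as long as
 *
 *	clone's referenced <= origin_head's refquota + refquota_slack
 *
 * where refquota_slack = DMU_MAX_ACCESS * spa_asize_inflation, i.e. at
 * most one worst-case transaction of overage.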
3454 if (origin_head->ds_quota != 0 &&
3455 dsl_dataset_phys(clone)->ds_referenced_bytes >
3456 origin_head->ds_quota + refquota_slack)
3457 return (SET_ERROR(EDQUOT));
3463 dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
3464 dsl_dataset_t *origin, dmu_tx_t *tx)
3466 uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
3467 dsl_pool_t *dp = dmu_tx_pool(tx);
3469 ASSERT(dsl_pool_sync_context(dp));
3471 clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
3472 origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
3474 if (clone_remap_dl_obj != 0) {
3475 dsl_deadlist_close(&clone->ds_remap_deadlist);
3476 dsl_dataset_unset_remap_deadlist_object(clone, tx);
3478 if (origin_remap_dl_obj != 0) {
3479 dsl_deadlist_close(&origin->ds_remap_deadlist);
3480 dsl_dataset_unset_remap_deadlist_object(origin, tx);
3483 if (clone_remap_dl_obj != 0) {
3484 dsl_dataset_set_remap_deadlist_object(origin,
3485 clone_remap_dl_obj, tx);
3486 dsl_deadlist_open(&origin->ds_remap_deadlist,
3487 dp->dp_meta_objset, clone_remap_dl_obj);
3489 if (origin_remap_dl_obj != 0) {
3490 dsl_dataset_set_remap_deadlist_object(clone,
3491 origin_remap_dl_obj, tx);
3492 dsl_deadlist_open(&clone->ds_remap_deadlist,
3493 dp->dp_meta_objset, origin_remap_dl_obj);
3498 dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
3499 dsl_dataset_t *origin_head, dmu_tx_t *tx)
3501 dsl_pool_t *dp = dmu_tx_pool(tx);
3502 int64_t unused_refres_delta;
3504 ASSERT(clone->ds_reserved == 0);
3506 * NOTE: On DEBUG kernels there could be a race between this and
3507 * the check function if spa_asize_inflation is adjusted...
3509 ASSERT(origin_head->ds_quota == 0 ||
3510 dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
3511 DMU_MAX_ACCESS * spa_asize_inflation);
3512 ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
3515 * Swap per-dataset feature flags.
3517 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
3518 if (!(spa_feature_table[f].fi_flags &
3519 ZFEATURE_FLAG_PER_DATASET)) {
3520 ASSERT(!clone->ds_feature_inuse[f]);
3521 ASSERT(!origin_head->ds_feature_inuse[f]);
3525 boolean_t clone_inuse = clone->ds_feature_inuse[f];
3526 boolean_t origin_head_inuse = origin_head->ds_feature_inuse[f];
3529 dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
3530 clone->ds_feature_inuse[f] = B_FALSE;
3532 if (origin_head_inuse) {
3533 dsl_dataset_deactivate_feature(origin_head->ds_object,
3535 origin_head->ds_feature_inuse[f] = B_FALSE;
3538 dsl_dataset_activate_feature(origin_head->ds_object,
3540 origin_head->ds_feature_inuse[f] = B_TRUE;
3542 if (origin_head_inuse) {
3543 dsl_dataset_activate_feature(clone->ds_object, f, tx);
3544 clone->ds_feature_inuse[f] = B_TRUE;
3548 dmu_buf_will_dirty(clone->ds_dbuf, tx);
3549 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3551 if (clone->ds_objset != NULL) {
3552 dmu_objset_evict(clone->ds_objset);
3553 clone->ds_objset = NULL;
3556 if (origin_head->ds_objset != NULL) {
3557 dmu_objset_evict(origin_head->ds_objset);
3558 origin_head->ds_objset = NULL;
3561 unused_refres_delta =
3562 (int64_t)MIN(origin_head->ds_reserved,
3563 dsl_dataset_phys(origin_head)->ds_unique_bytes) -
3564 (int64_t)MIN(origin_head->ds_reserved,
3565 dsl_dataset_phys(clone)->ds_unique_bytes);
3568 * Reset origin's unique bytes, if it exists.
3570 if (clone->ds_prev) {
3571 dsl_dataset_t *origin = clone->ds_prev;
3572 uint64_t comp, uncomp;
3574 dmu_buf_will_dirty(origin->ds_dbuf, tx);
3575 dsl_deadlist_space_range(&clone->ds_deadlist,
3576 dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
3577 &dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
3582 rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
3583 rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
3585 tmp = dsl_dataset_phys(origin_head)->ds_bp;
3586 dsl_dataset_phys(origin_head)->ds_bp =
3587 dsl_dataset_phys(clone)->ds_bp;
3588 dsl_dataset_phys(clone)->ds_bp = tmp;
3589 rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
3590 rrw_exit(&clone->ds_bp_rwlock, FTAG);
3593 /* set dd_*_bytes */
3595 int64_t dused, dcomp, duncomp;
3596 uint64_t cdl_used, cdl_comp, cdl_uncomp;
3597 uint64_t odl_used, odl_comp, odl_uncomp;
3599 ASSERT3U(dsl_dir_phys(clone->ds_dir)->
3600 dd_used_breakdown[DD_USED_SNAP], ==, 0);
3602 dsl_deadlist_space(&clone->ds_deadlist,
3603 &cdl_used, &cdl_comp, &cdl_uncomp);
3604 dsl_deadlist_space(&origin_head->ds_deadlist,
3605 &odl_used, &odl_comp, &odl_uncomp);
3607 dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
3609 (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
3611 dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
3613 (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
3615 duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
3617 (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
3620 dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
3621 dused, dcomp, duncomp, tx);
3622 dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
3623 -dused, -dcomp, -duncomp, tx);
3626 * The difference in the space used by snapshots is the
3627 * difference in snapshot space due to the head's
3628 * deadlist (since that's the only thing that's
3629 * changing that affects the snapused).
3631 dsl_deadlist_space_range(&clone->ds_deadlist,
3632 origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
3633 &cdl_used, &cdl_comp, &cdl_uncomp);
3634 dsl_deadlist_space_range(&origin_head->ds_deadlist,
3635 origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
3636 &odl_used, &odl_comp, &odl_uncomp);
3637 dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
3638 DD_USED_HEAD, DD_USED_SNAP, NULL);
3641 /* swap ds_*_bytes */
3642 SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
3643 dsl_dataset_phys(clone)->ds_referenced_bytes);
3644 SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
3645 dsl_dataset_phys(clone)->ds_compressed_bytes);
3646 SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
3647 dsl_dataset_phys(clone)->ds_uncompressed_bytes);
3648 SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
3649 dsl_dataset_phys(clone)->ds_unique_bytes);
3651 /* apply any parent delta for change in unconsumed refreservation */
3652 dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
3653 unused_refres_delta, 0, 0, tx);
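/* swap the deadlists and reopen them on the exchanged objects */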
3658 dsl_deadlist_close(&clone->ds_deadlist);
3659 dsl_deadlist_close(&origin_head->ds_deadlist);
3660 SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
3661 dsl_dataset_phys(clone)->ds_deadlist_obj);
3662 dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
3663 dsl_dataset_phys(clone)->ds_deadlist_obj);
3664 dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
3665 dsl_dataset_phys(origin_head)->ds_deadlist_obj);
3666 dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
3668 dsl_scan_ds_clone_swapped(origin_head, clone, tx);
3670 spa_history_log_internal_ds(clone, "clone swap", tx,
3671 "parent=%s", origin_head->ds_dir->dd_myname);
3675 * Given a pool name and a dataset object number in that pool,
3676 * return the name of that dataset.
3679 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3685 error = dsl_pool_hold(pname, FTAG, &dp);
3689 error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
3691 dsl_dataset_name(ds, buf);
3692 dsl_dataset_rele(ds, FTAG);
3694 dsl_pool_rele(dp, FTAG);
3700 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3701 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3705 ASSERT3S(asize, >, 0);
3708 * *ref_rsrv is the portion of asize that will come from any
3709 * unconsumed refreservation space.
3713 mutex_enter(&ds->ds_lock);
3715 * Make a space adjustment for reserved bytes.
3717 if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
3719 ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
3721 (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
3723 asize - MIN(asize, parent_delta(ds, asize + inflight));
3726 if (!check_quota || ds->ds_quota == 0) {
3727 mutex_exit(&ds->ds_lock);
3731 * If they are requesting more space, and our current estimate
3732 * is over quota, they get to try again unless the actual
3733 * on-disk is over quota and there are no pending changes (which
3734 * may free up space for us).
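 *
 * Sketch of the outcomes (added for clarity): once on-disk referenced
 * plus inflight reaches the quota, we return ERESTART if writes are in
 * flight or on-disk referenced is still under quota (pending frees may
 * make room), and EDQUOT otherwise.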
3736 if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
3739 dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
3740 error = SET_ERROR(ERESTART);
3742 error = SET_ERROR(EDQUOT);
3744 mutex_exit(&ds->ds_lock);
3749 typedef struct dsl_dataset_set_qr_arg {
3750 const char *ddsqra_name;
3751 zprop_source_t ddsqra_source;
3752 uint64_t ddsqra_value;
3753 } dsl_dataset_set_qr_arg_t;
3758 dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
3760 dsl_dataset_set_qr_arg_t *ddsqra = arg;
3761 dsl_pool_t *dp = dmu_tx_pool(tx);
3766 if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
3767 return (SET_ERROR(ENOTSUP));
3769 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
3773 if (ds->ds_is_snapshot) {
3774 dsl_dataset_rele(ds, FTAG);
3775 return (SET_ERROR(EINVAL));
3778 error = dsl_prop_predict(ds->ds_dir,
3779 zfs_prop_to_name(ZFS_PROP_REFQUOTA),
3780 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
3782 dsl_dataset_rele(ds, FTAG);
3787 dsl_dataset_rele(ds, FTAG);
3791 if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
3792 newval < ds->ds_reserved) {
3793 dsl_dataset_rele(ds, FTAG);
3794 return (SET_ERROR(ENOSPC));
3797 dsl_dataset_rele(ds, FTAG);
3802 dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
3804 dsl_dataset_set_qr_arg_t *ddsqra = arg;
3805 dsl_pool_t *dp = dmu_tx_pool(tx);
3809 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
3811 dsl_prop_set_sync_impl(ds,
3812 zfs_prop_to_name(ZFS_PROP_REFQUOTA),
3813 ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
3814 &ddsqra->ddsqra_value, tx);
3816 VERIFY0(dsl_prop_get_int_ds(ds,
3817 zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
3819 if (ds->ds_quota != newval) {
3820 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3821 ds->ds_quota = newval;
3823 dsl_dataset_rele(ds, FTAG);
3827 dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
3830 dsl_dataset_set_qr_arg_t ddsqra;
3832 ddsqra.ddsqra_name = dsname;
3833 ddsqra.ddsqra_source = source;
3834 ddsqra.ddsqra_value = refquota;
3836 return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
3837 dsl_dataset_set_refquota_sync, &ddsqra, 0,
3838 ZFS_SPACE_CHECK_EXTRA_RESERVED));
3842 dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
3844 dsl_dataset_set_qr_arg_t *ddsqra = arg;
3845 dsl_pool_t *dp = dmu_tx_pool(tx);
3848 uint64_t newval, unique;
3850 if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
3851 return (SET_ERROR(ENOTSUP));
3853 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
3857 if (ds->ds_is_snapshot) {
3858 dsl_dataset_rele(ds, FTAG);
3859 return (SET_ERROR(EINVAL));
3862 error = dsl_prop_predict(ds->ds_dir,
3863 zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
3864 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
3866 dsl_dataset_rele(ds, FTAG);
3871 * If we are doing the preliminary check in open context, the
3872 * space estimates may be inaccurate.
3874 if (!dmu_tx_is_syncing(tx)) {
3875 dsl_dataset_rele(ds, FTAG);
3879 mutex_enter(&ds->ds_lock);
3880 if (!DS_UNIQUE_IS_ACCURATE(ds))
3881 dsl_dataset_recalc_head_uniq(ds);
3882 unique = dsl_dataset_phys(ds)->ds_unique_bytes;
3883 mutex_exit(&ds->ds_lock);
3885 if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
3886 uint64_t delta = MAX(unique, newval) -
3887 MAX(unique, ds->ds_reserved);
3890 dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
3891 (ds->ds_quota > 0 && newval > ds->ds_quota)) {
3892 dsl_dataset_rele(ds, FTAG);
3893 return (SET_ERROR(ENOSPC));
3897 dsl_dataset_rele(ds, FTAG);
3902 dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
3903 zprop_source_t source, uint64_t value, dmu_tx_t *tx)
3909 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
3910 source, sizeof (value), 1, &value, tx);
3912 VERIFY0(dsl_prop_get_int_ds(ds,
3913 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
3915 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3916 mutex_enter(&ds->ds_dir->dd_lock);
3917 mutex_enter(&ds->ds_lock);
3918 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3919 unique = dsl_dataset_phys(ds)->ds_unique_bytes;
3920 delta = MAX(0, (int64_t)(newval - unique)) -
3921 MAX(0, (int64_t)(ds->ds_reserved - unique));
3922 ds->ds_reserved = newval;
3923 mutex_exit(&ds->ds_lock);
3925 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3926 mutex_exit(&ds->ds_dir->dd_lock);
3930 dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
3932 dsl_dataset_set_qr_arg_t *ddsqra = arg;
3933 dsl_pool_t *dp = dmu_tx_pool(tx);
3936 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
3937 dsl_dataset_set_refreservation_sync_impl(ds,
3938 ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
3939 dsl_dataset_rele(ds, FTAG);
3943 dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
3944 uint64_t refreservation)
3946 dsl_dataset_set_qr_arg_t ddsqra;
3948 ddsqra.ddsqra_name = dsname;
3949 ddsqra.ddsqra_source = source;
3950 ddsqra.ddsqra_value = refreservation;
3952 return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
3953 dsl_dataset_set_refreservation_sync, &ddsqra, 0,
3954 ZFS_SPACE_CHECK_EXTRA_RESERVED));
3958 * Return (in *usedp) the amount of space written in new that is not
3959 * present in oldsnap. New may be a snapshot or the head. Old must be
3960 * a snapshot before new, in new's filesystem (or its origin). If not then
3961 * fail and return EINVAL.
3963 * The written space is calculated by considering two components: First, we
3964 * ignore any freed space, and calculate the written as new's used space
3965 * minus old's used space. Next, we add in the amount of space that was freed
3966 * between the two snapshots, thus reducing new's used space relative to old's.
3967 * Specifically, this is the space that was born before old->ds_creation_txg,
3968 * and freed before new (ie. on new's deadlist or a previous deadlist).
3970 * space freed                         [---------------------]
3971 * snapshots                       ---O-------O--------O-------O------
3972 *                                         oldsnap            new
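 *
 * Worked example (added for clarity): if new references 100G and
 * oldsnap referenced 80G, the first component is 20G; if another 5G of
 * blocks born before oldsnap were freed between oldsnap and new, the
 * reported written space is 25G.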
3975 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
3976 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
3980 dsl_pool_t *dp = new->ds_dir->dd_pool;
3982 ASSERT(dsl_pool_config_held(dp));
3985 *usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
3986 *usedp -= dsl_dataset_phys(oldsnap)->ds_referenced_bytes;
3989 *compp += dsl_dataset_phys(new)->ds_compressed_bytes;
3990 *compp -= dsl_dataset_phys(oldsnap)->ds_compressed_bytes;
3993 *uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
3994 *uncompp -= dsl_dataset_phys(oldsnap)->ds_uncompressed_bytes;
3996 snapobj = new->ds_object;
3997 while (snapobj != oldsnap->ds_object) {
3998 dsl_dataset_t *snap;
3999 uint64_t used, comp, uncomp;
4001 if (snapobj == new->ds_object) {
4004 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4009 if (dsl_dataset_phys(snap)->ds_prev_snap_txg ==
4010 dsl_dataset_phys(oldsnap)->ds_creation_txg) {
4012 * The blocks in the deadlist cannot be born after
4013 * ds_prev_snap_txg, so get the whole deadlist space,
4014 * which is more efficient (especially for old-format
4015 * deadlists). Unfortunately the deadlist code
4016 * doesn't have enough information to make this
4017 * optimization itself.
4019 dsl_deadlist_space(&snap->ds_deadlist,
4020 &used, &comp, &uncomp);
4022 dsl_deadlist_space_range(&snap->ds_deadlist,
4023 0, dsl_dataset_phys(oldsnap)->ds_creation_txg,
4024 &used, &comp, &uncomp);
4031 * If we get to the beginning of the chain of snapshots
4032 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4033 * was not a snapshot of/before new.
4035 snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
4037 dsl_dataset_rele(snap, FTAG);
4039 err = SET_ERROR(EINVAL);
4048 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4049 * lastsnap, and all snapshots in between are deleted.
4051 * blocks that would be freed         [---------------------------]
4052 * snapshots                      ---O-------O--------O-------O--------O
4053 *                                      firstsnap        lastsnap
4055 * This is the set of blocks that were born after the snap before firstsnap,
4056 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4057 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4058 * We calculate this by iterating over the relevant deadlists (from the snap
4059 * after lastsnap, backward to the snap after firstsnap), summing up the
4060 * space on the deadlist that was born after the snap before firstsnap.
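 *
 * Example (added for clarity): a block born between the snap before
 * firstsnap and firstsnap, and freed between lastsnap and the snapshot
 * after it, is counted; a block still referenced by the snapshot after
 * lastsnap is not.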
4063 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4064 dsl_dataset_t *lastsnap,
4065 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4069 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4071 ASSERT(firstsnap->ds_is_snapshot);
4072 ASSERT(lastsnap->ds_is_snapshot);
4075 * Check that the snapshots are in the same dsl_dir, and firstsnap
4076 * is before lastsnap.
4078 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4079 dsl_dataset_phys(firstsnap)->ds_creation_txg >
4080 dsl_dataset_phys(lastsnap)->ds_creation_txg)
4081 return (SET_ERROR(EINVAL));
4083 *usedp = *compp = *uncompp = 0;
4085 snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
4086 while (snapobj != firstsnap->ds_object) {
4088 uint64_t used, comp, uncomp;
4090 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4094 dsl_deadlist_space_range(&ds->ds_deadlist,
4095 dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
4096 &used, &comp, &uncomp);
4101 snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
4102 ASSERT3U(snapobj, !=, 0);
4103 dsl_dataset_rele(ds, FTAG);
4109 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
4110 * For example, they could both be snapshots of the same filesystem, and
4111 * 'earlier' is before 'later'. Or 'earlier' could be the origin of
4112 * 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
4113 * filesystem. Or 'earlier' could be the origin's origin.
4115 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
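 *
 * Example (added for clarity): for a clone "tank/clone" created from
 * "tank/fs@snap1", both "tank/fs@snap1" and any older snapshot of
 * "tank/fs" are "before" tank/clone, while a "tank/fs@snap2" taken
 * later is not.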
4118 dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
4119 uint64_t earlier_txg)
4121 dsl_pool_t *dp = later->ds_dir->dd_pool;
4125 ASSERT(dsl_pool_config_held(dp));
4126 ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
4128 if (earlier_txg == 0)
4129 earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
4131 if (later->ds_is_snapshot &&
4132 earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
4135 if (later->ds_dir == earlier->ds_dir)
4137 if (!dsl_dir_is_clone(later->ds_dir))
4140 if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
4142 dsl_dataset_t *origin;
4143 error = dsl_dataset_hold_obj(dp,
4144 dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
4147 ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
4148 dsl_dataset_rele(origin, FTAG);
4153 dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
4155 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
4156 dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
4160 dsl_dataset_is_zapified(dsl_dataset_t *ds)
4162 dmu_object_info_t doi;
4164 dmu_object_info_from_db(ds->ds_dbuf, &doi);
4165 return (doi.doi_type == DMU_OTN_ZAP_METADATA);
4169 dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
4171 return (dsl_dataset_is_zapified(ds) &&
4172 zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
4173 ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
4177 dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
4179 uint64_t remap_deadlist_obj;
4182 if (!dsl_dataset_is_zapified(ds))
4185 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
4186 DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
4187 &remap_deadlist_obj);
4190 VERIFY3S(err, ==, ENOENT);
4194 ASSERT(remap_deadlist_obj != 0);
4195 return (remap_deadlist_obj);
4199 dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
4201 EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
4202 dsl_dataset_get_remap_deadlist_object(ds) != 0);
4203 return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
4207 dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
4211 dsl_dataset_zapify(ds, tx);
4212 VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
4213 DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
4217 dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
4219 VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
4220 ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
4224 dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
4226 uint64_t remap_deadlist_object;
4227 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
4229 ASSERT(dmu_tx_is_syncing(tx));
4230 ASSERT(dsl_dataset_remap_deadlist_exists(ds));
4232 remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
4233 dsl_deadlist_close(&ds->ds_remap_deadlist);
4234 dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
4235 dsl_dataset_unset_remap_deadlist_object(ds, tx);
4236 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
4240 dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
4242 uint64_t remap_deadlist_obj;
4243 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
4245 ASSERT(dmu_tx_is_syncing(tx));
4246 ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
4248 * Currently we only create remap deadlists when there are indirect
4249 * vdevs with referenced mappings.
4251 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
4253 remap_deadlist_obj = dsl_deadlist_clone(
4254 &ds->ds_deadlist, UINT64_MAX,
4255 dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
4256 dsl_dataset_set_remap_deadlist_object(ds,
4257 remap_deadlist_obj, tx);
4258 dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
4259 remap_deadlist_obj);
4260 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);