4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2014 RackTop Systems.
27 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
29 * Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
30 * Copyright 2017 Nexenta Systems, Inc.
31 * Copyright (c) 2019, Klara Inc.
32 * Copyright (c) 2019, Allan Jude
33 * Copyright (c) 2020 The FreeBSD Foundation [1]
35 * [1] Portions of this software were developed by Allan Jude
36 * under sponsorship from the FreeBSD Foundation.
39 #include <sys/dmu_objset.h>
40 #include <sys/dsl_dataset.h>
41 #include <sys/dsl_dir.h>
42 #include <sys/dsl_prop.h>
43 #include <sys/dsl_synctask.h>
44 #include <sys/dmu_traverse.h>
45 #include <sys/dmu_impl.h>
46 #include <sys/dmu_tx.h>
50 #include <sys/zfeature.h>
51 #include <sys/unique.h>
52 #include <sys/zfs_context.h>
53 #include <sys/zfs_ioctl.h>
55 #include <sys/spa_impl.h>
57 #include <sys/zfs_znode.h>
58 #include <sys/zfs_onexit.h>
60 #include <sys/dsl_scan.h>
61 #include <sys/dsl_deadlist.h>
62 #include <sys/dsl_destroy.h>
63 #include <sys/dsl_userhold.h>
64 #include <sys/dsl_bookmark.h>
65 #include <sys/policy.h>
66 #include <sys/dmu_send.h>
67 #include <sys/dmu_recv.h>
68 #include <sys/zio_compress.h>
69 #include <zfs_fletcher.h>
70 #include <sys/zio_checksum.h>
73 * The SPA supports block sizes up to 16MB. However, very large blocks
74 * can have an impact on i/o latency (e.g. tying up a spinning disk for
75 * ~300ms), and also potentially on the memory allocator. Therefore,
76 * we do not allow the recordsize to be set larger than zfs_max_recordsize
77 * (default 1MB). Larger blocks can be created by changing this tunable,
78 * and pools with larger blocks can always be imported and used, regardless of this setting.
81 int zfs_max_recordsize = 1 * 1024 * 1024;
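/*
 * For illustration (not part of the original source): on Linux builds this
 * tunable is typically exposed as the "zfs_max_recordsize" module parameter.
 * With the default 1MB cap above, a request such as
 * "zfs set recordsize=4M pool/fs" is rejected until the tunable is raised;
 * pools that already contain larger blocks remain importable either way.
 */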
82 int zfs_allow_redacted_dataset_mount = 0;
84 #define SWITCH64(x, y) \
86 uint64_t __tmp = (x); \
91 #define DS_REF_MAX (1ULL << 62)
93 extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
95 static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
96 uint64_t obj, dmu_tx_t *tx);
97 static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
100 static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
102 extern int spa_asize_inflation;
104 static zil_header_t zero_zil;
107 * Figure out how much of this delta should be propagated to the dsl_dir
108 * layer. If there's a refreservation, that space has already been
109 * partially accounted for in our ancestors.
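 *
 * For example, with ds_reserved = 10M and ds_unique_bytes = 7M, a +2M delta
 * leaves MAX(unique, reserved) at 10M, so nothing is propagated to the
 * dsl_dir (the reservation already accounted for it); with
 * ds_unique_bytes = 9M the same +2M delta moves the MAX from 10M to 11M,
 * so only 1M is propagated.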
112 parent_delta(dsl_dataset_t *ds, int64_t delta)
114 dsl_dataset_phys_t *ds_phys;
115 uint64_t old_bytes, new_bytes;
117 if (ds->ds_reserved == 0)
120 ds_phys = dsl_dataset_phys(ds);
121 old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
122 new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
124 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
125 return (new_bytes - old_bytes);
129 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
131 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
132 int used = bp_get_dsize_sync(spa, bp);
133 int compressed = BP_GET_PSIZE(bp);
134 int uncompressed = BP_GET_UCSIZE(bp);
138 dprintf_bp(bp, "ds=%p", ds);
140 ASSERT(dmu_tx_is_syncing(tx));
141 /* It could have been compressed away to nothing */
142 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
144 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
145 ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
147 dsl_pool_mos_diduse_space(tx->tx_pool,
148 used, compressed, uncompressed);
152 ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
153 dmu_buf_will_dirty(ds->ds_dbuf, tx);
154 mutex_enter(&ds->ds_lock);
155 delta = parent_delta(ds, used);
156 dsl_dataset_phys(ds)->ds_referenced_bytes += used;
157 dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
158 dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
159 dsl_dataset_phys(ds)->ds_unique_bytes += used;
161 if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
162 ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
167 f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
168 if (f != SPA_FEATURE_NONE) {
169 ASSERT3S(spa_feature_table[f].fi_type, ==,
170 ZFEATURE_TYPE_BOOLEAN);
171 ds->ds_feature_activation[f] = (void *)B_TRUE;
174 f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
175 if (f != SPA_FEATURE_NONE) {
176 ASSERT3S(spa_feature_table[f].fi_type, ==,
177 ZFEATURE_TYPE_BOOLEAN);
178 ds->ds_feature_activation[f] = (void *)B_TRUE;
182 * Track block for livelist, but ignore embedded blocks because
183 * they do not need to be freed.
185 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
186 bp->blk_birth > ds->ds_dir->dd_origin_txg &&
187 !(BP_IS_EMBEDDED(bp))) {
188 ASSERT(dsl_dir_is_clone(ds->ds_dir));
189 ASSERT(spa_feature_is_enabled(spa,
190 SPA_FEATURE_LIVELIST));
191 bplist_append(&ds->ds_dir->dd_pending_allocs, bp);
194 mutex_exit(&ds->ds_lock);
195 dsl_dir_diduse_transfer_space(ds->ds_dir, delta,
196 compressed, uncompressed, used,
197 DD_USED_REFRSRV, DD_USED_HEAD, tx);
201 * Called when the specified segment has been remapped, and is thus no
202 * longer referenced in the head dataset. The vdev must be indirect.
204 * If the segment is referenced by a snapshot, put it on the remap deadlist.
205 * Otherwise, add this segment to the obsolete spacemap.
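 *
 * For example, if the newest snapshot of this head was taken at txg 1000,
 * a remapped segment born at txg 1200 can only be referenced by the head
 * and is marked obsolete immediately, while a segment born at txg 800 may
 * still be referenced by that snapshot, so a "fake" blkptr carrying the
 * (vdev, offset, asize, birth) tuple is inserted into the remap deadlist.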
208 dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
209 uint64_t size, uint64_t birth, dmu_tx_t *tx)
211 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
213 ASSERT(dmu_tx_is_syncing(tx));
214 ASSERT(birth <= tx->tx_txg);
215 ASSERT(!ds->ds_is_snapshot);
217 if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
218 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
221 dva_t *dva = &fakebp.blk_dva[0];
225 mutex_enter(&ds->ds_remap_deadlist_lock);
226 if (!dsl_dataset_remap_deadlist_exists(ds)) {
227 dsl_dataset_create_remap_deadlist(ds, tx);
229 mutex_exit(&ds->ds_remap_deadlist_lock);
232 fakebp.blk_birth = birth;
233 DVA_SET_VDEV(dva, vdev);
234 DVA_SET_OFFSET(dva, offset);
235 DVA_SET_ASIZE(dva, size);
236 dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, B_FALSE,
242 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
245 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
247 int used = bp_get_dsize_sync(spa, bp);
248 int compressed = BP_GET_PSIZE(bp);
249 int uncompressed = BP_GET_UCSIZE(bp);
251 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
254 ASSERT(dmu_tx_is_syncing(tx));
255 ASSERT(bp->blk_birth <= tx->tx_txg);
258 dsl_free(tx->tx_pool, tx->tx_txg, bp);
259 dsl_pool_mos_diduse_space(tx->tx_pool,
260 -used, -compressed, -uncompressed);
263 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
265 ASSERT(!ds->ds_is_snapshot);
266 dmu_buf_will_dirty(ds->ds_dbuf, tx);
269 * Track block for livelist, but ignore embedded blocks because
270 * they do not need to be freed.
272 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
273 bp->blk_birth > ds->ds_dir->dd_origin_txg &&
274 !(BP_IS_EMBEDDED(bp))) {
275 ASSERT(dsl_dir_is_clone(ds->ds_dir));
276 ASSERT(spa_feature_is_enabled(spa,
277 SPA_FEATURE_LIVELIST));
278 bplist_append(&ds->ds_dir->dd_pending_frees, bp);
281 if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
284 dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
285 dsl_free(tx->tx_pool, tx->tx_txg, bp);
287 mutex_enter(&ds->ds_lock);
288 ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
289 !DS_UNIQUE_IS_ACCURATE(ds));
290 delta = parent_delta(ds, -used);
291 dsl_dataset_phys(ds)->ds_unique_bytes -= used;
292 mutex_exit(&ds->ds_lock);
293 dsl_dir_diduse_transfer_space(ds->ds_dir,
294 delta, -compressed, -uncompressed, -used,
295 DD_USED_REFRSRV, DD_USED_HEAD, tx);
297 dprintf_bp(bp, "putting on dead list: %s", "");
300 * We are here as part of zio's write done callback,
301 * which means we're a zio interrupt thread. We can't
302 * call dsl_deadlist_insert() now because it may block
303 * waiting for I/O. Instead, put bp on the deferred
304 * queue and let dsl_pool_sync() finish the job.
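 *
 * For example, with the newest snapshot at txg 1000, a block born at
 * txg 900 is still referenced by that snapshot: on this asynchronous
 * path it is queued on ds_pending_deadlist and inserted into the
 * on-disk deadlist later by dsl_pool_sync(), whereas a block born at
 * txg 1100 was never captured by a snapshot and is freed immediately
 * in the branch above.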
306 bplist_append(&ds->ds_pending_deadlist, bp);
308 dsl_deadlist_insert(&ds->ds_deadlist, bp, B_FALSE, tx);
310 ASSERT3U(ds->ds_prev->ds_object, ==,
311 dsl_dataset_phys(ds)->ds_prev_snap_obj);
312 ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
313 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
314 if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
315 ds->ds_object && bp->blk_birth >
316 dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
317 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
318 mutex_enter(&ds->ds_prev->ds_lock);
319 dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
320 mutex_exit(&ds->ds_prev->ds_lock);
322 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
323 dsl_dir_transfer_space(ds->ds_dir, used,
324 DD_USED_HEAD, DD_USED_SNAP, tx);
328 dsl_bookmark_block_killed(ds, bp, tx);
330 mutex_enter(&ds->ds_lock);
331 ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
332 dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
333 ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
334 dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
335 ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
336 dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
337 mutex_exit(&ds->ds_lock);
342 struct feature_type_uint64_array_arg {
348 unload_zfeature(dsl_dataset_t *ds, spa_feature_t f)
350 switch (spa_feature_table[f].fi_type) {
351 case ZFEATURE_TYPE_BOOLEAN:
353 case ZFEATURE_TYPE_UINT64_ARRAY:
355 struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
356 kmem_free(ftuaa->array, ftuaa->length * sizeof (uint64_t));
357 kmem_free(ftuaa, sizeof (*ftuaa));
361 panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
366 load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
369 switch (spa_feature_table[f].fi_type) {
370 case ZFEATURE_TYPE_BOOLEAN:
371 err = zap_contains(mos, ds->ds_object,
372 spa_feature_table[f].fi_guid);
374 ds->ds_feature[f] = (void *)B_TRUE;
376 ASSERT3U(err, ==, ENOENT);
380 case ZFEATURE_TYPE_UINT64_ARRAY:
382 uint64_t int_size, num_int;
384 err = zap_length(mos, ds->ds_object,
385 spa_feature_table[f].fi_guid, &int_size, &num_int);
387 ASSERT3U(err, ==, ENOENT);
391 ASSERT3U(int_size, ==, sizeof (uint64_t));
392 data = kmem_alloc(int_size * num_int, KM_SLEEP);
393 VERIFY0(zap_lookup(mos, ds->ds_object,
394 spa_feature_table[f].fi_guid, int_size, num_int, data));
395 struct feature_type_uint64_array_arg *ftuaa =
396 kmem_alloc(sizeof (*ftuaa), KM_SLEEP);
397 ftuaa->length = num_int;
399 ds->ds_feature[f] = ftuaa;
403 panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
409 * We have to release the fsid synchronously or we risk that a subsequent
410 * mount of the same dataset will fail to unique_insert the fsid. This
411 * failure would manifest itself as the fsid of this dataset changing
412 * between mounts which makes NFS clients quite unhappy.
415 dsl_dataset_evict_sync(void *dbu)
417 dsl_dataset_t *ds = dbu;
419 ASSERT(ds->ds_owner == NULL);
421 unique_remove(ds->ds_fsid_guid);
425 dsl_dataset_evict_async(void *dbu)
427 dsl_dataset_t *ds = dbu;
429 ASSERT(ds->ds_owner == NULL);
433 if (ds->ds_objset != NULL)
434 dmu_objset_evict(ds->ds_objset);
437 dsl_dataset_rele(ds->ds_prev, ds);
441 dsl_bookmark_fini_ds(ds);
443 bplist_destroy(&ds->ds_pending_deadlist);
444 if (dsl_deadlist_is_open(&ds->ds_deadlist))
445 dsl_deadlist_close(&ds->ds_deadlist);
446 if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
447 dsl_deadlist_close(&ds->ds_remap_deadlist);
449 dsl_dir_async_rele(ds->ds_dir, ds);
451 ASSERT(!list_link_active(&ds->ds_synced_link));
453 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
454 if (dsl_dataset_feature_is_active(ds, f))
455 unload_zfeature(ds, f);
458 list_destroy(&ds->ds_prop_cbs);
459 mutex_destroy(&ds->ds_lock);
460 mutex_destroy(&ds->ds_opening_lock);
461 mutex_destroy(&ds->ds_sendstream_lock);
462 mutex_destroy(&ds->ds_remap_deadlist_lock);
463 zfs_refcount_destroy(&ds->ds_longholds);
464 rrw_destroy(&ds->ds_bp_rwlock);
466 kmem_free(ds, sizeof (dsl_dataset_t));
470 dsl_dataset_get_snapname(dsl_dataset_t *ds)
472 dsl_dataset_phys_t *headphys;
475 dsl_pool_t *dp = ds->ds_dir->dd_pool;
476 objset_t *mos = dp->dp_meta_objset;
478 if (ds->ds_snapname[0])
480 if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
483 err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
487 headphys = headdbuf->db_data;
488 err = zap_value_search(dp->dp_meta_objset,
489 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
490 if (err != 0 && zfs_recover == B_TRUE) {
492 (void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
493 "SNAPOBJ=%llu-ERR=%d",
494 (unsigned long long)ds->ds_object, err);
496 dmu_buf_rele(headdbuf, FTAG);
501 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
503 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
504 uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
508 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
511 err = zap_lookup_norm(mos, snapobj, name, 8, 1,
512 value, mt, NULL, 0, NULL);
513 if (err == ENOTSUP && (mt & MT_NORMALIZE))
514 err = zap_lookup(mos, snapobj, name, 8, 1, value);
519 dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
522 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
523 uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
527 dsl_dir_snap_cmtime_update(ds->ds_dir);
529 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
532 err = zap_remove_norm(mos, snapobj, name, mt, tx);
533 if (err == ENOTSUP && (mt & MT_NORMALIZE))
534 err = zap_remove(mos, snapobj, name, tx);
536 if (err == 0 && adj_cnt)
537 dsl_fs_ss_count_adjust(ds->ds_dir, -1,
538 DD_FIELD_SNAPSHOT_COUNT, tx);
544 dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
546 dmu_buf_t *dbuf = ds->ds_dbuf;
547 boolean_t result = B_FALSE;
549 if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
550 ds->ds_object, DMU_BONUS_BLKID, tag)) {
552 if (ds == dmu_buf_get_user(dbuf))
555 dmu_buf_rele(dbuf, tag);
562 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
565 objset_t *mos = dp->dp_meta_objset;
569 dmu_object_info_t doi;
571 ASSERT(dsl_pool_config_held(dp));
573 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
577 /* Make sure dsobj has the correct object type. */
578 dmu_object_info_from_db(dbuf, &doi);
579 if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
580 dmu_buf_rele(dbuf, tag);
581 return (SET_ERROR(EINVAL));
584 ds = dmu_buf_get_user(dbuf);
586 dsl_dataset_t *winner = NULL;
588 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
590 ds->ds_object = dsobj;
591 ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
592 list_link_init(&ds->ds_synced_link);
594 err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
595 NULL, ds, &ds->ds_dir);
597 kmem_free(ds, sizeof (dsl_dataset_t));
598 dmu_buf_rele(dbuf, tag);
602 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
603 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
604 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
605 mutex_init(&ds->ds_remap_deadlist_lock,
606 NULL, MUTEX_DEFAULT, NULL);
607 rrw_init(&ds->ds_bp_rwlock, B_FALSE);
608 zfs_refcount_create(&ds->ds_longholds);
610 bplist_create(&ds->ds_pending_deadlist);
612 list_create(&ds->ds_sendstreams, sizeof (dmu_sendstatus_t),
613 offsetof(dmu_sendstatus_t, dss_link));
615 list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
616 offsetof(dsl_prop_cb_record_t, cbr_ds_node));
618 if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
621 for (f = 0; f < SPA_FEATURES; f++) {
622 if (!(spa_feature_table[f].fi_flags &
623 ZFEATURE_FLAG_PER_DATASET))
625 err = load_zfeature(mos, ds, f);
629 if (!ds->ds_is_snapshot) {
630 ds->ds_snapname[0] = '\0';
631 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
632 err = dsl_dataset_hold_obj(dp,
633 dsl_dataset_phys(ds)->ds_prev_snap_obj,
636 err = dsl_bookmark_init_ds(ds);
638 if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
639 err = dsl_dataset_get_snapname(ds);
641 dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
643 ds->ds_dir->dd_pool->dp_meta_objset,
644 dsl_dataset_phys(ds)->ds_userrefs_obj,
649 if (err == 0 && !ds->ds_is_snapshot) {
650 err = dsl_prop_get_int_ds(ds,
651 zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
654 err = dsl_prop_get_int_ds(ds,
655 zfs_prop_to_name(ZFS_PROP_REFQUOTA),
659 ds->ds_reserved = ds->ds_quota = 0;
662 if (err == 0 && ds->ds_dir->dd_crypto_obj != 0 &&
663 ds->ds_is_snapshot &&
664 zap_contains(mos, dsobj, DS_FIELD_IVSET_GUID) != 0) {
665 dp->dp_spa->spa_errata =
666 ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
669 dsl_deadlist_open(&ds->ds_deadlist,
670 mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
671 uint64_t remap_deadlist_obj =
672 dsl_dataset_get_remap_deadlist_object(ds);
673 if (remap_deadlist_obj != 0) {
674 dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
678 dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
679 dsl_dataset_evict_async, &ds->ds_dbuf);
681 winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);
683 if (err != 0 || winner != NULL) {
684 bplist_destroy(&ds->ds_pending_deadlist);
685 dsl_deadlist_close(&ds->ds_deadlist);
686 if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
687 dsl_deadlist_close(&ds->ds_remap_deadlist);
688 dsl_bookmark_fini_ds(ds);
690 dsl_dataset_rele(ds->ds_prev, ds);
691 dsl_dir_rele(ds->ds_dir, ds);
692 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
693 if (dsl_dataset_feature_is_active(ds, f))
694 unload_zfeature(ds, f);
697 list_destroy(&ds->ds_prop_cbs);
698 list_destroy(&ds->ds_sendstreams);
699 mutex_destroy(&ds->ds_lock);
700 mutex_destroy(&ds->ds_opening_lock);
701 mutex_destroy(&ds->ds_sendstream_lock);
702 mutex_destroy(&ds->ds_remap_deadlist_lock);
703 zfs_refcount_destroy(&ds->ds_longholds);
704 rrw_destroy(&ds->ds_bp_rwlock);
705 kmem_free(ds, sizeof (dsl_dataset_t));
707 dmu_buf_rele(dbuf, tag);
713 unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
714 if (ds->ds_fsid_guid !=
715 dsl_dataset_phys(ds)->ds_fsid_guid) {
716 zfs_dbgmsg("ds_fsid_guid changed from "
717 "%llx to %llx for pool %s dataset id %llu",
719 dsl_dataset_phys(ds)->ds_fsid_guid,
720 (long long)ds->ds_fsid_guid,
721 spa_name(dp->dp_spa),
722 (u_longlong_t)dsobj);
727 ASSERT3P(ds->ds_dbuf, ==, dbuf);
728 ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
729 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
730 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
731 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
738 dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
740 dsl_dir_t *dd = ds->ds_dir;
742 if (dd->dd_crypto_obj == 0)
745 return (spa_keystore_create_mapping(dd->dd_pool->dp_spa,
746 ds, ds, &ds->ds_key_mapping));
750 dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
751 ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
755 err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
759 ASSERT3P(*dsp, !=, NULL);
761 if (flags & DS_HOLD_FLAG_DECRYPT) {
762 err = dsl_dataset_create_key_mapping(*dsp);
764 dsl_dataset_rele(*dsp, tag);
771 dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
772 void *tag, dsl_dataset_t **dsp)
775 const char *snapname;
780 err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
784 ASSERT(dsl_pool_config_held(dp));
785 obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
787 err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
789 err = SET_ERROR(ENOENT);
791 /* we may be looking for a snapshot */
792 if (err == 0 && snapname != NULL) {
793 dsl_dataset_t *snap_ds;
795 if (*snapname++ != '@') {
796 dsl_dataset_rele_flags(ds, flags, tag);
797 dsl_dir_rele(dd, FTAG);
798 return (SET_ERROR(ENOENT));
801 dprintf("looking for snapshot '%s'\n", snapname);
802 err = dsl_dataset_snap_lookup(ds, snapname, &obj);
804 err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
807 dsl_dataset_rele_flags(ds, flags, tag);
810 mutex_enter(&snap_ds->ds_lock);
811 if (snap_ds->ds_snapname[0] == 0)
812 (void) strlcpy(snap_ds->ds_snapname, snapname,
813 sizeof (snap_ds->ds_snapname));
814 mutex_exit(&snap_ds->ds_lock);
820 dsl_dir_rele(dd, FTAG);
825 dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
828 return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
832 dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
833 void *tag, boolean_t override, dsl_dataset_t **dsp)
835 int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
838 if (!dsl_dataset_tryown(*dsp, tag, override)) {
839 dsl_dataset_rele_flags(*dsp, flags, tag);
841 return (SET_ERROR(EBUSY));
848 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
849 void *tag, dsl_dataset_t **dsp)
851 return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
855 dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
856 ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
858 return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
862 dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
863 void *tag, boolean_t override, dsl_dataset_t **dsp)
865 int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
868 if (!dsl_dataset_tryown(*dsp, tag, override)) {
869 dsl_dataset_rele_flags(*dsp, flags, tag);
870 return (SET_ERROR(EBUSY));
876 dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
877 void *tag, dsl_dataset_t **dsp)
879 return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
883 dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
884 void *tag, dsl_dataset_t **dsp)
886 return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
890 * See the comment above dsl_pool_hold() for details. In summary, a long
891 * hold is used to prevent destruction of a dataset while the pool hold
892 * is dropped, allowing other concurrent operations (e.g. spa_sync()).
894 * The dataset and pool must be held when this function is called. After it
895 * is called, the pool hold may be released while the dataset is still held and accessed.
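 *
 * A typical caller sequence looks roughly like (illustrative only):
 *
 *	dsl_pool_hold(name, FTAG, &dp);
 *	dsl_dataset_hold(dp, name, FTAG, &ds);
 *	dsl_dataset_long_hold(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 *	... long-running work (e.g. generating a send stream) ...
 *	dsl_dataset_long_rele(ds, FTAG);
 *	dsl_dataset_rele(ds, FTAG);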
899 dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
901 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
902 (void) zfs_refcount_add(&ds->ds_longholds, tag);
906 dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
908 (void) zfs_refcount_remove(&ds->ds_longholds, tag);
911 /* Return B_TRUE if there are any long holds on this dataset. */
913 dsl_dataset_long_held(dsl_dataset_t *ds)
915 return (!zfs_refcount_is_zero(&ds->ds_longholds));
919 dsl_dataset_name(dsl_dataset_t *ds, char *name)
922 (void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
924 dsl_dir_name(ds->ds_dir, name);
925 VERIFY0(dsl_dataset_get_snapname(ds));
926 if (ds->ds_snapname[0]) {
927 VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
928 <, ZFS_MAX_DATASET_NAME_LEN);
930 * We use a "recursive" mutex so that we
931 * can call dprintf_ds() with ds_lock held.
933 if (!MUTEX_HELD(&ds->ds_lock)) {
934 mutex_enter(&ds->ds_lock);
935 VERIFY3U(strlcat(name, ds->ds_snapname,
936 ZFS_MAX_DATASET_NAME_LEN), <,
937 ZFS_MAX_DATASET_NAME_LEN);
938 mutex_exit(&ds->ds_lock);
940 VERIFY3U(strlcat(name, ds->ds_snapname,
941 ZFS_MAX_DATASET_NAME_LEN), <,
942 ZFS_MAX_DATASET_NAME_LEN);
949 dsl_dataset_namelen(dsl_dataset_t *ds)
951 VERIFY0(dsl_dataset_get_snapname(ds));
952 mutex_enter(&ds->ds_lock);
953 int len = strlen(ds->ds_snapname);
954 mutex_exit(&ds->ds_lock);
955 /* add '@' if ds is a snap */
958 len += dsl_dir_namelen(ds->ds_dir);
963 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
965 dmu_buf_rele(ds->ds_dbuf, tag);
969 dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
971 dsl_dir_t *dd = ds->ds_dir;
973 if (dd == NULL || dd->dd_crypto_obj == 0)
976 (void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa,
981 dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
983 if (flags & DS_HOLD_FLAG_DECRYPT)
984 dsl_dataset_remove_key_mapping(ds);
986 dsl_dataset_rele(ds, tag);
990 dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
992 ASSERT3P(ds->ds_owner, ==, tag);
993 ASSERT(ds->ds_dbuf != NULL);
995 mutex_enter(&ds->ds_lock);
997 mutex_exit(&ds->ds_lock);
998 dsl_dataset_long_rele(ds, tag);
999 dsl_dataset_rele_flags(ds, flags, tag);
1003 dsl_dataset_tryown(dsl_dataset_t *ds, void *tag, boolean_t override)
1005 boolean_t gotit = FALSE;
1007 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1008 mutex_enter(&ds->ds_lock);
1009 if (ds->ds_owner == NULL && (override || !(DS_IS_INCONSISTENT(ds) ||
1010 (dsl_dataset_feature_is_active(ds,
1011 SPA_FEATURE_REDACTED_DATASETS) &&
1012 !zfs_allow_redacted_dataset_mount)))) {
1014 dsl_dataset_long_hold(ds, tag);
1017 mutex_exit(&ds->ds_lock);
1022 dsl_dataset_has_owner(dsl_dataset_t *ds)
1025 mutex_enter(&ds->ds_lock);
1026 rv = (ds->ds_owner != NULL);
1027 mutex_exit(&ds->ds_lock);
1032 zfeature_active(spa_feature_t f, void *arg)
1034 switch (spa_feature_table[f].fi_type) {
1035 case ZFEATURE_TYPE_BOOLEAN: {
1036 boolean_t val = (boolean_t)(uintptr_t)arg;
1037 ASSERT(val == B_FALSE || val == B_TRUE);
1040 case ZFEATURE_TYPE_UINT64_ARRAY:
1042 * In this case, arg is a uint64_t array. The feature is active
1043 * if the array is non-null.
1045 return (arg != NULL);
1047 panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
1053 dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f)
1055 return (zfeature_active(f, ds->ds_feature[f]));
1059 * The buffers passed out by this function are references to internal buffers;
1060 * they should not be freed by callers of this function, and they should not be
1061 * used after the dataset has been released.
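 *
 * For example, a caller interested in the redaction snapshot list of a
 * redacted dataset might do, roughly:
 *
 *	uint64_t nsnaps, *snaps;
 *	dsl_dataset_get_uint64_array_feature(ds,
 *	    SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps);
 *
 * and must stop using "snaps" before releasing "ds".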
1064 dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds, spa_feature_t f,
1065 uint64_t *outlength, uint64_t **outp)
1067 VERIFY(spa_feature_table[f].fi_type & ZFEATURE_TYPE_UINT64_ARRAY);
1068 if (!dsl_dataset_feature_is_active(ds, f)) {
1071 struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
1072 *outp = ftuaa->array;
1073 *outlength = ftuaa->length;
1078 dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
1081 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1082 objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
1085 VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
1087 spa_feature_incr(spa, f, tx);
1088 dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
1090 switch (spa_feature_table[f].fi_type) {
1091 case ZFEATURE_TYPE_BOOLEAN:
1092 ASSERT3S((boolean_t)(uintptr_t)arg, ==, B_TRUE);
1093 VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
1094 sizeof (zero), 1, &zero, tx));
1096 case ZFEATURE_TYPE_UINT64_ARRAY:
1098 struct feature_type_uint64_array_arg *ftuaa = arg;
1099 VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
1100 sizeof (uint64_t), ftuaa->length, ftuaa->array, tx));
1104 panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
1109 dsl_dataset_deactivate_feature_impl(dsl_dataset_t *ds, spa_feature_t f,
1112 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1113 objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
1114 uint64_t dsobj = ds->ds_object;
1116 VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
1118 VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
1119 spa_feature_decr(spa, f, tx);
1120 ds->ds_feature[f] = NULL;
1124 dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f, dmu_tx_t *tx)
1126 unload_zfeature(ds, f);
1127 dsl_dataset_deactivate_feature_impl(ds, f, tx);
1131 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
1132 dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
1134 dsl_pool_t *dp = dd->dd_pool;
1136 dsl_dataset_phys_t *dsphys;
1138 objset_t *mos = dp->dp_meta_objset;
1141 origin = dp->dp_origin_snap;
1143 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
1144 ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
1145 ASSERT(dmu_tx_is_syncing(tx));
1146 ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
1148 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1149 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1150 VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1151 dmu_buf_will_dirty(dbuf, tx);
1152 dsphys = dbuf->db_data;
1153 bzero(dsphys, sizeof (dsl_dataset_phys_t));
1154 dsphys->ds_dir_obj = dd->dd_object;
1155 dsphys->ds_flags = flags;
1156 dsphys->ds_fsid_guid = unique_create();
1157 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1158 sizeof (dsphys->ds_guid));
1159 dsphys->ds_snapnames_zapobj =
1160 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
1161 DMU_OT_NONE, 0, tx);
1162 dsphys->ds_creation_time = gethrestime_sec();
1163 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
1165 if (origin == NULL) {
1166 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
1168 dsl_dataset_t *ohds; /* head of the origin snapshot */
1170 dsphys->ds_prev_snap_obj = origin->ds_object;
1171 dsphys->ds_prev_snap_txg =
1172 dsl_dataset_phys(origin)->ds_creation_txg;
1173 dsphys->ds_referenced_bytes =
1174 dsl_dataset_phys(origin)->ds_referenced_bytes;
1175 dsphys->ds_compressed_bytes =
1176 dsl_dataset_phys(origin)->ds_compressed_bytes;
1177 dsphys->ds_uncompressed_bytes =
1178 dsl_dataset_phys(origin)->ds_uncompressed_bytes;
1179 rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
1180 dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
1181 rrw_exit(&origin->ds_bp_rwlock, FTAG);
1184 * Inherit flags that describe the dataset's contents
1185 * (INCONSISTENT) or properties (Case Insensitive).
1187 dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
1188 (DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
1190 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
1191 if (zfeature_active(f, origin->ds_feature[f])) {
1192 dsl_dataset_activate_feature(dsobj, f,
1193 origin->ds_feature[f], tx);
1197 dmu_buf_will_dirty(origin->ds_dbuf, tx);
1198 dsl_dataset_phys(origin)->ds_num_children++;
1200 VERIFY0(dsl_dataset_hold_obj(dp,
1201 dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
1203 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
1204 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
1205 dsl_dataset_rele(ohds, FTAG);
1207 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
1208 if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
1209 dsl_dataset_phys(origin)->ds_next_clones_obj =
1211 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
1213 VERIFY0(zap_add_int(mos,
1214 dsl_dataset_phys(origin)->ds_next_clones_obj,
1218 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1219 dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
1220 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1221 if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
1222 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
1223 dsl_dir_phys(origin->ds_dir)->dd_clones =
1225 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
1227 VERIFY0(zap_add_int(mos,
1228 dsl_dir_phys(origin->ds_dir)->dd_clones,
1233 /* handle encryption */
1234 dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);
1236 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1237 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1239 dmu_buf_rele(dbuf, FTAG);
1241 dmu_buf_will_dirty(dd->dd_dbuf, tx);
1242 dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;
1248 dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
1252 VERIFY0(dmu_objset_from_ds(ds, &os));
1253 if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
1254 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1257 bzero(&os->os_zil_header, sizeof (os->os_zil_header));
1258 if (os->os_encrypted)
1259 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
1261 zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1262 dsl_dataset_sync(ds, zio, tx);
1263 VERIFY0(zio_wait(zio));
1265 /* dsl_dataset_sync_done will drop this reference. */
1266 dmu_buf_add_ref(ds->ds_dbuf, ds);
1267 dsl_dataset_sync_done(ds, tx);
1272 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
1273 dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
1274 dsl_crypto_params_t *dcp, dmu_tx_t *tx)
1276 dsl_pool_t *dp = pdd->dd_pool;
1277 uint64_t dsobj, ddobj;
1280 ASSERT(dmu_tx_is_syncing(tx));
1281 ASSERT(lastname[0] != '@');
1283 * Filesystems will eventually have their origin set to dp_origin_snap,
1284 * but that's taken care of in dsl_dataset_create_sync_dd. When
1285 * creating a filesystem, this function is called with origin equal to
1289 ASSERT3P(origin, !=, dp->dp_origin_snap);
1291 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
1292 VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));
1294 dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
1295 flags & ~DS_CREATE_FLAG_NODIRTY, tx);
1297 dsl_deleg_set_create_perms(dd, tx, cr);
1300 * If we are creating a clone and the livelist feature is enabled,
1301 * add the entry DD_FIELD_LIVELIST to ZAP.
1303 if (origin != NULL &&
1304 spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LIVELIST)) {
1305 objset_t *mos = dd->dd_pool->dp_meta_objset;
1306 dsl_dir_zapify(dd, tx);
1307 uint64_t obj = dsl_deadlist_alloc(mos, tx);
1308 VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_LIVELIST,
1309 sizeof (uint64_t), 1, &obj, tx));
1310 spa_feature_incr(dp->dp_spa, SPA_FEATURE_LIVELIST, tx);
1314 * Since we're creating a new node we know it's a leaf, so we can
1315 * initialize the counts if the limit feature is active.
1317 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
1319 objset_t *os = dd->dd_pool->dp_meta_objset;
1321 dsl_dir_zapify(dd, tx);
1322 VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
1323 sizeof (cnt), 1, &cnt, tx));
1324 VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
1325 sizeof (cnt), 1, &cnt, tx));
1328 dsl_dir_rele(dd, FTAG);
1331 * If we are creating a clone, make sure we zero out any stale
1332 * data from the origin snapshots zil header.
1334 if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
1337 VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
1338 dsl_dataset_zero_zil(ds, tx);
1339 dsl_dataset_rele(ds, FTAG);
1346 * The unique space in the head dataset can be calculated by subtracting
1347 * the space used in the most recent snapshot, that is still being used
1348 * in this file system, from the space currently in use. To figure out
1349 * the space in the most recent snapshot still in use, we need to take
1350 * the total space used in the snapshot and subtract out the space that
1351 * has been freed up since the snapshot was taken.
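 *
 * For example, if the head currently references 50G, the most recent
 * snapshot references 40G (mrs_used), and 10G of that snapshot's space
 * has since been freed (deadlist used space), then 30G of the snapshot
 * is still in use and the head's unique space is 50G - 30G = 20G.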
1354 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1357 uint64_t dlused, dlcomp, dluncomp;
1359 ASSERT(!ds->ds_is_snapshot);
1361 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
1362 mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
1366 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1368 ASSERT3U(dlused, <=, mrs_used);
1369 dsl_dataset_phys(ds)->ds_unique_bytes =
1370 dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);
1372 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1373 SPA_VERSION_UNIQUE_ACCURATE)
1374 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1378 dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
1381 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1382 uint64_t count __maybe_unused;
1385 ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
1386 err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
1389 * The err should not be ENOENT, but a bug in a previous version
1390 * of the code could cause upgrade_clones_cb() to not set
1391 * ds_next_snap_obj when it should, leading to a missing entry.
1392 * If we knew that the pool was created after
1393 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1394 * ENOENT. However, at least we can check that we don't have
1395 * too many entries in the next_clones_obj even after failing to
1400 ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
1402 ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
1407 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1409 return (&dsl_dataset_phys(ds)->ds_bp);
1413 dsl_dataset_get_spa(dsl_dataset_t *ds)
1415 return (ds->ds_dir->dd_pool->dp_spa);
1419 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1423 if (ds == NULL) /* this is the meta-objset */
1426 ASSERT(ds->ds_objset != NULL);
1428 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
1429 panic("dirtying snapshot!");
1431 /* Must not dirty a dataset in the same txg where it got snapshotted. */
1432 ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
1434 dp = ds->ds_dir->dd_pool;
1435 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
1436 objset_t *os = ds->ds_objset;
1438 /* up the hold count until we can be written out */
1439 dmu_buf_add_ref(ds->ds_dbuf, ds);
1441 /* if this dataset is encrypted, grab a reference to the DCK */
1442 if (ds->ds_dir->dd_crypto_obj != 0 &&
1443 !os->os_raw_receive &&
1444 !os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
1445 ASSERT3P(ds->ds_key_mapping, !=, NULL);
1446 key_mapping_add_ref(ds->ds_key_mapping, ds);
1452 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1456 if (!dmu_tx_is_syncing(tx))
1460 * If there's an fs-only reservation, any blocks that might become
1461 * owned by the snapshot dataset must be accommodated by space
1462 * outside of the reservation.
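 *
 * For example, with a 10G refreservation and 4G of unique data, taking a
 * snapshot moves up to MIN(4G, 10G) = 4G of blocks out from under the
 * reservation, so 4G must be available outside the reservation or the
 * snapshot fails with ENOSPC.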
1464 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1465 asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
1466 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
1467 return (SET_ERROR(ENOSPC));
1470 * Propagate any reserved space for this snapshot to other
1471 * snapshot checks in this sync group.
1474 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1480 dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
1481 dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc)
1486 ds->ds_trysnap_txg = tx->tx_txg;
1488 if (!dmu_tx_is_syncing(tx))
1492 * We don't allow multiple snapshots of the same txg. If there
1493 * is already one, try again.
1495 if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
1496 return (SET_ERROR(EAGAIN));
1499 * Check for conflicting snapshot name.
1501 error = dsl_dataset_snap_lookup(ds, snapname, &value);
1503 return (SET_ERROR(EEXIST));
1504 if (error != ENOENT)
1508 * We don't allow taking snapshots of inconsistent datasets, such as
1509 * those into which we are currently receiving. However, if we are
1510 * creating this snapshot as part of a receive, this check will be
1511 * executed atomically with respect to the completion of the receive
1512 * itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
1513 * case we ignore this, knowing it will be fixed up for us shortly in
1514 * dmu_recv_end_sync().
1516 if (!recv && DS_IS_INCONSISTENT(ds))
1517 return (SET_ERROR(EBUSY));
1520 * Skip the check for temporary snapshots or if we have already checked
1521 * the counts in dsl_dataset_snapshot_check. This means we really only
1522 * check the count here when we're receiving a stream.
1524 if (cnt != 0 && cr != NULL) {
1525 error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
1526 ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc);
1531 error = dsl_dataset_snapshot_reserve_space(ds, tx);
1539 dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
1541 dsl_dataset_snapshot_arg_t *ddsa = arg;
1542 dsl_pool_t *dp = dmu_tx_pool(tx);
1547 * Pre-compute how many total new snapshots will be created for each
1548 * level in the tree and below. This is needed for validating the
1549 * snapshot limit when either taking a recursive snapshot or when
1550 * taking multiple snapshots.
1552 * The problem is that the counts are not actually adjusted when
1553 * we are checking, only when we finally sync. For a single snapshot,
1554 * this is easy, the count will increase by 1 at each node up the tree,
1555 * but it's more complicated for the recursive/multiple snapshot case.
1557 * The dsl_fs_ss_limit_check function does recursively check the count
1558 * at each level up the tree but since it is validating each snapshot
1559 * independently we need to be sure that we are validating the complete
1560 * count for the entire set of snapshots. We do this by rolling up the
1561 * counts for each component of the name into an nvlist and then
1562 * checking each of those cases with the aggregated count.
1564 * This approach properly handles not only the recursive snapshot
1565 * case (where we get all of those on the ddsa_snaps list) but also
1566 * the sibling case (e.g. snapshot a/b and a/c so that we will also
1567 * validate the limit on 'a' using a count of 2).
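 *
 * Concretely, a request to snapshot both a/b@s and a/c@s rolls up to
 * cnt_track = { "a/b" = 1, "a/c" = 1, "a" = 2 }, and the limit check
 * below then runs once per entry with the aggregated count.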
1569 * We validate the snapshot names in the third loop and only report
1572 if (dmu_tx_is_syncing(tx)) {
1574 nvlist_t *cnt_track = NULL;
1575 cnt_track = fnvlist_alloc();
1577 nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1579 /* Rollup aggregated counts into the cnt_track list */
1580 for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
1582 pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
1586 (void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
1587 pdelim = strchr(nm, '@');
1593 if (nvlist_lookup_uint64(cnt_track, nm,
1595 /* update existing entry */
1596 fnvlist_add_uint64(cnt_track, nm,
1600 fnvlist_add_uint64(cnt_track, nm, 1);
1603 pdelim = strrchr(nm, '/');
1606 } while (pdelim != NULL);
1609 kmem_free(nm, MAXPATHLEN);
1611 /* Check aggregated counts at each level */
1612 for (pair = nvlist_next_nvpair(cnt_track, NULL);
1613 pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
1619 name = nvpair_name(pair);
1620 cnt = fnvpair_value_uint64(pair);
1623 error = dsl_dataset_hold(dp, name, FTAG, &ds);
1625 error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
1626 ZFS_PROP_SNAPSHOT_LIMIT, NULL,
1627 ddsa->ddsa_cr, ddsa->ddsa_proc);
1628 dsl_dataset_rele(ds, FTAG);
1632 if (ddsa->ddsa_errors != NULL)
1633 fnvlist_add_int32(ddsa->ddsa_errors,
1636 /* only report one error for this check */
1640 nvlist_free(cnt_track);
1643 for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
1644 pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
1647 char *name, *atp = NULL;
1648 char dsname[ZFS_MAX_DATASET_NAME_LEN];
1650 name = nvpair_name(pair);
1651 if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
1652 error = SET_ERROR(ENAMETOOLONG);
1654 atp = strchr(name, '@');
1656 error = SET_ERROR(EINVAL);
1658 (void) strlcpy(dsname, name, atp - name + 1);
1661 error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
1663 /* passing 0/NULL skips dsl_fs_ss_limit_check */
1664 error = dsl_dataset_snapshot_check_impl(ds,
1665 atp + 1, tx, B_FALSE, 0, NULL, NULL);
1666 dsl_dataset_rele(ds, FTAG);
1670 if (ddsa->ddsa_errors != NULL) {
1671 fnvlist_add_int32(ddsa->ddsa_errors,
1682 dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
1685 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1687 dsl_dataset_phys_t *dsphys;
1688 uint64_t dsobj, crtxg;
1689 objset_t *mos = dp->dp_meta_objset;
1690 static zil_header_t zero_zil __maybe_unused;
1691 objset_t *os __maybe_unused;
1693 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
1696 * If we are on an old pool, the zil must not be active, in which
1697 * case it will be zeroed. Usually zil_suspend() accomplishes this.
1699 ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
1700 dmu_objset_from_ds(ds, &os) != 0 ||
1701 bcmp(&os->os_phys->os_zil_header, &zero_zil,
1702 sizeof (zero_zil)) == 0);
1704 /* Should not snapshot a dirty dataset. */
1705 ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1708 dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);
1711 * The origin's ds_creation_txg has to be < TXG_INITIAL
1713 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1718 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1719 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1720 VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1721 dmu_buf_will_dirty(dbuf, tx);
1722 dsphys = dbuf->db_data;
1723 bzero(dsphys, sizeof (dsl_dataset_phys_t));
1724 dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1725 dsphys->ds_fsid_guid = unique_create();
1726 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1727 sizeof (dsphys->ds_guid));
1728 dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1729 dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
1730 dsphys->ds_next_snap_obj = ds->ds_object;
1731 dsphys->ds_num_children = 1;
1732 dsphys->ds_creation_time = gethrestime_sec();
1733 dsphys->ds_creation_txg = crtxg;
1734 dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
1735 dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
1736 dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
1737 dsphys->ds_uncompressed_bytes =
1738 dsl_dataset_phys(ds)->ds_uncompressed_bytes;
1739 dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
1740 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1741 dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
1742 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1743 dmu_buf_rele(dbuf, FTAG);
1745 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
1746 if (zfeature_active(f, ds->ds_feature[f])) {
1747 dsl_dataset_activate_feature(dsobj, f,
1748 ds->ds_feature[f], tx);
1752 ASSERT3U(ds->ds_prev != 0, ==,
1753 dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
1755 uint64_t next_clones_obj =
1756 dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
1757 ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
1759 dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
1760 if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
1762 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1763 ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
1764 dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
1765 dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
1766 } else if (next_clones_obj != 0) {
1767 dsl_dataset_remove_from_next_clones(ds->ds_prev,
1768 dsphys->ds_next_snap_obj, tx);
1769 VERIFY0(zap_add_int(mos,
1770 next_clones_obj, dsobj, tx));
1775 * If we have a reference-reservation on this dataset, we will
1776 * need to increase the amount of refreservation being charged
1777 * since our unique space is going to zero.
1779 if (ds->ds_reserved) {
1781 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
1782 delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
1784 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
1788 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1789 dsl_dataset_phys(ds)->ds_deadlist_obj =
1790 dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
1791 dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
1792 dsl_deadlist_close(&ds->ds_deadlist);
1793 dsl_deadlist_open(&ds->ds_deadlist, mos,
1794 dsl_dataset_phys(ds)->ds_deadlist_obj);
1795 dsl_deadlist_add_key(&ds->ds_deadlist,
1796 dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
1797 dsl_bookmark_snapshotted(ds, tx);
1799 if (dsl_dataset_remap_deadlist_exists(ds)) {
1800 uint64_t remap_deadlist_obj =
1801 dsl_dataset_get_remap_deadlist_object(ds);
1803 * Move the remap_deadlist to the snapshot. The head
1804 * will create a new remap deadlist on demand, from
1805 * dsl_dataset_block_remapped().
1807 dsl_dataset_unset_remap_deadlist_object(ds, tx);
1808 dsl_deadlist_close(&ds->ds_remap_deadlist);
1810 dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
1811 VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
1812 sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
1816 * Create an ivset guid for this snapshot if the dataset is
1817 * encrypted. This may be overridden by a raw receive. A
1818 * previous implementation of this code did not have this
1819 * field as part of the on-disk format for ZFS encryption
1820 * (see errata #4). As part of the remediation for this
1821 * issue, we ask the user to enable the bookmark_v2 feature
1822 * which is now a dependency of the encryption feature. We
1823 * use this as a heuristic to determine when the user has
1824 * elected to correct any datasets created with the old code.
1825 * As a result, we only do this step if the bookmark_v2
1826 * feature is enabled, which limits the number of states a
1827 * given pool / dataset can be in with regard to
1828 * correcting the issue.
1830 if (ds->ds_dir->dd_crypto_obj != 0 &&
1831 spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2)) {
1832 uint64_t ivset_guid = unique_create();
1834 dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
1835 VERIFY0(zap_add(mos, dsobj, DS_FIELD_IVSET_GUID,
1836 sizeof (ivset_guid), 1, &ivset_guid, tx));
1839 ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
1840 dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
1841 dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
1842 dsl_dataset_phys(ds)->ds_unique_bytes = 0;
1844 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1845 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1847 VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1848 snapname, 8, 1, &dsobj, tx));
1851 dsl_dataset_rele(ds->ds_prev, ds);
1852 VERIFY0(dsl_dataset_hold_obj(dp,
1853 dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));
1855 dsl_scan_ds_snapshotted(ds, tx);
1857 dsl_dir_snap_cmtime_update(ds->ds_dir);
1859 spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, " ");
1863 dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
1865 dsl_dataset_snapshot_arg_t *ddsa = arg;
1866 dsl_pool_t *dp = dmu_tx_pool(tx);
1869 for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
1870 pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
1873 char dsname[ZFS_MAX_DATASET_NAME_LEN];
1875 name = nvpair_name(pair);
1876 atp = strchr(name, '@');
1877 (void) strlcpy(dsname, name, atp - name + 1);
1878 VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));
1880 dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
1881 if (ddsa->ddsa_props != NULL) {
1882 dsl_props_set_sync_impl(ds->ds_prev,
1883 ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
1885 dsl_dataset_rele(ds, FTAG);
1890 * The snapshots must all be in the same pool.
1891 * All-or-nothing: if there are any failures, nothing will be modified.
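 *
 * For example, to snapshot two filesystems in one atomic operation a
 * caller might do, roughly:
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs1@backup");
 *	fnvlist_add_boolean(snaps, "pool/fs2@backup");
 *	error = dsl_dataset_snapshot(snaps, NULL, NULL);
 *	fnvlist_free(snaps);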
1894 dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
1896 dsl_dataset_snapshot_arg_t ddsa;
1898 boolean_t needsuspend;
1902 nvlist_t *suspended = NULL;
1904 pair = nvlist_next_nvpair(snaps, NULL);
1907 firstname = nvpair_name(pair);
1909 error = spa_open(firstname, &spa, FTAG);
1912 needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
1913 spa_close(spa, FTAG);
1916 suspended = fnvlist_alloc();
1917 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
1918 pair = nvlist_next_nvpair(snaps, pair)) {
1919 char fsname[ZFS_MAX_DATASET_NAME_LEN];
1920 char *snapname = nvpair_name(pair);
1924 atp = strchr(snapname, '@');
1926 error = SET_ERROR(EINVAL);
1929 (void) strlcpy(fsname, snapname, atp - snapname + 1);
1931 error = zil_suspend(fsname, &cookie);
1934 fnvlist_add_uint64(suspended, fsname,
1939 ddsa.ddsa_snaps = snaps;
1940 ddsa.ddsa_props = props;
1941 ddsa.ddsa_errors = errors;
1942 ddsa.ddsa_cr = CRED();
1943 ddsa.ddsa_proc = curproc;
1946 error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
1947 dsl_dataset_snapshot_sync, &ddsa,
1948 fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
1951 if (suspended != NULL) {
1952 for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
1953 pair = nvlist_next_nvpair(suspended, pair)) {
1954 zil_resume((void *)(uintptr_t)
1955 fnvpair_value_uint64(pair));
1957 fnvlist_free(suspended);
1961 for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
1962 pair = nvlist_next_nvpair(snaps, pair)) {
1963 zvol_create_minor(nvpair_name(pair));
1970 typedef struct dsl_dataset_snapshot_tmp_arg {
1971 const char *ddsta_fsname;
1972 const char *ddsta_snapname;
1973 minor_t ddsta_cleanup_minor;
1974 const char *ddsta_htag;
1975 } dsl_dataset_snapshot_tmp_arg_t;
1978 dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
1980 dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
1981 dsl_pool_t *dp = dmu_tx_pool(tx);
1985 error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
1989 /* NULL cred means no limit check for tmp snapshot */
1990 error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
1991 tx, B_FALSE, 0, NULL, NULL);
1993 dsl_dataset_rele(ds, FTAG);
1997 if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
1998 dsl_dataset_rele(ds, FTAG);
1999 return (SET_ERROR(ENOTSUP));
2001 error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
2004 dsl_dataset_rele(ds, FTAG);
2008 dsl_dataset_rele(ds, FTAG);
2013 dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
2015 dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
2016 dsl_pool_t *dp = dmu_tx_pool(tx);
2017 dsl_dataset_t *ds = NULL;
2019 VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));
2021 dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
2022 dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
2023 ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
2024 dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);
2026 dsl_dataset_rele(ds, FTAG);
2030 dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
2031 minor_t cleanup_minor, const char *htag)
2033 dsl_dataset_snapshot_tmp_arg_t ddsta;
2036 boolean_t needsuspend;
2039 ddsta.ddsta_fsname = fsname;
2040 ddsta.ddsta_snapname = snapname;
2041 ddsta.ddsta_cleanup_minor = cleanup_minor;
2042 ddsta.ddsta_htag = htag;
2044 error = spa_open(fsname, &spa, FTAG);
2047 needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
2048 spa_close(spa, FTAG);
2051 error = zil_suspend(fsname, &cookie);
2056 error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
2057 dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);
2065 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2067 ASSERT(dmu_tx_is_syncing(tx));
2068 ASSERT(ds->ds_objset != NULL);
2069 ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
2072 * in case we had to change ds_fsid_guid when we opened it, sync it out now.
2075 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2076 dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;
2078 if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
2079 VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
2080 ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
2081 &ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
2082 VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
2083 ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
2084 &ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
2085 VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
2086 ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
2087 &ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
2088 ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
2089 ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
2090 ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
2093 dmu_objset_sync(ds->ds_objset, zio, tx);
2095 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
2096 if (zfeature_active(f, ds->ds_feature_activation[f])) {
2097 if (zfeature_active(f, ds->ds_feature[f]))
2099 dsl_dataset_activate_feature(ds->ds_object, f,
2100 ds->ds_feature_activation[f], tx);
2101 ds->ds_feature[f] = ds->ds_feature_activation[f];
2107 * Check if the percentage of blocks shared between the clone and the
2108 * snapshot (as opposed to those that are clone only) is below a certain threshold.
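 *
 * For example, a clone that uses 2G of its own space while referencing
 * 10G shares 8G with the origin snapshot, i.e. percent_shared = 80; the
 * livelist is disabled once the shared percentage drops to or below
 * zfs_livelist_min_percent_shared.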
2112 dsl_livelist_should_disable(dsl_dataset_t *ds)
2114 uint64_t used, referenced;
2117 used = dsl_dir_get_usedds(ds->ds_dir);
2118 referenced = dsl_get_referenced(ds);
2119 ASSERT3U(referenced, >=, 0);
2120 ASSERT3U(used, >=, 0);
2121 if (referenced == 0)
2123 percent_shared = (100 * (referenced - used)) / referenced;
2124 if (percent_shared <= zfs_livelist_min_percent_shared)
2130 * Check if it is possible to combine two livelist entries into one.
2131 * This is the case if the combined number of 'live' blkptrs (ALLOCs that
2132 * don't have a matching FREE) is under the maximum sublist size.
2133 * We check this by subtracting twice the total number of frees from the total
2134 * number of blkptrs. FREEs are counted twice because each FREE blkptr
2135 * will cancel out an ALLOC blkptr when the livelist is processed.
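/*
 * Worked example (made-up numbers, added for illustration): if the two
 * sublists together hold 1,000,000 blkptrs and 300,000 of them are FREEs,
 * then 1,000,000 - 2 * 300,000 = 400,000 entries would remain live after
 * processing, so the pair may be condensed only if 400,000 is below
 * zfs_livelist_max_entries.
 */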
2138 dsl_livelist_should_condense(dsl_deadlist_entry_t *first,
2139 dsl_deadlist_entry_t *next)
2141 uint64_t total_free = first->dle_bpobj.bpo_phys->bpo_num_freed +
2142 next->dle_bpobj.bpo_phys->bpo_num_freed;
2143 uint64_t total_entries = first->dle_bpobj.bpo_phys->bpo_num_blkptrs +
2144 next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
2145 if ((total_entries - (2 * total_free)) < zfs_livelist_max_entries)
2150 typedef struct try_condense_arg {
2153 } try_condense_arg_t;
2156 * Iterate over the livelist entries, searching for a pair to condense.
2157 * A nonzero return value means stop, 0 means keep looking.
2160 dsl_livelist_try_condense(void *arg, dsl_deadlist_entry_t *first)
2162 try_condense_arg_t *tca = arg;
2163 spa_t *spa = tca->spa;
2164 dsl_dataset_t *ds = tca->ds;
2165 dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
2166 dsl_deadlist_entry_t *next;
2168 /* The condense thread has not yet been created at import */
2169 if (spa->spa_livelist_condense_zthr == NULL)
2172 /* A condense is already in progress */
2173 if (spa->spa_to_condense.ds != NULL)
2176 next = AVL_NEXT(&ll->dl_tree, &first->dle_node);
2177 /* The livelist has only one entry - don't condense it */
2181 /* Next is the newest entry - don't condense it */
2182 if (AVL_NEXT(&ll->dl_tree, &next->dle_node) == NULL)
2185 /* This pair is not ready to condense but keep looking */
2186 if (!dsl_livelist_should_condense(first, next))
2190 * Add a ref to prevent the dataset from being evicted while
2191 * the condense zthr or synctask are running. Ref will be
2192 * released at the end of the condense synctask
2194 dmu_buf_add_ref(ds->ds_dbuf, spa);
2196 spa->spa_to_condense.ds = ds;
2197 spa->spa_to_condense.first = first;
2198 spa->spa_to_condense.next = next;
2199 spa->spa_to_condense.syncing = B_FALSE;
2200 spa->spa_to_condense.cancelled = B_FALSE;
2202 zthr_wakeup(spa->spa_livelist_condense_zthr);
2207 dsl_flush_pending_livelist(dsl_dataset_t *ds, dmu_tx_t *tx)
2209 dsl_dir_t *dd = ds->ds_dir;
2210 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
2211 dsl_deadlist_entry_t *last = dsl_deadlist_last(&dd->dd_livelist);
2213 /* Check if we need to add a new sub-livelist */
2215 /* The livelist is empty */
2216 dsl_deadlist_add_key(&dd->dd_livelist,
2217 tx->tx_txg - 1, tx);
2218 } else if (spa_sync_pass(spa) == 1) {
2220 * Check if the newest entry is full. If it is, make a new one.
2221 * We only do this once per sync because we could overfill a
2222 * sublist in one sync pass and don't want to add another entry
2223 * for a txg that is already represented. This ensures that
2224 * blkptrs born in the same txg are stored in the same sublist.
2226 bpobj_t bpobj = last->dle_bpobj;
2227 uint64_t all = bpobj.bpo_phys->bpo_num_blkptrs;
2228 uint64_t free = bpobj.bpo_phys->bpo_num_freed;
2229 uint64_t alloc = all - free;
2230 if (alloc > zfs_livelist_max_entries) {
2231 dsl_deadlist_add_key(&dd->dd_livelist,
2232 tx->tx_txg - 1, tx);
2236 /* Insert each entry into the on-disk livelist */
2237 bplist_iterate(&dd->dd_pending_allocs,
2238 dsl_deadlist_insert_alloc_cb, &dd->dd_livelist, tx);
2239 bplist_iterate(&dd->dd_pending_frees,
2240 dsl_deadlist_insert_free_cb, &dd->dd_livelist, tx);
2242 /* Attempt to condense every pair of adjacent entries */
2243 try_condense_arg_t arg = {
2247 dsl_deadlist_iterate(&dd->dd_livelist, dsl_livelist_try_condense,
2252 dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
2254 objset_t *os = ds->ds_objset;
2256 bplist_iterate(&ds->ds_pending_deadlist,
2257 dsl_deadlist_insert_alloc_cb, &ds->ds_deadlist, tx);
2259 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
2260 dsl_flush_pending_livelist(ds, tx);
2261 if (dsl_livelist_should_disable(ds)) {
2262 dsl_dir_remove_livelist(ds->ds_dir, tx, B_TRUE);
2266 dsl_bookmark_sync_done(ds, tx);
2268 multilist_destroy(&os->os_synced_dnodes);
2270 if (os->os_encrypted)
2271 os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
2273 ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);
2275 ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
2277 dmu_buf_rele(ds->ds_dbuf, ds);
2281 get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
2284 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2288 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
2291 * There may be missing entries in ds_next_clones_obj
2292 * due to a bug in a previous version of the code.
2293 * Only trust it if it has the right number of entries.
2295 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
2296 VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
2299 if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
2300 return (SET_ERROR(ENOENT));
2302 for (zap_cursor_init(&zc, mos,
2303 dsl_dataset_phys(ds)->ds_next_clones_obj);
2304 zap_cursor_retrieve(&zc, &za) == 0;
2305 zap_cursor_advance(&zc)) {
2306 dsl_dataset_t *clone;
2307 char buf[ZFS_MAX_DATASET_NAME_LEN];
2308 VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2309 za.za_first_integer, FTAG, &clone));
2310 dsl_dir_name(clone->ds_dir, buf);
2311 fnvlist_add_boolean(val, buf);
2312 dsl_dataset_rele(clone, FTAG);
2314 zap_cursor_fini(&zc);
2319 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2321 nvlist_t *propval = fnvlist_alloc();
2322 nvlist_t *val = fnvlist_alloc();
2324 if (get_clones_stat_impl(ds, val) == 0) {
2325 fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
2326 fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2331 nvlist_free(propval);
2335 * Returns a string that represents the receive resume stats token. It should
2336 * be freed with kmem_strfree().
2339 get_receive_resume_stats_impl(dsl_dataset_t *ds)
2341 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2343 if (dsl_dataset_has_resume_receive_state(ds)) {
2346 uint8_t *compressed;
2348 nvlist_t *token_nv = fnvlist_alloc();
2349 size_t packed_size, compressed_size;
2351 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2352 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
2353 fnvlist_add_uint64(token_nv, "fromguid", val);
2355 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2356 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
2357 fnvlist_add_uint64(token_nv, "object", val);
2359 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2360 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
2361 fnvlist_add_uint64(token_nv, "offset", val);
2363 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2364 DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
2365 fnvlist_add_uint64(token_nv, "bytes", val);
2367 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2368 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
2369 fnvlist_add_uint64(token_nv, "toguid", val);
2371 char buf[MAXNAMELEN];
2372 if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
2373 DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
2374 fnvlist_add_string(token_nv, "toname", buf);
2376 if (zap_contains(dp->dp_meta_objset, ds->ds_object,
2377 DS_FIELD_RESUME_LARGEBLOCK) == 0) {
2378 fnvlist_add_boolean(token_nv, "largeblockok");
2380 if (zap_contains(dp->dp_meta_objset, ds->ds_object,
2381 DS_FIELD_RESUME_EMBEDOK) == 0) {
2382 fnvlist_add_boolean(token_nv, "embedok");
2384 if (zap_contains(dp->dp_meta_objset, ds->ds_object,
2385 DS_FIELD_RESUME_COMPRESSOK) == 0) {
2386 fnvlist_add_boolean(token_nv, "compressok");
2388 if (zap_contains(dp->dp_meta_objset, ds->ds_object,
2389 DS_FIELD_RESUME_RAWOK) == 0) {
2390 fnvlist_add_boolean(token_nv, "rawok");
2392 if (dsl_dataset_feature_is_active(ds,
2393 SPA_FEATURE_REDACTED_DATASETS)) {
2394 uint64_t num_redact_snaps;
2395 uint64_t *redact_snaps;
2396 VERIFY(dsl_dataset_get_uint64_array_feature(ds,
2397 SPA_FEATURE_REDACTED_DATASETS, &num_redact_snaps,
2399 fnvlist_add_uint64_array(token_nv, "redact_snaps",
2400 redact_snaps, num_redact_snaps);
2402 if (zap_contains(dp->dp_meta_objset, ds->ds_object,
2403 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS) == 0) {
2404 uint64_t num_redact_snaps, int_size;
2405 uint64_t *redact_snaps;
2406 VERIFY0(zap_length(dp->dp_meta_objset, ds->ds_object,
2407 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, &int_size,
2408 &num_redact_snaps));
2409 ASSERT3U(int_size, ==, sizeof (uint64_t));
2411 redact_snaps = kmem_alloc(int_size * num_redact_snaps,
2413 VERIFY0(zap_lookup(dp->dp_meta_objset, ds->ds_object,
2414 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, int_size,
2415 num_redact_snaps, redact_snaps));
2416 fnvlist_add_uint64_array(token_nv, "book_redact_snaps",
2417 redact_snaps, num_redact_snaps);
2418 kmem_free(redact_snaps, int_size * num_redact_snaps);
2420 packed = fnvlist_pack(token_nv, &packed_size);
2421 fnvlist_free(token_nv);
2422 compressed = kmem_alloc(packed_size, KM_SLEEP);
2424 compressed_size = gzip_compress(packed, compressed,
2425 packed_size, packed_size, 6);
2428 fletcher_4_native_varsize(compressed, compressed_size, &cksum);
2430 size_t alloc_size = compressed_size * 2 + 1;
2431 str = kmem_alloc(alloc_size, KM_SLEEP);
2432 for (int i = 0; i < compressed_size; i++) {
2433 size_t offset = i * 2;
2434 (void) snprintf(str + offset, alloc_size - offset,
2435 "%02x", compressed[i]);
2437 str[compressed_size * 2] = '\0';
2438 char *propval = kmem_asprintf("%u-%llx-%llx-%s",
2439 ZFS_SEND_RESUME_TOKEN_VERSION,
2440 (longlong_t)cksum.zc_word[0],
2441 (longlong_t)packed_size, str);
2442 kmem_free(packed, packed_size);
2443 kmem_free(str, alloc_size);
2444 kmem_free(compressed, packed_size);
2447 return (kmem_strdup(""));
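/*
 * Note (added commentary): as constructed above, the token has the form
 * <version>-<fletcher4 word 0>-<packed size>-<hex payload>, where the hex
 * payload is the gzip-compressed packed nvlist. A consumer can reverse
 * the steps: parse the three leading fields, hex-decode the payload,
 * decompress it back to packed_size bytes, and unpack the nvlist to
 * recover fields such as "toguid", "object" and "offset".
 */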
2451 * Returns a string that represents the receive resume stats token of the
2452 * dataset's child. It should be freed with kmem_strfree().
2455 get_child_receive_stats(dsl_dataset_t *ds)
2457 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
2458 dsl_dataset_t *recv_ds;
2459 dsl_dataset_name(ds, recvname);
2460 if (strlcat(recvname, "/", sizeof (recvname)) <
2461 sizeof (recvname) &&
2462 strlcat(recvname, recv_clone_name, sizeof (recvname)) <
2463 sizeof (recvname) &&
2464 dsl_dataset_hold(ds->ds_dir->dd_pool, recvname, FTAG,
2466 char *propval = get_receive_resume_stats_impl(recv_ds);
2467 dsl_dataset_rele(recv_ds, FTAG);
2470 return (kmem_strdup(""));
2474 get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
2476 char *propval = get_receive_resume_stats_impl(ds);
2477 if (strcmp(propval, "") != 0) {
2478 dsl_prop_nvlist_add_string(nv,
2479 ZFS_PROP_RECEIVE_RESUME_TOKEN, propval);
2481 char *childval = get_child_receive_stats(ds);
2482 if (strcmp(childval, "") != 0) {
2483 dsl_prop_nvlist_add_string(nv,
2484 ZFS_PROP_RECEIVE_RESUME_TOKEN, childval);
2486 kmem_strfree(childval);
2488 kmem_strfree(propval);
2492 dsl_get_refratio(dsl_dataset_t *ds)
2494 uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
2495 (dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
2496 dsl_dataset_phys(ds)->ds_compressed_bytes);
2501 dsl_get_logicalreferenced(dsl_dataset_t *ds)
2503 return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
2507 dsl_get_compressratio(dsl_dataset_t *ds)
2509 if (ds->ds_is_snapshot) {
2510 return (dsl_get_refratio(ds));
2512 dsl_dir_t *dd = ds->ds_dir;
2513 mutex_enter(&dd->dd_lock);
2514 uint64_t val = dsl_dir_get_compressratio(dd);
2515 mutex_exit(&dd->dd_lock);
2521 dsl_get_used(dsl_dataset_t *ds)
2523 if (ds->ds_is_snapshot) {
2524 return (dsl_dataset_phys(ds)->ds_unique_bytes);
2526 dsl_dir_t *dd = ds->ds_dir;
2527 mutex_enter(&dd->dd_lock);
2528 uint64_t val = dsl_dir_get_used(dd);
2529 mutex_exit(&dd->dd_lock);
2535 dsl_get_creation(dsl_dataset_t *ds)
2537 return (dsl_dataset_phys(ds)->ds_creation_time);
2541 dsl_get_creationtxg(dsl_dataset_t *ds)
2543 return (dsl_dataset_phys(ds)->ds_creation_txg);
2547 dsl_get_refquota(dsl_dataset_t *ds)
2549 return (ds->ds_quota);
2553 dsl_get_refreservation(dsl_dataset_t *ds)
2555 return (ds->ds_reserved);
2559 dsl_get_guid(dsl_dataset_t *ds)
2561 return (dsl_dataset_phys(ds)->ds_guid);
2565 dsl_get_unique(dsl_dataset_t *ds)
2567 return (dsl_dataset_phys(ds)->ds_unique_bytes);
2571 dsl_get_objsetid(dsl_dataset_t *ds)
2573 return (ds->ds_object);
2577 dsl_get_userrefs(dsl_dataset_t *ds)
2579 return (ds->ds_userrefs);
2583 dsl_get_defer_destroy(dsl_dataset_t *ds)
2585 return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2589 dsl_get_referenced(dsl_dataset_t *ds)
2591 return (dsl_dataset_phys(ds)->ds_referenced_bytes);
2595 dsl_get_numclones(dsl_dataset_t *ds)
2597 ASSERT(ds->ds_is_snapshot);
2598 return (dsl_dataset_phys(ds)->ds_num_children - 1);
2602 dsl_get_inconsistent(dsl_dataset_t *ds)
2604 return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
2609 dsl_get_redacted(dsl_dataset_t *ds)
2611 return (dsl_dataset_feature_is_active(ds,
2612 SPA_FEATURE_REDACTED_DATASETS));
2616 dsl_get_available(dsl_dataset_t *ds)
2618 uint64_t refdbytes = dsl_get_referenced(ds);
2619 uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
2621 if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
2623 ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
2625 if (ds->ds_quota != 0) {
2627 * Adjust available bytes according to refquota
2629 if (refdbytes < ds->ds_quota) {
2630 availbytes = MIN(availbytes,
2631 ds->ds_quota - refdbytes);
2636 return (availbytes);
2640 dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
2642 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2643 dsl_dataset_t *prev;
2644 int err = dsl_dataset_hold_obj(dp,
2645 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
2647 uint64_t comp, uncomp;
2648 err = dsl_dataset_space_written(prev, ds, written,
2650 dsl_dataset_rele(prev, FTAG);
2656 * 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
2659 dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
2661 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2662 if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
2663 dsl_dataset_name(ds->ds_prev, snap);
2666 return (SET_ERROR(ENOENT));
2671 dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval)
2675 if (dsl_dataset_get_uint64_array_feature(ds,
2676 SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps)) {
2677 fnvlist_add_uint64_array(propval, ZPROP_VALUE, snaps,
2683 * Returns the mountpoint property and source for the given dataset in the value
2684 * and source buffers. The value buffer must be at least as large as ZAP_MAXVALUELEN
2685 * and the source buffer at least as large as ZFS_MAX_DATASET_NAME_LEN.
2686 * Returns 0 on success and an error on failure.
2689 dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
2693 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2695 /* Retrieve the mountpoint value stored in the zap object */
2696 error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
2697 ZAP_MAXVALUELEN, value, source);
2703 * Process the dsname and source to find the full mountpoint string.
2704 * Can be skipped for 'legacy' or 'none'.
2706 if (value[0] == '/') {
2707 char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
2709 const char *relpath;
2712 * If we inherit the mountpoint, even from a dataset
2713 * with a received value, the source will be the path of
2714 * the dataset we inherit from. If source is
2715 * ZPROP_SOURCE_VAL_RECVD, the received value is not
2718 if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
2721 ASSERT0(strncmp(dsname, source, strlen(source)));
2722 relpath = dsname + strlen(source);
2723 if (relpath[0] == '/')
2727 spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
2730 * Special case an alternate root of '/'. This will
2731 * avoid having multiple leading slashes in the
2734 if (strcmp(root, "/") == 0)
2738 * If the mountpoint is '/' then skip over this
2739 * if we are obtaining either an alternate root or
2740 * an inherited mountpoint.
2743 if (value[1] == '\0' && (root[0] != '\0' ||
2744 relpath[0] != '\0'))
2747 if (relpath[0] == '\0') {
2748 (void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
2751 (void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
2752 root, mnt, relpath[0] == '@' ? "" : "/",
2755 kmem_free(buf, ZAP_MAXVALUELEN);
2762 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2764 dsl_pool_t *dp = ds->ds_dir->dd_pool;
2766 ASSERT(dsl_pool_config_held(dp));
2768 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
2769 dsl_get_refratio(ds));
2770 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
2771 dsl_get_logicalreferenced(ds));
2772 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2773 dsl_get_compressratio(ds));
2774 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2777 if (ds->ds_is_snapshot) {
2778 get_clones_stat(ds, nv);
2780 char buf[ZFS_MAX_DATASET_NAME_LEN];
2781 if (dsl_get_prev_snap(ds, buf) == 0)
2782 dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
2784 dsl_dir_stats(ds->ds_dir, nv);
2787 nvlist_t *propval = fnvlist_alloc();
2788 dsl_get_redact_snaps(ds, propval);
2789 fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
2791 nvlist_free(propval);
2793 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
2794 dsl_get_available(ds));
2795 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
2796 dsl_get_referenced(ds));
2797 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2798 dsl_get_creation(ds));
2799 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2800 dsl_get_creationtxg(ds));
2801 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2802 dsl_get_refquota(ds));
2803 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2804 dsl_get_refreservation(ds));
2805 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2807 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2808 dsl_get_unique(ds));
2809 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2810 dsl_get_objsetid(ds));
2811 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2812 dsl_get_userrefs(ds));
2813 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2814 dsl_get_defer_destroy(ds));
2815 dsl_dataset_crypt_stats(ds, nv);
2817 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
2819 if (dsl_get_written(ds, &written) == 0) {
2820 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2825 if (!dsl_dataset_is_snapshot(ds)) {
2827 * A failed "newfs" (e.g. full) resumable receive leaves
2828 * the stats set on this dataset. Check here for the prop.
2830 get_receive_resume_stats(ds, nv);
2833 * A failed incremental resumable receive leaves the
2834 * stats set on our child named "%recv". Check the child
2837 /* 6 extra bytes for /%recv */
2838 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
2839 dsl_dataset_t *recv_ds;
2840 dsl_dataset_name(ds, recvname);
2841 if (strlcat(recvname, "/", sizeof (recvname)) <
2842 sizeof (recvname) &&
2843 strlcat(recvname, recv_clone_name, sizeof (recvname)) <
2844 sizeof (recvname) &&
2845 dsl_dataset_hold(dp, recvname, FTAG, &recv_ds) == 0) {
2846 get_receive_resume_stats(recv_ds, nv);
2847 dsl_dataset_rele(recv_ds, FTAG);
2853 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2855 dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
2856 ASSERT(dsl_pool_config_held(dp));
2858 stat->dds_creation_txg = dsl_get_creationtxg(ds);
2859 stat->dds_inconsistent = dsl_get_inconsistent(ds);
2860 stat->dds_guid = dsl_get_guid(ds);
2861 stat->dds_redacted = dsl_get_redacted(ds);
2862 stat->dds_origin[0] = '\0';
2863 if (ds->ds_is_snapshot) {
2864 stat->dds_is_snapshot = B_TRUE;
2865 stat->dds_num_clones = dsl_get_numclones(ds);
2867 stat->dds_is_snapshot = B_FALSE;
2868 stat->dds_num_clones = 0;
2870 if (dsl_dir_is_clone(ds->ds_dir)) {
2871 dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
2877 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2879 return (ds->ds_fsid_guid);
2883 dsl_dataset_space(dsl_dataset_t *ds,
2884 uint64_t *refdbytesp, uint64_t *availbytesp,
2885 uint64_t *usedobjsp, uint64_t *availobjsp)
2887 *refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
2888 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2889 if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
2891 ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
2892 if (ds->ds_quota != 0) {
2894 * Adjust available bytes according to refquota
2896 if (*refdbytesp < ds->ds_quota)
2897 *availbytesp = MIN(*availbytesp,
2898 ds->ds_quota - *refdbytesp);
2902 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2903 *usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
2904 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2905 *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2909 dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
2911 dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
2914 ASSERT(dsl_pool_config_held(dp));
2917 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2918 birth = dsl_dataset_get_blkptr(ds)->blk_birth;
2919 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2920 if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
2921 objset_t *os, *os_snap;
2923 * It may be that only the ZIL differs, because it was
2924 * reset in the head. Don't count that as being
2927 if (dmu_objset_from_ds(ds, &os) != 0)
2929 if (dmu_objset_from_ds(snap, &os_snap) != 0)
2931 return (bcmp(&os->os_phys->os_meta_dnode,
2932 &os_snap->os_phys->os_meta_dnode,
2933 sizeof (os->os_phys->os_meta_dnode)) != 0);
2938 typedef struct dsl_dataset_rename_snapshot_arg {
2939 const char *ddrsa_fsname;
2940 const char *ddrsa_oldsnapname;
2941 const char *ddrsa_newsnapname;
2942 boolean_t ddrsa_recursive;
2944 } dsl_dataset_rename_snapshot_arg_t;
2948 dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
2949 dsl_dataset_t *hds, void *arg)
2951 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2955 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
2957 /* ignore nonexistent snapshots */
2958 return (error == ENOENT ? 0 : error);
2961 /* new name should not exist */
2962 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
2964 error = SET_ERROR(EEXIST);
2965 else if (error == ENOENT)
2968 /* dataset name + 1 for the "@" + the new snapshot name must fit */
2969 if (dsl_dir_namelen(hds->ds_dir) + 1 +
2970 strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
2971 error = SET_ERROR(ENAMETOOLONG);
2977 dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
2979 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
2980 dsl_pool_t *dp = dmu_tx_pool(tx);
2984 error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
2988 if (ddrsa->ddrsa_recursive) {
2989 error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
2990 dsl_dataset_rename_snapshot_check_impl, ddrsa,
2993 error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
2995 dsl_dataset_rele(hds, FTAG);
3000 dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
3001 dsl_dataset_t *hds, void *arg)
3003 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
3006 dmu_tx_t *tx = ddrsa->ddrsa_tx;
3009 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
3010 ASSERT(error == 0 || error == ENOENT);
3011 if (error == ENOENT) {
3012 /* ignore nonexistent snapshots */
3016 VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
3018 /* log before we change the name */
3019 spa_history_log_internal_ds(ds, "rename", tx,
3020 "-> @%s", ddrsa->ddrsa_newsnapname);
3022 VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
3024 mutex_enter(&ds->ds_lock);
3025 (void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
3026 sizeof (ds->ds_snapname));
3027 mutex_exit(&ds->ds_lock);
3028 VERIFY0(zap_add(dp->dp_meta_objset,
3029 dsl_dataset_phys(hds)->ds_snapnames_zapobj,
3030 ds->ds_snapname, 8, 1, &ds->ds_object, tx));
3031 zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
3032 ddrsa->ddrsa_newsnapname, B_TRUE);
3034 dsl_dataset_rele(ds, FTAG);
3039 dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
3041 dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
3042 dsl_pool_t *dp = dmu_tx_pool(tx);
3043 dsl_dataset_t *hds = NULL;
3045 VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
3046 ddrsa->ddrsa_tx = tx;
3047 if (ddrsa->ddrsa_recursive) {
3048 VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
3049 dsl_dataset_rename_snapshot_sync_impl, ddrsa,
3052 VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
3054 dsl_dataset_rele(hds, FTAG);
3058 dsl_dataset_rename_snapshot(const char *fsname,
3059 const char *oldsnapname, const char *newsnapname, boolean_t recursive)
3061 dsl_dataset_rename_snapshot_arg_t ddrsa;
3063 ddrsa.ddrsa_fsname = fsname;
3064 ddrsa.ddrsa_oldsnapname = oldsnapname;
3065 ddrsa.ddrsa_newsnapname = newsnapname;
3066 ddrsa.ddrsa_recursive = recursive;
3068 return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
3069 dsl_dataset_rename_snapshot_sync, &ddrsa,
3070 1, ZFS_SPACE_CHECK_RESERVED));
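/*
 * Hypothetical usage sketch (not part of the original file; the dataset
 * and snapshot names are made up): rename a snapshot on "tank/fs" and,
 * because recursive is B_TRUE, on every descendant filesystem that has a
 * snapshot of the old name.
 */
static int
example_rename_snapshot_recursive(void)
{
	return (dsl_dataset_rename_snapshot("tank/fs", "before-upgrade",
	    "pre-upgrade", B_TRUE));
}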
3074 * If we're doing an ownership handoff, we need to make sure that there is
3075 * only one long hold on the dataset. We're not allowed to change anything here
3076 * so we don't permanently release the long hold or regular hold here. We want
3077 * to do this only when syncing to avoid the dataset unexpectedly going away
3078 * when we release the long hold.
3081 dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
3083 boolean_t held = B_FALSE;
3085 if (!dmu_tx_is_syncing(tx))
3088 dsl_dir_t *dd = ds->ds_dir;
3089 mutex_enter(&dd->dd_activity_lock);
3090 uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
3091 (owner != NULL ? 1 : 0);
3093 * The value of dd_activity_waiters can change as soon as we drop the
3094 * lock, but we're fine with that; new waiters coming in or old
3095 * waiters leaving doesn't cause problems, since we're going to cancel
3096 * waiters later anyway. The goal of this check is to verify that no
3097 * non-waiters have long-holds, and all new long-holds will be
3098 * prevented because we're holding the pool config as writer.
3100 if (holds != dd->dd_activity_waiters)
3102 mutex_exit(&dd->dd_activity_lock);
3105 return (SET_ERROR(EBUSY));
3111 dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
3113 dsl_dataset_rollback_arg_t *ddra = arg;
3114 dsl_pool_t *dp = dmu_tx_pool(tx);
3116 int64_t unused_refres_delta;
3119 error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
3123 /* must not be a snapshot */
3124 if (ds->ds_is_snapshot) {
3125 dsl_dataset_rele(ds, FTAG);
3126 return (SET_ERROR(EINVAL));
3129 /* must have a most recent snapshot */
3130 if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
3131 dsl_dataset_rele(ds, FTAG);
3132 return (SET_ERROR(ESRCH));
3136 * No rollback to a snapshot created in the current txg, because
3137 * the rollback may dirty the dataset and create blocks that are
3138 * not reachable from the rootbp while having a birth txg that
3139 * falls into the snapshot's range.
3141 if (dmu_tx_is_syncing(tx) &&
3142 dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
3143 dsl_dataset_rele(ds, FTAG);
3144 return (SET_ERROR(EAGAIN));
3148 * If the expected target snapshot is specified, then check that
3149 * the latest snapshot is it.
3151 if (ddra->ddra_tosnap != NULL) {
3152 dsl_dataset_t *snapds;
3154 /* Check if the target snapshot exists at all. */
3155 error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
3158 * ESRCH is used to signal that the target snapshot does
3159 * not exist, while ENOENT is used to report that
3160 * the rolled back dataset does not exist.
3161 * ESRCH is also used to cover other cases where the
3162 * target snapshot is not related to the dataset being
3163 * rolled back, such as being in a different pool.
3165 if (error == ENOENT || error == EXDEV)
3166 error = SET_ERROR(ESRCH);
3167 dsl_dataset_rele(ds, FTAG);
3170 ASSERT(snapds->ds_is_snapshot);
3172 /* Check if the snapshot is the latest snapshot indeed. */
3173 if (snapds != ds->ds_prev) {
3175 * Distinguish between the case where the only problem
3176 * is intervening snapshots (EEXIST) vs the snapshot
3177 * not being a valid target for rollback (ESRCH).
3179 if (snapds->ds_dir == ds->ds_dir ||
3180 (dsl_dir_is_clone(ds->ds_dir) &&
3181 dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
3182 snapds->ds_object)) {
3183 error = SET_ERROR(EEXIST);
3185 error = SET_ERROR(ESRCH);
3187 dsl_dataset_rele(snapds, FTAG);
3188 dsl_dataset_rele(ds, FTAG);
3191 dsl_dataset_rele(snapds, FTAG);
3194 /* must not have any bookmarks after the most recent snapshot */
3195 if (dsl_bookmark_latest_txg(ds) >
3196 dsl_dataset_phys(ds)->ds_prev_snap_txg) {
3197 dsl_dataset_rele(ds, FTAG);
3198 return (SET_ERROR(EEXIST));
3201 error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
3203 dsl_dataset_rele(ds, FTAG);
3208 * Check if the snap we are rolling back to uses more than
3211 if (ds->ds_quota != 0 &&
3212 dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
3213 dsl_dataset_rele(ds, FTAG);
3214 return (SET_ERROR(EDQUOT));
3218 * When we do the clone swap, we will temporarily use more space
3219 * due to the refreservation (the head will no longer have any
3220 * unique space, so the entire amount of the refreservation will need
3221 * to be free). We will immediately destroy the clone, freeing
3222 * this space, but the freeing happens over many txg's.
3224 unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
3225 dsl_dataset_phys(ds)->ds_unique_bytes);
3227 if (unused_refres_delta > 0 &&
3228 unused_refres_delta >
3229 dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
3230 dsl_dataset_rele(ds, FTAG);
3231 return (SET_ERROR(ENOSPC));
3234 dsl_dataset_rele(ds, FTAG);
3239 dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
3241 dsl_dataset_rollback_arg_t *ddra = arg;
3242 dsl_pool_t *dp = dmu_tx_pool(tx);
3243 dsl_dataset_t *ds, *clone;
3245 char namebuf[ZFS_MAX_DATASET_NAME_LEN];
3247 VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
3249 dsl_dataset_name(ds->ds_prev, namebuf);
3250 fnvlist_add_string(ddra->ddra_result, "target", namebuf);
3252 cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
3253 ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);
3255 VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
3257 dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
3258 dsl_dataset_zero_zil(ds, tx);
3260 dsl_destroy_head_sync_impl(clone, tx);
3262 dsl_dataset_rele(clone, FTAG);
3263 dsl_dataset_rele(ds, FTAG);
3267 * Rolls back the given filesystem or volume to the most recent snapshot.
3268 * The name of the most recent snapshot will be returned under key "target"
3269 * in the result nvlist.
3272 * - The existing dataset MUST be owned by the specified owner at entry
3273 * - Upon return, dataset will still be held by the same owner, whether we
3276 * This mode is required any time the existing filesystem is mounted. See
3277 * notes above zfs_suspend_fs() for further details.
3280 dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
3283 dsl_dataset_rollback_arg_t ddra;
3285 ddra.ddra_fsname = fsname;
3286 ddra.ddra_tosnap = tosnap;
3287 ddra.ddra_owner = owner;
3288 ddra.ddra_result = result;
3290 return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
3291 dsl_dataset_rollback_sync, &ddra,
3292 1, ZFS_SPACE_CHECK_RESERVED));
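/*
 * Hypothetical usage sketch (not part of the original file): roll a
 * filesystem back to its most recent snapshot and copy out the name of
 * that snapshot, which the sync task records under the "target" key of
 * the result nvlist. The helper name is made up, and the final
 * dsl_dataset_rollback() parameter is assumed to be the result nvlist,
 * as suggested by the ddra_result assignment above.
 */
static int
example_rollback_to_latest(const char *fsname, char *target_out)
{
	nvlist_t *result = fnvlist_alloc();
	int error = dsl_dataset_rollback(fsname, NULL, NULL, result);

	if (error == 0) {
		(void) strlcpy(target_out,
		    fnvlist_lookup_string(result, "target"),
		    ZFS_MAX_DATASET_NAME_LEN);
	}
	fnvlist_free(result);
	return (error);
}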
3295 struct promotenode {
3300 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
3301 static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
3303 static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
3306 dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
3308 dsl_dataset_promote_arg_t *ddpa = arg;
3309 dsl_pool_t *dp = dmu_tx_pool(tx);
3311 struct promotenode *snap;
3312 dsl_dataset_t *origin_ds, *origin_head;
3316 size_t max_snap_len;
3317 boolean_t conflicting_snaps;
3319 err = promote_hold(ddpa, dp, FTAG);
3323 hds = ddpa->ddpa_clone;
3324 max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
3326 if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
3327 promote_rele(ddpa, FTAG);
3328 return (SET_ERROR(EXDEV));
3331 snap = list_head(&ddpa->shared_snaps);
3332 origin_head = snap->ds;
3334 err = SET_ERROR(ENOENT);
3337 origin_ds = snap->ds;
3340 * Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
3341 * When doing a promote we must make sure the encryption root for
3342 * both the target and the target's origin does not change to avoid
3343 * needing to rewrap encryption keys
3345 err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
3350 * Compute and check the amount of space to transfer. Since this is
3351 * so expensive, don't do the preliminary check.
3353 if (!dmu_tx_is_syncing(tx)) {
3354 promote_rele(ddpa, FTAG);
3358 /* compute origin's new unique space */
3359 snap = list_tail(&ddpa->clone_snaps);
3360 ASSERT(snap != NULL);
3361 ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
3362 origin_ds->ds_object);
3363 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3364 dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
3365 &ddpa->unique, &unused, &unused);
3368 * Walk the snapshots that we are moving
3370 * Compute space to transfer. Consider the incremental changes
3371 * to used by each snapshot:
3372 * (my used) = (prev's used) + (blocks born) - (blocks killed)
3373 * So each snapshot gave birth to:
3374 * (blocks born) = (my used) - (prev's used) + (blocks killed)
3375 * So a sequence would look like:
3376 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
3377 * Which simplifies to:
3378 * uN + kN + kN-1 + ... + k1 + k0
3379 * Note however, if we stop before we reach the ORIGIN we get:
3380 * uN + kN + kN-1 + ... + kM - uM-1
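/*
 * Worked example (made-up numbers, added for illustration): if the origin
 * snapshot references 15G and the deadlists of the shared snapshots
 * account for 3G, 2G and 1G of killed blocks, then the space to transfer
 * when walking all the way back to ORIGIN is 15G + 3G + 2G + 1G = 21G.
 */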
3382 conflicting_snaps = B_FALSE;
3384 ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
3385 ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
3386 ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
3387 for (snap = list_head(&ddpa->shared_snaps); snap;
3388 snap = list_next(&ddpa->shared_snaps, snap)) {
3389 uint64_t val, dlused, dlcomp, dluncomp;
3390 dsl_dataset_t *ds = snap->ds;
3395 * If there are long holds, we won't be able to evict
3398 if (dsl_dataset_long_held(ds)) {
3399 err = SET_ERROR(EBUSY);
3403 /* Check that the snapshot name does not conflict */
3404 VERIFY0(dsl_dataset_get_snapname(ds));
3405 if (strlen(ds->ds_snapname) >= max_snap_len) {
3406 err = SET_ERROR(ENAMETOOLONG);
3409 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
3411 fnvlist_add_boolean(ddpa->err_ds,
3412 snap->ds->ds_snapname);
3413 conflicting_snaps = B_TRUE;
3414 } else if (err != ENOENT) {
3418 /* The very first snapshot does not have a deadlist */
3419 if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
3422 dsl_deadlist_space(&ds->ds_deadlist,
3423 &dlused, &dlcomp, &dluncomp);
3424 ddpa->used += dlused;
3425 ddpa->comp += dlcomp;
3426 ddpa->uncomp += dluncomp;
3430 * Check that bookmarks that are being transferred don't have
3433 for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
3434 dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
3435 dsl_dataset_phys(origin_ds)->ds_creation_txg;
3436 dbn = AVL_NEXT(&origin_head->ds_bookmarks, dbn)) {
3437 if (strlen(dbn->dbn_name) >= max_snap_len) {
3438 err = SET_ERROR(ENAMETOOLONG);
3441 zfs_bookmark_phys_t bm;
3442 err = dsl_bookmark_lookup_impl(ddpa->ddpa_clone,
3443 dbn->dbn_name, &bm);
3446 fnvlist_add_boolean(ddpa->err_ds, dbn->dbn_name);
3447 conflicting_snaps = B_TRUE;
3448 } else if (err == ESRCH) {
3450 } else if (err != 0) {
3456 * In order to return the full list of conflicting snapshots, we check
3457 * whether there was a conflict after traversing all of them.
3459 if (conflicting_snaps) {
3460 err = SET_ERROR(EEXIST);
3465 * If we are a clone of a clone then we never reached ORIGIN,
3466 * so we need to subtract out the clone origin's used space.
3468 if (ddpa->origin_origin) {
3470 dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
3472 dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
3474 dsl_dataset_phys(ddpa->origin_origin)->
3475 ds_uncompressed_bytes;
3478 /* Check that there is enough space and limit headroom here */
3479 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
3480 0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc);
3485 * Compute the amounts of space that will be used by snapshots
3486 * after the promotion (for both origin and clone). For each,
3487 * it is the amount of space that will be on all of their
3488 * deadlists (that was not born before their new origin).
3490 if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
3494 * Note, typically this will not be a clone of a clone,
3495 * so dd_origin_txg will be < TXG_INITIAL, so
3496 * these snaplist_space() -> dsl_deadlist_space_range()
3497 * calls will be fast because they do not have to
3498 * iterate over all bps.
3500 snap = list_head(&ddpa->origin_snaps);
3502 err = SET_ERROR(ENOENT);
3505 err = snaplist_space(&ddpa->shared_snaps,
3506 snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
3510 err = snaplist_space(&ddpa->clone_snaps,
3511 snap->ds->ds_dir->dd_origin_txg, &space);
3514 ddpa->cloneusedsnap += space;
3516 if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
3517 DD_FLAG_USED_BREAKDOWN) {
3518 err = snaplist_space(&ddpa->origin_snaps,
3519 dsl_dataset_phys(origin_ds)->ds_creation_txg,
3520 &ddpa->originusedsnap);
3526 promote_rele(ddpa, FTAG);
3531 dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
3533 dsl_dataset_promote_arg_t *ddpa = arg;
3534 dsl_pool_t *dp = dmu_tx_pool(tx);
3536 struct promotenode *snap;
3537 dsl_dataset_t *origin_ds;
3538 dsl_dataset_t *origin_head;
3540 dsl_dir_t *odd = NULL;
3541 uint64_t oldnext_obj;
3544 ASSERT(nvlist_empty(ddpa->err_ds));
3546 VERIFY0(promote_hold(ddpa, dp, FTAG));
3547 hds = ddpa->ddpa_clone;
3549 ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
3551 snap = list_head(&ddpa->shared_snaps);
3552 origin_ds = snap->ds;
3555 snap = list_head(&ddpa->origin_snaps);
3556 origin_head = snap->ds;
3559 * We need to explicitly open odd, since origin_ds's dd will be
3562 VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
3565 dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);
3567 /* change origin's next snap */
3568 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
3569 oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
3570 snap = list_tail(&ddpa->clone_snaps);
3571 ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
3572 origin_ds->ds_object);
3573 dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
3575 /* change the origin's next clone */
3576 if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
3577 dsl_dataset_remove_from_next_clones(origin_ds,
3578 snap->ds->ds_object, tx);
3579 VERIFY0(zap_add_int(dp->dp_meta_objset,
3580 dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
3585 dmu_buf_will_dirty(dd->dd_dbuf, tx);
3586 ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
3587 dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
3588 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
3589 dmu_buf_will_dirty(odd->dd_dbuf, tx);
3590 dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
3591 origin_head->ds_dir->dd_origin_txg =
3592 dsl_dataset_phys(origin_ds)->ds_creation_txg;
3594 /* change dd_clone entries */
3595 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
3596 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3597 dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
3598 VERIFY0(zap_add_int(dp->dp_meta_objset,
3599 dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
3600 hds->ds_object, tx));
3602 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3603 dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
3604 origin_head->ds_object, tx));
3605 if (dsl_dir_phys(dd)->dd_clones == 0) {
3606 dsl_dir_phys(dd)->dd_clones =
3607 zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
3608 DMU_OT_NONE, 0, tx);
3610 VERIFY0(zap_add_int(dp->dp_meta_objset,
3611 dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
3615 * Move bookmarks to this dir.
3617 dsl_bookmark_node_t *dbn_next;
3618 for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
3619 dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
3620 dsl_dataset_phys(origin_ds)->ds_creation_txg;
3622 dbn_next = AVL_NEXT(&origin_head->ds_bookmarks, dbn);
3624 avl_remove(&origin_head->ds_bookmarks, dbn);
3625 VERIFY0(zap_remove(dp->dp_meta_objset,
3626 origin_head->ds_bookmarks_obj, dbn->dbn_name, tx));
3628 dsl_bookmark_node_add(hds, dbn, tx);
3631 dsl_bookmark_next_changed(hds, origin_ds, tx);
3633 /* move snapshots to this dir */
3634 for (snap = list_head(&ddpa->shared_snaps); snap;
3635 snap = list_next(&ddpa->shared_snaps, snap)) {
3636 dsl_dataset_t *ds = snap->ds;
3639 * Property callbacks are registered to a particular
3640 * dsl_dir. Since ours is changing, evict the objset
3641 * so that they will be unregistered from the old dsl_dir.
3643 if (ds->ds_objset) {
3644 dmu_objset_evict(ds->ds_objset);
3645 ds->ds_objset = NULL;
3648 /* move snap name entry */
3649 VERIFY0(dsl_dataset_get_snapname(ds));
3650 VERIFY0(dsl_dataset_snap_remove(origin_head,
3651 ds->ds_snapname, tx, B_TRUE));
3652 VERIFY0(zap_add(dp->dp_meta_objset,
3653 dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
3654 8, 1, &ds->ds_object, tx));
3655 dsl_fs_ss_count_adjust(hds->ds_dir, 1,
3656 DD_FIELD_SNAPSHOT_COUNT, tx);
3658 /* change containing dsl_dir */
3659 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3660 ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
3661 dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
3662 ASSERT3P(ds->ds_dir, ==, odd);
3663 dsl_dir_rele(ds->ds_dir, ds);
3664 VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
3665 NULL, ds, &ds->ds_dir));
3667 /* move any clone references */
3668 if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
3669 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
3673 for (zap_cursor_init(&zc, dp->dp_meta_objset,
3674 dsl_dataset_phys(ds)->ds_next_clones_obj);
3675 zap_cursor_retrieve(&zc, &za) == 0;
3676 zap_cursor_advance(&zc)) {
3677 dsl_dataset_t *cnds;
3680 if (za.za_first_integer == oldnext_obj) {
3682 * We've already moved the
3683 * origin's reference.
3688 VERIFY0(dsl_dataset_hold_obj(dp,
3689 za.za_first_integer, FTAG, &cnds));
3690 o = dsl_dir_phys(cnds->ds_dir)->
3691 dd_head_dataset_obj;
3693 VERIFY0(zap_remove_int(dp->dp_meta_objset,
3694 dsl_dir_phys(odd)->dd_clones, o, tx));
3695 VERIFY0(zap_add_int(dp->dp_meta_objset,
3696 dsl_dir_phys(dd)->dd_clones, o, tx));
3697 dsl_dataset_rele(cnds, FTAG);
3699 zap_cursor_fini(&zc);
3702 ASSERT(!dsl_prop_hascb(ds));
3706 * Change space accounting.
3707 * Note, ddpa->*usedsnap and dd_used_breakdown[SNAP] will either
3708 * both be valid, or both be 0 (resulting in delta == 0). This
3709 * is true for each of {clone,origin} independently.
3712 delta = ddpa->cloneusedsnap -
3713 dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
3714 ASSERT3S(delta, >=, 0);
3715 ASSERT3U(ddpa->used, >=, delta);
3716 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
3717 dsl_dir_diduse_space(dd, DD_USED_HEAD,
3718 ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
3720 delta = ddpa->originusedsnap -
3721 dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
3722 ASSERT3S(delta, <=, 0);
3723 ASSERT3U(ddpa->used, >=, -delta);
3724 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
3725 dsl_dir_diduse_space(odd, DD_USED_HEAD,
3726 -ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
3728 dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
3731 * Since livelists are specific to a clone's origin txg, they
3732 * are no longer accurate. Destroy the livelist from the clone being
3733 * promoted. If the origin dataset is a clone, destroy its livelist
3736 dsl_dir_remove_livelist(dd, tx, B_TRUE);
3737 dsl_dir_remove_livelist(odd, tx, B_TRUE);
3739 /* log history record */
3740 spa_history_log_internal_ds(hds, "promote", tx, " ");
3742 dsl_dir_rele(odd, FTAG);
3743 promote_rele(ddpa, FTAG);
3747 * Make a list of dsl_dataset_t's for the snapshots between first_obj
3748 * (exclusive) and last_obj (inclusive). The list will be in reverse
3749 * order (last_obj will be the list_head()). If first_obj == 0, do all
3750 * snapshots back to this dataset's origin.
3753 snaplist_make(dsl_pool_t *dp,
3754 uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
3756 uint64_t obj = last_obj;
3758 list_create(l, sizeof (struct promotenode),
3759 offsetof(struct promotenode, link));
3761 while (obj != first_obj) {
3763 struct promotenode *snap;
3766 err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
3767 ASSERT(err != ENOENT);
3772 first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
3774 snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
3776 list_insert_tail(l, snap);
3777 obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
3784 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3786 struct promotenode *snap;
3789 for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3790 uint64_t used, comp, uncomp;
3791 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3792 mintxg, UINT64_MAX, &used, &comp, &uncomp);
3799 snaplist_destroy(list_t *l, void *tag)
3801 struct promotenode *snap;
3803 if (l == NULL || !list_link_active(&l->list_head))
3806 while ((snap = list_tail(l)) != NULL) {
3807 list_remove(l, snap);
3808 dsl_dataset_rele(snap->ds, tag);
3809 kmem_free(snap, sizeof (*snap));
3815 promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
3819 struct promotenode *snap;
3821 error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
3825 dd = ddpa->ddpa_clone->ds_dir;
3827 if (ddpa->ddpa_clone->ds_is_snapshot ||
3828 !dsl_dir_is_clone(dd)) {
3829 dsl_dataset_rele(ddpa->ddpa_clone, tag);
3830 return (SET_ERROR(EINVAL));
3833 error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
3834 &ddpa->shared_snaps, tag);
3838 error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
3839 &ddpa->clone_snaps, tag);
3843 snap = list_head(&ddpa->shared_snaps);
3844 ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
3845 error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
3846 dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
3847 &ddpa->origin_snaps, tag);
3851 if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
3852 error = dsl_dataset_hold_obj(dp,
3853 dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
3854 tag, &ddpa->origin_origin);
3860 promote_rele(ddpa, tag);
3865 promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
3867 snaplist_destroy(&ddpa->shared_snaps, tag);
3868 snaplist_destroy(&ddpa->clone_snaps, tag);
3869 snaplist_destroy(&ddpa->origin_snaps, tag);
3870 if (ddpa->origin_origin != NULL)
3871 dsl_dataset_rele(ddpa->origin_origin, tag);
3872 dsl_dataset_rele(ddpa->ddpa_clone, tag);
3878 * If it fails due to a conflicting snapshot name, "conflsnap" will be filled
3879 * in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
3882 dsl_dataset_promote(const char *name, char *conflsnap)
3884 dsl_dataset_promote_arg_t ddpa = { 0 };
3887 nvpair_t *snap_pair;
3891 * We will modify space proportional to the number of
3892 * snapshots. Compute numsnaps.
3894 error = dmu_objset_hold(name, FTAG, &os);
3897 error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
3898 dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
3900 dmu_objset_rele(os, FTAG);
3904 ddpa.ddpa_clonename = name;
3905 ddpa.err_ds = fnvlist_alloc();
3907 ddpa.proc = curproc;
3909 error = dsl_sync_task(name, dsl_dataset_promote_check,
3910 dsl_dataset_promote_sync, &ddpa,
3911 2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
3914 * Return the first conflicting snapshot found.
3916 snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
3917 if (snap_pair != NULL && conflsnap != NULL)
3918 (void) strlcpy(conflsnap, nvpair_name(snap_pair),
3919 ZFS_MAX_DATASET_NAME_LEN);
3921 fnvlist_free(ddpa.err_ds);
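/*
 * Hypothetical usage sketch (not part of the original file; the clone
 * name and helper are made up): promote a clone, passing a buffer of at
 * least ZFS_MAX_DATASET_NAME_LEN bytes so that, on EEXIST, the first
 * conflicting snapshot name is returned to the caller.
 */
static int
example_promote_clone(char *conflsnap_out)
{
	return (dsl_dataset_promote("tank/fs/clone", conflsnap_out));
}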
3926 dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
3927 dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
3930 * "slack" factor for received datasets with refquota set on them.
3931 * See the bottom of this function for details on its use.
3933 uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
3934 spa_asize_inflation;
3935 int64_t unused_refres_delta;
3937 /* they should both be heads */
3938 if (clone->ds_is_snapshot ||
3939 origin_head->ds_is_snapshot)
3940 return (SET_ERROR(EINVAL));
3942 /* if we are not forcing, the branch point should be just before them */
3943 if (!force && clone->ds_prev != origin_head->ds_prev)
3944 return (SET_ERROR(EINVAL));
3946 /* clone should be the clone (unless they are unrelated) */
3947 if (clone->ds_prev != NULL &&
3948 clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
3949 origin_head->ds_dir != clone->ds_prev->ds_dir)
3950 return (SET_ERROR(EINVAL));
3952 /* the clone should be a child of the origin */
3953 if (clone->ds_dir->dd_parent != origin_head->ds_dir)
3954 return (SET_ERROR(EINVAL));
3956 /* origin_head shouldn't be modified unless 'force' */
3958 dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
3959 return (SET_ERROR(ETXTBSY));
3961 /* origin_head should have no long holds (e.g. is not mounted) */
3962 if (dsl_dataset_handoff_check(origin_head, owner, tx))
3963 return (SET_ERROR(EBUSY));
3965 /* check amount of any unconsumed refreservation */
3966 unused_refres_delta =
3967 (int64_t)MIN(origin_head->ds_reserved,
3968 dsl_dataset_phys(origin_head)->ds_unique_bytes) -
3969 (int64_t)MIN(origin_head->ds_reserved,
3970 dsl_dataset_phys(clone)->ds_unique_bytes);
3972 if (unused_refres_delta > 0 &&
3973 unused_refres_delta >
3974 dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
3975 return (SET_ERROR(ENOSPC));
3978 * The clone can't be too much over the head's refquota.
3980 * To ensure that the entire refquota can be used, we allow one
3981 * transaction to exceed the refquota. Therefore, this check
3982 * needs to also allow for the space referenced to be more than the
3983 * refquota. The maximum amount of space that one transaction can use
3984 * on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
3985 * overage ensures that we are able to receive a filesystem that
3986 * exceeds the refquota on the source system.
3988 * So that overage is the refquota_slack we use below.
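/*
 * Worked example (illustration only; the constants are assumptions, not
 * taken from this file): if DMU_MAX_ACCESS were 64MB and
 * spa_asize_inflation were 24, refquota_slack would be 1.5GB, so a
 * received clone could reference up to the head's refquota plus 1.5GB
 * before the swap is rejected with EDQUOT below.
 */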
3990 if (origin_head->ds_quota != 0 &&
3991 dsl_dataset_phys(clone)->ds_referenced_bytes >
3992 origin_head->ds_quota + refquota_slack)
3993 return (SET_ERROR(EDQUOT));
3999 dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
4000 dsl_dataset_t *origin, dmu_tx_t *tx)
4002 uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
4003 dsl_pool_t *dp = dmu_tx_pool(tx);
4005 ASSERT(dsl_pool_sync_context(dp));
4007 clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
4008 origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
4010 if (clone_remap_dl_obj != 0) {
4011 dsl_deadlist_close(&clone->ds_remap_deadlist);
4012 dsl_dataset_unset_remap_deadlist_object(clone, tx);
4014 if (origin_remap_dl_obj != 0) {
4015 dsl_deadlist_close(&origin->ds_remap_deadlist);
4016 dsl_dataset_unset_remap_deadlist_object(origin, tx);
4019 if (clone_remap_dl_obj != 0) {
4020 dsl_dataset_set_remap_deadlist_object(origin,
4021 clone_remap_dl_obj, tx);
4022 dsl_deadlist_open(&origin->ds_remap_deadlist,
4023 dp->dp_meta_objset, clone_remap_dl_obj);
4025 if (origin_remap_dl_obj != 0) {
4026 dsl_dataset_set_remap_deadlist_object(clone,
4027 origin_remap_dl_obj, tx);
4028 dsl_deadlist_open(&clone->ds_remap_deadlist,
4029 dp->dp_meta_objset, origin_remap_dl_obj);
4034 dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
4035 dsl_dataset_t *origin_head, dmu_tx_t *tx)
4037 dsl_pool_t *dp = dmu_tx_pool(tx);
4038 int64_t unused_refres_delta;
4040 ASSERT(clone->ds_reserved == 0);
4042 * NOTE: On DEBUG kernels there could be a race between this and
4043 * the check function if spa_asize_inflation is adjusted...
4045 ASSERT(origin_head->ds_quota == 0 ||
4046 dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
4047 DMU_MAX_ACCESS * spa_asize_inflation);
4048 ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
4050 dsl_dir_cancel_waiters(origin_head->ds_dir);
4053 * Swap per-dataset feature flags.
4055 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
4056 if (!(spa_feature_table[f].fi_flags &
4057 ZFEATURE_FLAG_PER_DATASET)) {
4058 ASSERT(!dsl_dataset_feature_is_active(clone, f));
4059 ASSERT(!dsl_dataset_feature_is_active(origin_head, f));
4063 boolean_t clone_inuse = dsl_dataset_feature_is_active(clone, f);
4064 void *clone_feature = clone->ds_feature[f];
4065 boolean_t origin_head_inuse =
4066 dsl_dataset_feature_is_active(origin_head, f);
4067 void *origin_head_feature = origin_head->ds_feature[f];
4070 dsl_dataset_deactivate_feature_impl(clone, f, tx);
4071 if (origin_head_inuse)
4072 dsl_dataset_deactivate_feature_impl(origin_head, f, tx);
4075 dsl_dataset_activate_feature(origin_head->ds_object, f,
4077 origin_head->ds_feature[f] = clone_feature;
4079 if (origin_head_inuse) {
4080 dsl_dataset_activate_feature(clone->ds_object, f,
4081 origin_head_feature, tx);
4082 clone->ds_feature[f] = origin_head_feature;
4086 dmu_buf_will_dirty(clone->ds_dbuf, tx);
4087 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
4089 if (clone->ds_objset != NULL) {
4090 dmu_objset_evict(clone->ds_objset);
4091 clone->ds_objset = NULL;
4094 if (origin_head->ds_objset != NULL) {
4095 dmu_objset_evict(origin_head->ds_objset);
4096 origin_head->ds_objset = NULL;
4099 unused_refres_delta =
4100 (int64_t)MIN(origin_head->ds_reserved,
4101 dsl_dataset_phys(origin_head)->ds_unique_bytes) -
4102 (int64_t)MIN(origin_head->ds_reserved,
4103 dsl_dataset_phys(clone)->ds_unique_bytes);
4106 * Reset origin's unique bytes.
4109 dsl_dataset_t *origin = clone->ds_prev;
4110 uint64_t comp, uncomp;
4112 dmu_buf_will_dirty(origin->ds_dbuf, tx);
4113 dsl_deadlist_space_range(&clone->ds_deadlist,
4114 dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
4115 &dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
4120 rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
4121 rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
4123 tmp = dsl_dataset_phys(origin_head)->ds_bp;
4124 dsl_dataset_phys(origin_head)->ds_bp =
4125 dsl_dataset_phys(clone)->ds_bp;
4126 dsl_dataset_phys(clone)->ds_bp = tmp;
4127 rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
4128 rrw_exit(&clone->ds_bp_rwlock, FTAG);
4131 /* set dd_*_bytes */
4133 int64_t dused, dcomp, duncomp;
4134 uint64_t cdl_used, cdl_comp, cdl_uncomp;
4135 uint64_t odl_used, odl_comp, odl_uncomp;
4137 ASSERT3U(dsl_dir_phys(clone->ds_dir)->
4138 dd_used_breakdown[DD_USED_SNAP], ==, 0);
4140 dsl_deadlist_space(&clone->ds_deadlist,
4141 &cdl_used, &cdl_comp, &cdl_uncomp);
4142 dsl_deadlist_space(&origin_head->ds_deadlist,
4143 &odl_used, &odl_comp, &odl_uncomp);
4145 dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
4147 (dsl_dataset_phys(origin_head)->ds_referenced_bytes +
4149 dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
4151 (dsl_dataset_phys(origin_head)->ds_compressed_bytes +
4153 duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
4155 (dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
4158 dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
4159 dused, dcomp, duncomp, tx);
4160 dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
4161 -dused, -dcomp, -duncomp, tx);
4164 * The difference in the space used by snapshots is the
4165 * difference in snapshot space due to the head's
4166 * deadlist (since that's the only thing that's
4167 * changing that affects the snapused).
4169 dsl_deadlist_space_range(&clone->ds_deadlist,
4170 origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
4171 &cdl_used, &cdl_comp, &cdl_uncomp);
4172 dsl_deadlist_space_range(&origin_head->ds_deadlist,
4173 origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
4174 &odl_used, &odl_comp, &odl_uncomp);
4175 dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
4176 DD_USED_HEAD, DD_USED_SNAP, tx);
4179 /* swap ds_*_bytes */
4180 SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
4181 dsl_dataset_phys(clone)->ds_referenced_bytes);
4182 SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
4183 dsl_dataset_phys(clone)->ds_compressed_bytes);
4184 SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
4185 dsl_dataset_phys(clone)->ds_uncompressed_bytes);
4186 SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
4187 dsl_dataset_phys(clone)->ds_unique_bytes);
4189 /* apply any parent delta for change in unconsumed refreservation */
4190 dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
4191 unused_refres_delta, 0, 0, tx);
4196 dsl_deadlist_close(&clone->ds_deadlist);
4197 dsl_deadlist_close(&origin_head->ds_deadlist);
4198 SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
4199 dsl_dataset_phys(clone)->ds_deadlist_obj);
4200 dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
4201 dsl_dataset_phys(clone)->ds_deadlist_obj);
4202 dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
4203 dsl_dataset_phys(origin_head)->ds_deadlist_obj);
4204 dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
4206 /*
4207 * If there is a bookmark at the origin, its "next dataset" is
4208 * changing, so we need to reset its FBN.
4209 */
4208 * changing, so we need to reset its FBN.
4210 dsl_bookmark_next_changed(origin_head, origin_head->ds_prev, tx);
4212 dsl_scan_ds_clone_swapped(origin_head, clone, tx);
4214 /*
4215 * Destroy any livelists associated with the clone or the origin,
4216 * since after the swap the corresponding livelists are no longer
4217 * valid.
4218 */
4219 dsl_dir_remove_livelist(clone->ds_dir, tx, B_TRUE);
4220 dsl_dir_remove_livelist(origin_head->ds_dir, tx, B_TRUE);
4222 spa_history_log_internal_ds(clone, "clone swap", tx,
4223 "parent=%s", origin_head->ds_dir->dd_myname);
4224 }
4226 /*
4227 * Given a pool name and a dataset object number in that pool,
4228 * return the name of that dataset.
4229 */
4230 int
4231 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
4232 {
4233 dsl_pool_t *dp;
4234 dsl_dataset_t *ds;
4235 int error;
4237 error = dsl_pool_hold(pname, FTAG, &dp);
4238 if (error != 0)
4239 return (error);
4241 error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
4242 if (error == 0) {
4243 dsl_dataset_name(ds, buf);
4244 dsl_dataset_rele(ds, FTAG);
4245 }
4246 dsl_pool_rele(dp, FTAG);
4247 return (error);
4248 }
4251 int
4252 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
4253 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
4254 {
4255 int error = 0;
4257 ASSERT3S(asize, >, 0);
4259 /*
4260 * *ref_rsrv is the portion of asize that will come from any
4261 * unconsumed refreservation space.
4262 */
4263 *ref_rsrv = 0;
4265 mutex_enter(&ds->ds_lock);
4266 /*
4267 * Make a space adjustment for reserved bytes.
4268 */
4269 if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
4270 ASSERT3U(*used, >=,
4271 ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
4272 *used -=
4273 (ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
4274 *ref_rsrv =
4275 asize - MIN(asize, parent_delta(ds, asize + inflight));
4276 }
4278 if (!check_quota || ds->ds_quota == 0) {
4279 mutex_exit(&ds->ds_lock);
4280 return (0);
4281 }
4282 /*
4283 * If they are requesting more space, and our current estimate
4284 * is over quota, they get to try again unless the actual
4285 * on-disk is over quota and there are no pending changes (which
4286 * may free up space for us).
4287 */
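/*
 * For example (illustrative numbers): with a 10G quota, 9G referenced on
 * disk and 2G of dirty/in-flight data, the 11G estimate is over quota but
 * the on-disk figure is not, so we return ERESTART and let the caller
 * retry after pending changes sync; only when referenced itself reaches
 * the quota with nothing in flight do we return EDQUOT.
 */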
4288 if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
4289 ds->ds_quota) {
4290 if (inflight > 0 ||
4291 dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
4292 error = SET_ERROR(ERESTART);
4293 else
4294 error = SET_ERROR(EDQUOT);
4295 }
4296 mutex_exit(&ds->ds_lock);
4298 return (error);
4299 }
4301 typedef struct dsl_dataset_set_qr_arg {
4302 const char *ddsqra_name;
4303 zprop_source_t ddsqra_source;
4304 uint64_t ddsqra_value;
4305 } dsl_dataset_set_qr_arg_t;
4309 static int
4310 dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
4311 {
4312 dsl_dataset_set_qr_arg_t *ddsqra = arg;
4313 dsl_pool_t *dp = dmu_tx_pool(tx);
4314 dsl_dataset_t *ds;
4315 int error;
4316 uint64_t newval;
4318 if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
4319 return (SET_ERROR(ENOTSUP));
4321 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
4322 if (error != 0)
4323 return (error);
4325 if (ds->ds_is_snapshot) {
4326 dsl_dataset_rele(ds, FTAG);
4327 return (SET_ERROR(EINVAL));
4328 }
4330 error = dsl_prop_predict(ds->ds_dir,
4331 zfs_prop_to_name(ZFS_PROP_REFQUOTA),
4332 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
4333 if (error != 0) {
4334 dsl_dataset_rele(ds, FTAG);
4335 return (error);
4336 }
4338 if (newval == 0) {
4339 dsl_dataset_rele(ds, FTAG);
4340 return (0);
4341 }
4343 if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
4344 newval < ds->ds_reserved) {
4345 dsl_dataset_rele(ds, FTAG);
4346 return (SET_ERROR(ENOSPC));
4347 }
4349 dsl_dataset_rele(ds, FTAG);
4350 return (0);
4351 }
4353 static void
4354 dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
4355 {
4356 dsl_dataset_set_qr_arg_t *ddsqra = arg;
4357 dsl_pool_t *dp = dmu_tx_pool(tx);
4358 dsl_dataset_t *ds = NULL;
4359 uint64_t newval;
4361 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
4363 dsl_prop_set_sync_impl(ds,
4364 zfs_prop_to_name(ZFS_PROP_REFQUOTA),
4365 ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
4366 &ddsqra->ddsqra_value, tx);
4368 VERIFY0(dsl_prop_get_int_ds(ds,
4369 zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
4371 if (ds->ds_quota != newval) {
4372 dmu_buf_will_dirty(ds->ds_dbuf, tx);
4373 ds->ds_quota = newval;
4374 }
4375 dsl_dataset_rele(ds, FTAG);
4376 }
4378 int
4379 dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
4380 uint64_t refquota)
4381 {
4382 dsl_dataset_set_qr_arg_t ddsqra;
4384 ddsqra.ddsqra_name = dsname;
4385 ddsqra.ddsqra_source = source;
4386 ddsqra.ddsqra_value = refquota;
4388 return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
4389 dsl_dataset_set_refquota_sync, &ddsqra, 0,
4390 ZFS_SPACE_CHECK_EXTRA_RESERVED));
4391 }
4393 static int
4394 dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
4395 {
4396 dsl_dataset_set_qr_arg_t *ddsqra = arg;
4397 dsl_pool_t *dp = dmu_tx_pool(tx);
4398 dsl_dataset_t *ds;
4399 int error;
4400 uint64_t newval, unique;
4402 if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
4403 return (SET_ERROR(ENOTSUP));
4405 error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
4406 if (error != 0)
4407 return (error);
4409 if (ds->ds_is_snapshot) {
4410 dsl_dataset_rele(ds, FTAG);
4411 return (SET_ERROR(EINVAL));
4412 }
4414 error = dsl_prop_predict(ds->ds_dir,
4415 zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
4416 ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
4417 if (error != 0) {
4418 dsl_dataset_rele(ds, FTAG);
4419 return (error);
4420 }
4422 /*
4423 * If we are doing the preliminary check in open context, the
4424 * space estimates may be inaccurate.
4425 */
4426 if (!dmu_tx_is_syncing(tx)) {
4427 dsl_dataset_rele(ds, FTAG);
4428 return (0);
4429 }
4431 mutex_enter(&ds->ds_lock);
4432 if (!DS_UNIQUE_IS_ACCURATE(ds))
4433 dsl_dataset_recalc_head_uniq(ds);
4434 unique = dsl_dataset_phys(ds)->ds_unique_bytes;
4435 mutex_exit(&ds->ds_lock);
4437 if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
4438 uint64_t delta = MAX(unique, newval) -
4439 MAX(unique, ds->ds_reserved);
4441 if (delta >
4442 dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
4443 (ds->ds_quota > 0 && newval > ds->ds_quota)) {
4444 dsl_dataset_rele(ds, FTAG);
4445 return (SET_ERROR(ENOSPC));
4446 }
4447 }
4449 dsl_dataset_rele(ds, FTAG);
4450 return (0);
4451 }
4453 void
4454 dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
4455 zprop_source_t source, uint64_t value, dmu_tx_t *tx)
4456 {
4457 uint64_t newval;
4458 uint64_t unique;
4459 int64_t delta;
4461 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
4462 source, sizeof (value), 1, &value, tx);
4464 VERIFY0(dsl_prop_get_int_ds(ds,
4465 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
4467 dmu_buf_will_dirty(ds->ds_dbuf, tx);
4468 mutex_enter(&ds->ds_dir->dd_lock);
4469 mutex_enter(&ds->ds_lock);
4470 ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
4471 unique = dsl_dataset_phys(ds)->ds_unique_bytes;
4472 delta = MAX(0, (int64_t)(newval - unique)) -
4473 MAX(0, (int64_t)(ds->ds_reserved - unique));
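/*
 * For example (illustrative numbers): with 1G of unique bytes, an old
 * refreservation of 2G and a new value of 5G, delta is
 * MAX(0, 5G - 1G) - MAX(0, 2G - 1G) = 3G of additional space charged
 * to DD_USED_REFRSRV below.
 */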
4474 ds->ds_reserved = newval;
4475 mutex_exit(&ds->ds_lock);
4477 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
4478 mutex_exit(&ds->ds_dir->dd_lock);
4479 }
4481 static void
4482 dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
4483 {
4484 dsl_dataset_set_qr_arg_t *ddsqra = arg;
4485 dsl_pool_t *dp = dmu_tx_pool(tx);
4486 dsl_dataset_t *ds = NULL;
4488 VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
4489 dsl_dataset_set_refreservation_sync_impl(ds,
4490 ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
4491 dsl_dataset_rele(ds, FTAG);
4492 }
4494 int
4495 dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
4496 uint64_t refreservation)
4497 {
4498 dsl_dataset_set_qr_arg_t ddsqra;
4500 ddsqra.ddsqra_name = dsname;
4501 ddsqra.ddsqra_source = source;
4502 ddsqra.ddsqra_value = refreservation;
4504 return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
4505 dsl_dataset_set_refreservation_sync, &ddsqra, 0,
4506 ZFS_SPACE_CHECK_EXTRA_RESERVED));
4507 }
4509 typedef struct dsl_dataset_set_compression_arg {
4510 const char *ddsca_name;
4511 zprop_source_t ddsca_source;
4512 uint64_t ddsca_value;
4513 } dsl_dataset_set_compression_arg_t;
4516 static int
4517 dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
4518 {
4519 dsl_dataset_set_compression_arg_t *ddsca = arg;
4520 dsl_pool_t *dp = dmu_tx_pool(tx);
4522 uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
4523 spa_feature_t f = zio_compress_to_feature(compval);
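/*
 * zio_compress_to_feature() maps the algorithm to the pool feature it
 * depends on. This sync task is only issued for zstd (see
 * dsl_dataset_set_compression() below), so an algorithm with no
 * associated feature is invalid here, and a feature that is not yet
 * enabled on the pool means the setting cannot be accepted.
 */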
4525 if (f == SPA_FEATURE_NONE)
4526 return (SET_ERROR(EINVAL));
4528 if (!spa_feature_is_enabled(dp->dp_spa, f))
4529 return (SET_ERROR(ENOTSUP));
4531 return (0);
4532 }
4534 static void
4535 dsl_dataset_set_compression_sync(void *arg, dmu_tx_t *tx)
4536 {
4537 dsl_dataset_set_compression_arg_t *ddsca = arg;
4538 dsl_pool_t *dp = dmu_tx_pool(tx);
4539 dsl_dataset_t *ds = NULL;
4541 uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
4542 spa_feature_t f = zio_compress_to_feature(compval);
4543 ASSERT3S(spa_feature_table[f].fi_type, ==, ZFEATURE_TYPE_BOOLEAN);
4545 VERIFY0(dsl_dataset_hold(dp, ddsca->ddsca_name, FTAG, &ds));
4546 if (zfeature_active(f, ds->ds_feature[f]) != B_TRUE) {
4547 ds->ds_feature_activation[f] = (void *)B_TRUE;
4548 dsl_dataset_activate_feature(ds->ds_object, f,
4549 ds->ds_feature_activation[f], tx);
4550 ds->ds_feature[f] = ds->ds_feature_activation[f];
4551 }
4552 dsl_dataset_rele(ds, FTAG);
4553 }
4555 int
4556 dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
4557 uint64_t compression)
4558 {
4559 dsl_dataset_set_compression_arg_t ddsca;
4561 /*
4562 * The sync task is only required for zstd in order to activate
4563 * the feature flag when the property is first set.
4564 */
4565 if (ZIO_COMPRESS_ALGO(compression) != ZIO_COMPRESS_ZSTD)
4566 return (0);
4568 ddsca.ddsca_name = dsname;
4569 ddsca.ddsca_source = source;
4570 ddsca.ddsca_value = compression;
4572 return (dsl_sync_task(dsname, dsl_dataset_set_compression_check,
4573 dsl_dataset_set_compression_sync, &ddsca, 0,
4574 ZFS_SPACE_CHECK_EXTRA_RESERVED));
4575 }
4577 /*
4578 * Return (in *usedp) the amount of space referenced by "new" that was not
4579 * referenced at the time the bookmark corresponds to. "New" may be a
4580 * snapshot or a head. The bookmark must be before new, in
4581 * new's filesystem (or its origin) -- caller verifies this.
4583 * The written space is calculated by considering two components: First, we
4584 * ignore any freed space, and calculate the written as new's used space
4585 * minus old's used space. Next, we add in the amount of space that was freed
4586 * between the two time points, thus reducing new's used space relative to
4587 * old's. Specifically, this is the space that was born before
4588 * zbm_creation_txg, and freed before new (ie. on new's deadlist or a
4589 * previous deadlist).
4591 * space freed                         [---------------------]
4592 * snapshots                       ---O-------O--------O-------O------
4593 *                                       bookmark            new
4595 * Note, the bookmark's zbm_*_bytes_refd must be valid, but if the HAS_FBN
4596 * flag is not set, we will calculate the freed_before_next based on the
4597 * next snapshot's deadlist, rather than using zbm_*_freed_before_next_snap.
4598 */
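/*
 * A worked example with illustrative numbers: if the bookmark recorded
 * 10G referenced, "new" references 12G, and 3G of the blocks that were
 * referenced at the bookmark have since been freed (they sit on deadlists
 * at or before "new" with birth <= zbm_creation_txg), then
 * written = 12G - 10G + 3G = 5G.
 */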
4599 static int
4600 dsl_dataset_space_written_impl(zfs_bookmark_phys_t *bmp,
4601 dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4602 {
4603 int err = 0;
4604 dsl_pool_t *dp = new->ds_dir->dd_pool;
4606 ASSERT(dsl_pool_config_held(dp));
4607 if (dsl_dataset_is_snapshot(new)) {
4608 ASSERT3U(bmp->zbm_creation_txg, <,
4609 dsl_dataset_phys(new)->ds_creation_txg);
4610 }
4612 *usedp = 0;
4613 *usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
4614 *usedp -= bmp->zbm_referenced_bytes_refd;
4616 *compp = 0;
4617 *compp += dsl_dataset_phys(new)->ds_compressed_bytes;
4618 *compp -= bmp->zbm_compressed_bytes_refd;
4620 *uncompp = 0;
4621 *uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
4622 *uncompp -= bmp->zbm_uncompressed_bytes_refd;
4624 dsl_dataset_t *snap = new;
4626 while (dsl_dataset_phys(snap)->ds_prev_snap_txg >
4627 bmp->zbm_creation_txg) {
4628 uint64_t used, comp, uncomp;
4630 dsl_deadlist_space_range(&snap->ds_deadlist,
4631 0, bmp->zbm_creation_txg,
4632 &used, &comp, &uncomp);
4633 *usedp += used;
4634 *compp += comp;
4635 *uncompp += uncomp;
4637 uint64_t snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
4638 if (snap != new)
4639 dsl_dataset_rele(snap, FTAG);
4640 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4641 if (err != 0)
4642 break;
4643 }
4645 /*
4646 * We might not have the FBN if we are calculating written from
4647 * a snapshot (because we didn't know the correct "next" snapshot
4648 * until now).
4649 */
4650 if (bmp->zbm_flags & ZBM_FLAG_HAS_FBN) {
4651 *usedp += bmp->zbm_referenced_freed_before_next_snap;
4652 *compp += bmp->zbm_compressed_freed_before_next_snap;
4653 *uncompp += bmp->zbm_uncompressed_freed_before_next_snap;
4654 } else {
4655 ASSERT3U(dsl_dataset_phys(snap)->ds_prev_snap_txg, ==,
4656 bmp->zbm_creation_txg);
4657 uint64_t used, comp, uncomp;
4658 dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp);
4659 *usedp += used;
4660 *compp += comp;
4661 *uncompp += uncomp;
4662 }
4663 if (snap != new)
4664 dsl_dataset_rele(snap, FTAG);
4665 return (err);
4666 }
4668 /*
4669 * Return (in *usedp) the amount of space written in new that was not
4670 * present at the time the bookmark corresponds to. New may be a
4671 * snapshot or the head. Old must be a bookmark before new, in
4672 * new's filesystem (or its origin) -- caller verifies this.
4673 */
4674 int
4675 dsl_dataset_space_written_bookmark(zfs_bookmark_phys_t *bmp,
4676 dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4677 {
4678 if (!(bmp->zbm_flags & ZBM_FLAG_HAS_FBN))
4679 return (SET_ERROR(ENOTSUP));
4680 return (dsl_dataset_space_written_impl(bmp, new,
4681 usedp, compp, uncompp));
4682 }
4684 /*
4685 * Return (in *usedp) the amount of space written in new that is not
4686 * present in oldsnap. New may be a snapshot or the head. Old must be
4687 * a snapshot before new, in new's filesystem (or its origin). If not then
4688 * fail and return EINVAL.
4689 */
4690 int
4691 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4692 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4693 {
4694 if (!dsl_dataset_is_before(new, oldsnap, 0))
4695 return (SET_ERROR(EINVAL));
4697 zfs_bookmark_phys_t zbm = { 0 };
4698 dsl_dataset_phys_t *dsp = dsl_dataset_phys(oldsnap);
4699 zbm.zbm_guid = dsp->ds_guid;
4700 zbm.zbm_creation_txg = dsp->ds_creation_txg;
4701 zbm.zbm_creation_time = dsp->ds_creation_time;
4702 zbm.zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
4703 zbm.zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
4704 zbm.zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
4706 /*
4707 * If oldsnap is the origin (or origin's origin, ...) of new,
4708 * we can't easily calculate the effective FBN. Therefore,
4709 * we do not set ZBM_FLAG_HAS_FBN, so that the _impl will calculate
4710 * it relative to the correct "next": the next snapshot towards "new",
4711 * rather than the next snapshot in oldsnap's dsl_dir.
4712 */
4713 return (dsl_dataset_space_written_impl(&zbm, new,
4714 usedp, compp, uncompp));
4715 }
4717 /*
4718 * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4719 * lastsnap, and all snapshots in between are deleted.
4721 * blocks that would be freed        [---------------------------]
4722 * snapshots                       ---O-------O--------O-------O--------O
4723 *                                       firstsnap        lastsnap
4725 * This is the set of blocks that were born after the snap before firstsnap,
4726 * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4727 * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4728 * We calculate this by iterating over the relevant deadlists (from the snap
4729 * after lastsnap, backward to the snap after firstsnap), summing up the
4730 * space on the deadlist that was born after the snap before firstsnap.
4731 */
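/*
 * A worked example: with snapshots @a, @b, @c, @d and a request to
 * destroy @b..@c, we walk the deadlists of @d and @c (from the snap
 * after lastsnap back to the snap after firstsnap) and sum only the
 * entries born after @a, i.e. blocks visible nowhere outside @b..@c.
 */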
4732 int
4733 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4734 dsl_dataset_t *lastsnap,
4735 uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4736 {
4737 int err = 0;
4738 uint64_t snapobj;
4739 dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4741 ASSERT(firstsnap->ds_is_snapshot);
4742 ASSERT(lastsnap->ds_is_snapshot);
4744 /*
4745 * Check that the snapshots are in the same dsl_dir, and firstsnap
4746 * is before lastsnap.
4747 */
4748 if (firstsnap->ds_dir != lastsnap->ds_dir ||
4749 dsl_dataset_phys(firstsnap)->ds_creation_txg >
4750 dsl_dataset_phys(lastsnap)->ds_creation_txg)
4751 return (SET_ERROR(EINVAL));
4753 *usedp = *compp = *uncompp = 0;
4755 snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
4756 while (snapobj != firstsnap->ds_object) {
4757 dsl_dataset_t *ds;
4758 uint64_t used, comp, uncomp;
4760 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4761 if (err != 0)
4762 break;
4764 dsl_deadlist_space_range(&ds->ds_deadlist,
4765 dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
4766 &used, &comp, &uncomp);
4767 *usedp += used;
4768 *compp += comp;
4769 *uncompp += uncomp;
4771 snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
4772 ASSERT3U(snapobj, !=, 0);
4773 dsl_dataset_rele(ds, FTAG);
4774 }
4775 return (err);
4776 }
4778 /*
4779 * Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
4780 * For example, they could both be snapshots of the same filesystem, and
4781 * 'earlier' is before 'later'. Or 'earlier' could be the origin of
4782 * 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
4783 * filesystem. Or 'earlier' could be the origin's origin.
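* For example, if pool/clone was cloned from pool/fs@snap1, then
* pool/fs@snap1 (and any older snapshot of pool/fs) is "before"
* pool/clone and before every snapshot of pool/clone.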
4785 * If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
4786 */
4787 boolean_t
4788 dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
4789 uint64_t earlier_txg)
4790 {
4791 dsl_pool_t *dp = later->ds_dir->dd_pool;
4792 int error;
4793 boolean_t ret;
4795 ASSERT(dsl_pool_config_held(dp));
4796 ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
4798 if (earlier_txg == 0)
4799 earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
4801 if (later->ds_is_snapshot &&
4802 earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
4803 return (B_FALSE);
4805 if (later->ds_dir == earlier->ds_dir)
4806 return (B_TRUE);
4808 /*
4809 * We check dd_origin_obj explicitly here rather than using
4810 * dsl_dir_is_clone() so that we will return TRUE if "earlier"
4811 * is $ORIGIN@$ORIGIN. dsl_dataset_space_written() depends on
4812 * this behavior.
4813 */
4814 if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == 0)
4815 return (B_FALSE);
4817 dsl_dataset_t *origin;
4818 error = dsl_dataset_hold_obj(dp,
4819 dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
4820 if (error != 0)
4821 return (B_FALSE);
4822 if (dsl_dataset_phys(origin)->ds_creation_txg == earlier_txg &&
4823 origin->ds_dir == earlier->ds_dir) {
4824 dsl_dataset_rele(origin, FTAG);
4825 return (B_TRUE);
4826 }
4827 ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
4828 dsl_dataset_rele(origin, FTAG);
4829 return (ret);
4830 }
4832 void
4833 dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
4834 {
4835 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
4836 dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
4837 }
4839 boolean_t
4840 dsl_dataset_is_zapified(dsl_dataset_t *ds)
4841 {
4842 dmu_object_info_t doi;
4844 dmu_object_info_from_db(ds->ds_dbuf, &doi);
4845 return (doi.doi_type == DMU_OTN_ZAP_METADATA);
4846 }
4848 boolean_t
4849 dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
4850 {
4851 return (dsl_dataset_is_zapified(ds) &&
4852 zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
4853 ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
4854 }
4856 uint64_t
4857 dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
4858 {
4859 uint64_t remap_deadlist_obj;
4860 int err;
4862 if (!dsl_dataset_is_zapified(ds))
4863 return (0);
4865 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
4866 DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
4867 &remap_deadlist_obj);
4869 if (err != 0) {
4870 VERIFY3S(err, ==, ENOENT);
4871 return (0);
4872 }
4874 ASSERT(remap_deadlist_obj != 0);
4875 return (remap_deadlist_obj);
4876 }
4878 boolean_t
4879 dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
4880 {
4881 EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
4882 dsl_dataset_get_remap_deadlist_object(ds) != 0);
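/*
 * The EQUIV() above asserts that the in-core state matches the on-disk
 * state: the remap deadlist is open in memory exactly when the
 * DS_FIELD_REMAP_DEADLIST entry exists in the dataset's ZAP.
 */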
4883 return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
4884 }
4886 static void
4887 dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
4888 dmu_tx_t *tx)
4889 {
4890 ASSERT(obj != 0);
4891 dsl_dataset_zapify(ds, tx);
4892 VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
4893 DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
4894 }
4896 static void
4897 dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
4898 {
4899 VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
4900 ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
4901 }
4903 void
4904 dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
4905 {
4906 uint64_t remap_deadlist_object;
4907 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
4909 ASSERT(dmu_tx_is_syncing(tx));
4910 ASSERT(dsl_dataset_remap_deadlist_exists(ds));
4912 remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
4913 dsl_deadlist_close(&ds->ds_remap_deadlist);
4914 dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
4915 dsl_dataset_unset_remap_deadlist_object(ds, tx);
4916 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
4917 }
4919 void
4920 dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
4921 {
4922 uint64_t remap_deadlist_obj;
4923 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
4925 ASSERT(dmu_tx_is_syncing(tx));
4926 ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
4927 /*
4928 * Currently we only create remap deadlists when there are indirect
4929 * vdevs with referenced mappings.
4930 */
4931 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
4933 remap_deadlist_obj = dsl_deadlist_clone(
4934 &ds->ds_deadlist, UINT64_MAX,
4935 dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
4936 dsl_dataset_set_remap_deadlist_object(ds,
4937 remap_deadlist_obj, tx);
4938 dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
4939 remap_deadlist_obj);
4940 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
4941 }
4943 void
4944 dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
4945 uint64_t num_redact_snaps, dmu_tx_t *tx)
4946 {
4947 uint64_t dsobj = ds->ds_object;
4948 struct feature_type_uint64_array_arg *ftuaa =
4949 kmem_zalloc(sizeof (*ftuaa), KM_SLEEP);
4950 ftuaa->length = (int64_t)num_redact_snaps;
4951 if (num_redact_snaps > 0) {
4952 ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
4953 KM_SLEEP);
4954 bcopy(redact_snaps, ftuaa->array, num_redact_snaps *
4955 sizeof (uint64_t));
4956 }
4957 dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
4958 ftuaa, tx);
4959 ds->ds_feature[SPA_FEATURE_REDACTED_DATASETS] = ftuaa;
4960 }
4963 #ifdef _LP64
4964 #define RECORDSIZE_PERM ZMOD_RW
4965 #else
4966 /* Limited to 1M on 32-bit platforms due to lack of virtual address space */
4967 #define RECORDSIZE_PERM ZMOD_RD
4968 #endif
4969 ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, INT, RECORDSIZE_PERM,
4970 "Max allowed record size");
4972 ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
4973 "Allow mounting of redacted datasets");
4975 #if defined(_KERNEL)
4976 EXPORT_SYMBOL(dsl_dataset_hold);
4977 EXPORT_SYMBOL(dsl_dataset_hold_flags);
4978 EXPORT_SYMBOL(dsl_dataset_hold_obj);
4979 EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
4980 EXPORT_SYMBOL(dsl_dataset_own);
4981 EXPORT_SYMBOL(dsl_dataset_own_obj);
4982 EXPORT_SYMBOL(dsl_dataset_name);
4983 EXPORT_SYMBOL(dsl_dataset_rele);
4984 EXPORT_SYMBOL(dsl_dataset_rele_flags);
4985 EXPORT_SYMBOL(dsl_dataset_disown);
4986 EXPORT_SYMBOL(dsl_dataset_tryown);
4987 EXPORT_SYMBOL(dsl_dataset_create_sync);
4988 EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
4989 EXPORT_SYMBOL(dsl_dataset_snapshot_check);
4990 EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
4991 EXPORT_SYMBOL(dsl_dataset_promote);
4992 EXPORT_SYMBOL(dsl_dataset_user_hold);
4993 EXPORT_SYMBOL(dsl_dataset_user_release);
4994 EXPORT_SYMBOL(dsl_dataset_get_holds);
4995 EXPORT_SYMBOL(dsl_dataset_get_blkptr);
4996 EXPORT_SYMBOL(dsl_dataset_get_spa);
4997 EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
4998 EXPORT_SYMBOL(dsl_dataset_space_written);
4999 EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
5000 EXPORT_SYMBOL(dsl_dataset_sync);
5001 EXPORT_SYMBOL(dsl_dataset_block_born);
5002 EXPORT_SYMBOL(dsl_dataset_block_kill);
5003 EXPORT_SYMBOL(dsl_dataset_dirty);
5004 EXPORT_SYMBOL(dsl_dataset_stats);
5005 EXPORT_SYMBOL(dsl_dataset_fast_stat);
5006 EXPORT_SYMBOL(dsl_dataset_space);
5007 EXPORT_SYMBOL(dsl_dataset_fsid_guid);
5008 EXPORT_SYMBOL(dsl_dsobj_to_dsname);
5009 EXPORT_SYMBOL(dsl_dataset_check_quota);
5010 EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
5011 EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);