/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
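
/*
 * Arguments for the snapshot-destroy sync task.  dsda_snaps is the
 * caller's full list of snapshots; the check function records the names
 * that passed validation in dsda_successful_snaps and any per-snapshot
 * errors in dsda_errlist.
 */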
typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;
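
/*
 * Check whether a single snapshot may be destroyed: it must actually be
 * a snapshot, must not be long-held, and (unless the destroy is
 * deferred) must have no user holds and must not be a branch point.
 */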
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
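
/*
 * Sync-task check function: validate every requested snapshot with
 * dsl_destroy_snapshot_check_impl(), accumulating names that passed in
 * dsda_successful_snaps and errors in dsda_errlist.  Returns the first
 * recorded error, if any.
 */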
static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}
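
/*
 * State shared by process_old_cb() while walking the next snapshot's
 * old-format deadlist.
 */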
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
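
/*
 * Old-format (pre-deadlist) support: walk ds_next's deadlist, moving
 * blocks that predate ds's previous snapshot into ds's deadlist and
 * freeing the rest, then swap the two deadlist objects.
 */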
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
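
/*
 * Remove the key "mintxg" from the deadlist of every clone that branched
 * off after that txg, recursing into clones of clones.
 */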
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
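
/*
 * Destroy one snapshot in syncing context.  If the snapshot still has
 * user holds or clones and "defer" is set, just mark it
 * DS_FLAG_DEFER_DESTROY.  Otherwise unlink it from the snapshot chain,
 * hand its deadlist and space accounting to the next snapshot (or to the
 * head), and free its on-disk objects.
 */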
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it. Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
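
/*
 * Sync-task function: destroy every snapshot that passed the check
 * function.
 */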
static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	dsda.dsda_successful_snaps = fnvlist_alloc();
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}
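
/*
 * Convenience wrapper: destroy a single snapshot by name.  The detailed
 * per-snapshot errlist is discarded; only the errno is returned.
 */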
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}
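
/*
 * The old synchronous destroy path frees every block of the dataset from
 * syncing context.  kill_blkptr() is the traverse_dataset() callback that
 * does the freeing; struct killarg carries the dataset and transaction it
 * needs.
 */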
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log. It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
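
/*
 * Free all of the dataset's blocks synchronously; used when the
 * async_destroy feature is not enabled on the pool.
 */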
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;
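
/*
 * Check whether a head (non-snapshot) dataset can be destroyed: no
 * unexpected long holds, no snapshots of it (other than the origin branch
 * it was cloned from), no child filesystems, and, if its deferred-destroy
 * origin would be removed along with it, no long holds on that origin.
 */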
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
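
/*
 * Destroy the now-empty dsl_dir: drop its reservation and filesystem
 * count, verify its space accounting is zero, and remove its ZAP objects
 * and its entry in the parent's child directory.
 */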
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created. We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
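
/*
 * Destroy a head dataset in syncing context: remove its refreservation,
 * release its origin (dropping ds_num_children), free its blocks
 * (synchronously on old pools, otherwise by adding its root bp to the
 * pool's async-destroy bptree), erase it from its dsl_dir, destroy the
 * dir itself, and finally destroy the deferred-destroy origin snapshot
 * if we were its last user.
 */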
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist. Unless it's a clone, the
	 * deadlist should be empty. (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}
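
/*
 * First phase on pools without async_destroy: mark the dataset
 * DS_FLAG_INCONSISTENT so that, if we crash while freeing its objects in
 * open context, it remains eligible for cleanup by
 * dsl_destroy_inconsistent().
 */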
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}
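
/*
 * Destroy a head dataset by name.  On pools without async_destroy the
 * work is split: mark the dataset inconsistent, free its objects from
 * open context so the final sync task stays short, then run the destroy
 * sync task.
 */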
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}

/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}

	return (0);
}