/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
typedef struct dmu_snapshots_destroy_arg {
	nvlist_t *dsda_snaps;
	nvlist_t *dsda_successful_snaps;
	boolean_t dsda_defer;
	nvlist_t *dsda_errlist;
} dmu_snapshots_destroy_arg_t;
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
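/*
 * Illustrative note (not from the original source): "zfs destroy -d"
 * reaches this check with defer == B_TRUE, so a held or cloned snapshot
 * passes and is merely flagged DS_FLAG_DEFER_DESTROY in the sync phase
 * below.  For example:
 *
 *	zfs hold keep pool/fs@snap
 *	zfs destroy -d pool/fs@snap	(succeeds; destruction deferred)
 *	zfs release keep pool/fs@snap	(last release frees the snapshot)
 */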
static int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;
	int error = 0;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	for (pair = nvlist_next_nvpair(dsda->dsda_snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(dsda->dsda_snaps, pair)) {
		dsl_dataset_t *ds;

		error = dsl_dataset_hold(dp, nvpair_name(pair),
		    FTAG, &ds);

		/*
		 * If the snapshot does not exist, silently ignore it
		 * (it's "already destroyed").
		 */
		if (error == ENOENT)
			continue;

		if (error == 0) {
			error = dsl_destroy_snapshot_check_impl(ds,
			    dsda->dsda_defer);
			dsl_dataset_rele(ds, FTAG);
		}

		if (error == 0) {
			fnvlist_add_boolean(dsda->dsda_successful_snaps,
			    nvpair_name(pair));
		} else {
			fnvlist_add_int32(dsda->dsda_errlist,
			    nvpair_name(pair), error);
		}
	}

	pair = nvlist_next_nvpair(dsda->dsda_errlist, NULL);
	if (pair != NULL)
		return (fnvpair_value_int32(pair));

	return (0);
}
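/*
 * Illustrative note (not from the original source): dsl_sync_task()
 * runs the check function and then the sync function back to back in
 * syncing context with the pool config lock held.  Any snapshot this
 * check records in dsda_successful_snaps is therefore still
 * destroyable when dsl_destroy_snapshot_sync() runs in the same txg.
 */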
struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
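/*
 * Illustrative note (not from the original source): the old
 * (pre-SPA_VERSION_DEADLISTS) deadlist format has no per-txg buckets,
 * so the only way to split ds_next's deadlist is to visit every block
 * pointer.  process_old_cb() partitions the blocks by birth txg:
 * blocks born at or before ds's prev_snap_txg are still referenced by
 * an older snapshot and are inserted into ds's deadlist, while younger
 * blocks were referenced only by ds and are freed immediately via
 * dsl_free_sync().  The object swap then hands the combined deadlist
 * to ds_next and leaves ds holding the drained one, which the caller
 * frees.
 */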
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
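/*
 * Illustrative note (not from the original source): this walk is
 * recursive because clones can themselves be cloned.  Removing the
 * destroyed snapshot's creation txg as a deadlist key in every clone
 * that branched off later keeps each clone's deadlist bucketing
 * consistent with the set of snapshots that still exist.
 */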
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		dsl_dataset_rele(hds, FTAG);
	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
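/*
 * Illustrative example (not from the original source): take snapshots
 * A (txg 10), B (txg 20), C (txg 30) and destroy B.  C's deadlist
 * holds the blocks that died between B and C; of those, blocks born in
 * txgs 11-20 were visible only to B and go to dp_free_bpobj, while
 * blocks born at or before txg 10 stay on C's deadlist because A still
 * references them.  B's own deadlist (blocks that died between A and
 * B) is merged into C's, and blocks on the snap-after-C's deadlist
 * born in txgs 11-20, previously shared by only B and C, become
 * unique to C.
 */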
static void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dmu_snapshots_destroy_arg_t *dsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dsda->dsda_successful_snaps, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));

		dsl_destroy_snapshot_sync_impl(ds, dsda->dsda_defer, tx);
		dsl_dataset_rele(ds, FTAG);
	}
}
/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	dmu_snapshots_destroy_arg_t dsda;
	int error;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	dsda.dsda_snaps = snaps;
	dsda.dsda_successful_snaps = fnvlist_alloc();
	dsda.dsda_defer = defer;
	dsda.dsda_errlist = errlist;

	error = dsl_sync_task(nvpair_name(pair),
	    dsl_destroy_snapshot_check, dsl_destroy_snapshot_sync,
	    &dsda, 0, ZFS_SPACE_CHECK_NONE);
	fnvlist_free(dsda.dsda_successful_snaps);

	return (error);
}
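/*
 * Illustrative sketch (not from the original source): a minimal
 * caller, along the lines of what lzc_destroy_snaps() does for
 * "zfs destroy pool/fs@snap1,snap2":
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs@snap1");
 *	fnvlist_add_boolean(snaps, "pool/fs@snap2");
 *	int error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
 *	(on failure, errlist maps each snapshot name to an errno)
 *	fnvlist_free(errlist);
 *	fnvlist_free(snaps);
 */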
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);

	return (error);
}
struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
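/*
 * Illustrative note (not from the original source): this is the
 * synchronous path used when SPA_FEATURE_ASYNC_DESTROY is not enabled.
 * The entire dataset is traversed and freed within one syncing txg,
 * which is why dsl_destroy_head() below pre-frees the objects from
 * open context first; feature-enabled pools take the bptree path in
 * dsl_destroy_head_sync_impl() instead.
 */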
typedef struct dsl_destroy_head_arg {
	const char *ddha_name;
} dsl_destroy_head_arg_t;
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}
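/*
 * Illustrative note (not from the original source): the final clause
 * covers the "zfs destroy -d origin@snap" pattern.  When the deferred
 * origin snapshot's ds_num_children == 2, it is referenced only by its
 * own filesystem and this one clone, so destroying the clone must also
 * reclaim the origin snapshot (the rmorigin case in the sync path).
 */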
static int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation.  The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
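/*
 * Illustrative note (not from the original source): by this point the
 * dsl_dir must account for no space, so what remains is a checklist of
 * its on-disk satellites: the child-dir ZAP, the props ZAP, the
 * delegation ZAP, the entry in the parent's child-dir ZAP, and finally
 * the zapified dsl_dir object itself.
 */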
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty.  (If it's a clone, it's
	 * safe to ignore the deadlist contents.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}
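/*
 * Illustrative note (not from the original source): on pools with
 * SPA_FEATURE_ASYNC_DESTROY active, nothing is traversed here.  The
 * dataset's root blkptr is appended to the pool-wide bptree
 * (dp_bptree_obj) and its space is shifted to dp_free_dir within this
 * one txg; dsl_scan then frees the blocks in the background over many
 * txgs, which is the space reported by "zpool get freeing".
 */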
static void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_NONE);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}
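/*
 * Illustrative note (not from the original source): on pre-async pools
 * this is a two-phase destroy.  The first sync task only sets
 * DS_FLAG_INCONSISTENT; the object frees then run in open context,
 * where they may span many txgs without stalling the sync thread; the
 * second sync task performs the final, now-cheap namespace removal.
 * A crash in between leaves the dataset inconsistent, and
 * dsl_destroy_inconsistent() below finishes the job on next import.
 */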
/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}

	return (0);
}
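/*
 * Illustrative sketch (not from the original source): a typical driver
 * of this callback is pool import, along the lines of:
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * which sweeps every dataset in the pool and destroys any left
 * half-destroyed (or half-received, absent resume state) by a crash.
 */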