/*
 * gitweb extraction artifact (FreeBSD/FreeBSD.git):
 * module/zfs/dsl_destroy.c
 * commit: "dsl_scan_scrub_cb: don't double-account non-embedded blocks"
 */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25  * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
26  * Copyright (c) 2016 Actifio, Inc. All rights reserved.
27  */
28
29 #include <sys/zfs_context.h>
30 #include <sys/dsl_userhold.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/dsl_destroy.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/dsl_pool.h>
36 #include <sys/dsl_dir.h>
37 #include <sys/dmu_traverse.h>
38 #include <sys/dsl_scan.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/zap.h>
41 #include <sys/zfeature.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/dsl_deleg.h>
44 #include <sys/dmu_impl.h>
45 #include <sys/zvol.h>
46 #include <sys/zcp.h>
47
48 int
49 dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
50 {
51         if (!ds->ds_is_snapshot)
52                 return (SET_ERROR(EINVAL));
53
54         if (dsl_dataset_long_held(ds))
55                 return (SET_ERROR(EBUSY));
56
57         /*
58          * Only allow deferred destroy on pools that support it.
59          * NOTE: deferred destroy is only supported on snapshots.
60          */
61         if (defer) {
62                 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
63                     SPA_VERSION_USERREFS)
64                         return (SET_ERROR(ENOTSUP));
65                 return (0);
66         }
67
68         /*
69          * If this snapshot has an elevated user reference count,
70          * we can't destroy it yet.
71          */
72         if (ds->ds_userrefs > 0)
73                 return (SET_ERROR(EBUSY));
74
75         /*
76          * Can't delete a branch point.
77          */
78         if (dsl_dataset_phys(ds)->ds_num_children > 1)
79                 return (SET_ERROR(EEXIST));
80
81         return (0);
82 }
83
84 int
85 dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
86 {
87         dsl_destroy_snapshot_arg_t *ddsa = arg;
88         const char *dsname = ddsa->ddsa_name;
89         boolean_t defer = ddsa->ddsa_defer;
90
91         dsl_pool_t *dp = dmu_tx_pool(tx);
92         int error = 0;
93         dsl_dataset_t *ds;
94
95         error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
96
97         /*
98          * If the snapshot does not exist, silently ignore it, and
99          * dsl_destroy_snapshot_sync() will be a no-op
100          * (it's "already destroyed").
101          */
102         if (error == ENOENT)
103                 return (0);
104
105         if (error == 0) {
106                 error = dsl_destroy_snapshot_check_impl(ds, defer);
107                 dsl_dataset_rele(ds, FTAG);
108         }
109
110         return (error);
111 }
112
/*
 * Context passed to process_old_cb() while process_old_deadlist()
 * iterates over ds_next's old-format deadlist.
 */
struct process_old_arg {
	dsl_dataset_t *ds;		/* snapshot being destroyed */
	dsl_dataset_t *ds_prev;		/* previous snapshot, or NULL */
	boolean_t after_branch_point;	/* ds is not prev's direct "next" */
	zio_t *pio;			/* parent zio for async frees */
	uint64_t used, comp, uncomp;	/* space freed, tallied by the cb */
};
120
/*
 * bpobj_iterate() callback for process_old_deadlist().  Each block on
 * ds_next's old-format deadlist either predates the snapshot being
 * destroyed (and is moved onto its deadlist, possibly becoming unique
 * to the previous snapshot) or was born in this snapshot's range (and
 * is freed, with the reclaimed space tallied in poa->used/comp/uncomp).
 */
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		/* Block predates us: it moves to our own deadlist. */
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			/*
			 * Born within prev's range: once we are gone the
			 * block is unique to the previous snapshot.
			 */
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		/* Born after the prev snap: free it and tally the space. */
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
145
/*
 * Destroy path for pools where ds_next still uses the old-format (flat
 * bpobj) deadlist: walk ds_next's deadlist moving or freeing each block
 * via process_old_cb(), adjust snapused accounting, and then swap the
 * deadlist objects so ds_next ends up with the merged list.
 */
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	/* Root zio lets us wait for all the async frees issued by the cb. */
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	/* Everything freed should have been unique to this snapshot. */
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
183
/*
 * For every clone of @ds (and, recursively, clones of those clones)
 * whose origin txg is newer than @mintxg, remove the @mintxg key from
 * the clone's deadlist (and remap deadlist, if present), collapsing the
 * range that referenced the snapshot being destroyed.
 */
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	/* Heap-allocated, presumably to limit stack use in this recursion. */
	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za->za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			if (dsl_dataset_remap_deadlist_exists(clone)) {
				dsl_deadlist_remove_key(
				    &clone->ds_remap_deadlist, mintxg, tx);
			}
			/* Recurse into this clone's own clones. */
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(zc);

	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}
225
/*
 * Handle the remap deadlists when destroying snapshot @ds: move the
 * part of ds_next's remap deadlist that predates @ds onto the pool's
 * obsolete bpobj, then fold @ds's own remap deadlist into ds_next's.
 */
static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Move blocks to be obsoleted to pool's obsolete list. */
	if (dsl_dataset_remap_deadlist_exists(ds_next)) {
		/* Lazily create the pool-wide obsolete bpobj on first use. */
		if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
			dsl_pool_create_obsolete_bpobj(dp, tx);

		dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
		    &dp->dp_obsolete_bpobj,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	}

	/* Merge our deadlist into next's and free it. */
	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_object =
		    dsl_dataset_get_remap_deadlist_object(ds);
		ASSERT(remap_deadlist_object != 0);

		/* Create ds_next's remap deadlist if it doesn't exist yet. */
		mutex_enter(&ds_next->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds_next))
			dsl_dataset_create_remap_deadlist(ds_next, tx);
		mutex_exit(&ds_next->ds_remap_deadlist_lock);

		dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
		    remap_deadlist_object, tx);
		dsl_dataset_destroy_remap_deadlist(ds, tx);
	}
}
258
/*
 * Destroy snapshot @ds in syncing context.  If @defer is set and the
 * snapshot still has user holds or clones, it is only marked
 * DS_FLAG_DEFER_DESTROY and left in place.  Otherwise: deadlist blocks
 * are transferred or freed, the prev/next snapshot links are repaired,
 * clone deadlists collapsed, the snapshot removed from its head
 * dataset's snapshot namespace, and the dataset object freed.
 */
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	/*
	 * Deferred destroy with remaining holds/clones: just set the
	 * flag; the real destroy happens when the last hold is released.
	 */
	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	/* Let any in-progress scan/scrub know this dataset is going away. */
	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	/* Drop per-dataset feature refcounts held by this snapshot. */
	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	/* Fix up the previous snapshot's linkage and clone bookkeeping. */
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		/* "After branch point": prev's direct next is not us. */
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			/* Replace us with our next in prev's clone list. */
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			/* Prev now points directly at our next. */
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	/* Our next now considers our prev to be its previous snapshot. */
	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		/* Old-format deadlist: slow per-block walk. */
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}

	/* Our own deadlist object is no longer needed. */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (ie. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		if (dsl_dataset_remap_deadlist_exists(hds)) {
			dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		dsl_dataset_rele(hds, FTAG);

	} else {
		/* Next is the head dataset: re-point its ds_prev at our prev. */
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		/* Sanity-check that the name maps back to this object. */
		uint64_t val;
		int err;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	/* If this snapshot was the bootfs, clear that pool property. */
	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	/* Free auxiliary ZAP objects, then the dataset object itself. */
	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		ASSERTV(uint64_t count);
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}
503
504 void
505 dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
506 {
507         dsl_destroy_snapshot_arg_t *ddsa = arg;
508         const char *dsname = ddsa->ddsa_name;
509         boolean_t defer = ddsa->ddsa_defer;
510
511         dsl_pool_t *dp = dmu_tx_pool(tx);
512         dsl_dataset_t *ds;
513
514         int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
515         if (error == ENOENT)
516                 return;
517         ASSERT0(error);
518         dsl_destroy_snapshot_sync_impl(ds, defer, tx);
519         zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
520         dsl_dataset_rele(ds, FTAG);
521 }
522
523 /*
524  * The semantics of this function are described in the comment above
525  * lzc_destroy_snaps().  To summarize:
526  *
527  * The snapshots must all be in the same pool.
528  *
529  * Snapshots that don't exist will be silently ignored (considered to be
530  * "already deleted").
531  *
532  * On success, all snaps will be destroyed and this will return 0.
533  * On failure, no snaps will be destroyed, the errlist will be filled in,
534  * and this will return an errno.
535  */
536 int
537 dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
538     nvlist_t *errlist)
539 {
540         if (nvlist_next_nvpair(snaps, NULL) == NULL)
541                 return (0);
542
543         /*
544          * lzc_destroy_snaps() is documented to take an nvlist whose
545          * values "don't matter".  We need to convert that nvlist to
546          * one that we know can be converted to LUA. We also don't
547          * care about any duplicate entries because the nvlist will
548          * be converted to a LUA table which should take care of this.
549          */
550         nvlist_t *snaps_normalized;
551         VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
552         for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
553             pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
554                 fnvlist_add_boolean_value(snaps_normalized,
555                     nvpair_name(pair), B_TRUE);
556         }
557
558         nvlist_t *arg;
559         VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
560         fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
561         fnvlist_free(snaps_normalized);
562         fnvlist_add_boolean_value(arg, "defer", defer);
563
564         nvlist_t *wrapper;
565         VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
566         fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
567         fnvlist_free(arg);
568
569         const char *program =
570             "arg = ...\n"
571             "snaps = arg['snaps']\n"
572             "defer = arg['defer']\n"
573             "errors = { }\n"
574             "has_errors = false\n"
575             "for snap, v in pairs(snaps) do\n"
576             "    errno = zfs.check.destroy{snap, defer=defer}\n"
577             "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
578             "    if errno == ENOENT then\n"
579             "        snaps[snap] = nil\n"
580             "    elseif errno ~= 0 then\n"
581             "        errors[snap] = errno\n"
582             "        has_errors = true\n"
583             "    end\n"
584             "end\n"
585             "if has_errors then\n"
586             "    return errors\n"
587             "end\n"
588             "for snap, v in pairs(snaps) do\n"
589             "    errno = zfs.sync.destroy{snap, defer=defer}\n"
590             "    assert(errno == 0)\n"
591             "end\n"
592             "return { }\n";
593
594         nvlist_t *result = fnvlist_alloc();
595         int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
596             program,
597             B_TRUE,
598             0,
599             zfs_lua_max_memlimit,
600             nvlist_next_nvpair(wrapper, NULL), result);
601         if (error != 0) {
602                 char *errorstr = NULL;
603                 (void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
604                 if (errorstr != NULL) {
605                         zfs_dbgmsg(errorstr);
606                 }
607                 return (error);
608         }
609         fnvlist_free(wrapper);
610
611         /*
612          * lzc_destroy_snaps() is documented to fill the errlist with
613          * int32 values, so we need to covert the int64 values that are
614          * returned from LUA.
615          */
616         int rv = 0;
617         nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
618         for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
619             pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
620                 int32_t val = (int32_t)fnvpair_value_int64(pair);
621                 if (rv == 0)
622                         rv = val;
623                 fnvlist_add_int32(errlist, nvpair_name(pair), val);
624         }
625         fnvlist_free(result);
626         return (rv);
627 }
628
629 int
630 dsl_destroy_snapshot(const char *name, boolean_t defer)
631 {
632         int error;
633         nvlist_t *nvl = fnvlist_alloc();
634         nvlist_t *errlist = fnvlist_alloc();
635
636         fnvlist_add_boolean(nvl, name);
637         error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
638         fnvlist_free(errlist);
639         fnvlist_free(nvl);
640         return (error);
641 }
642
/* Context for kill_blkptr() during old_synchronous_dataset_destroy(). */
struct killarg {
	dsl_dataset_t *ds;	/* dataset whose blocks are being freed */
	dmu_tx_t *tx;		/* open transaction doing the destroy */
};
647
/* ARGSUSED */
/*
 * traverse_dataset() callback: dispose of one block pointer.  ZIL
 * blocks carry no space accounting and are freed directly; all other
 * blocks go through dsl_dataset_block_kill() so accounting is updated.
 */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	/* Holes and embedded BPs occupy no allocated space -- skip. */
	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		/* Blocks born before the prev snap belong to it, not us. */
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
675
/*
 * Synchronously free every block the dataset points to (legacy path,
 * used when the blocks cannot be freed asynchronously).
 */
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
	    TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
	/* All space unique to this dataset should now have been freed. */
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
696
/*
 * Check whether head dataset @ds may be destroyed: it must not be a
 * snapshot, must have exactly @expected_holds long holds, must have no
 * snapshots of its own and no child filesystems; and if destroying it
 * would also remove a defer-destroyed origin snapshot, that origin must
 * not be long-held either.
 */
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	/* ASSERT in debug builds, graceful EINVAL in production. */
	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	/* Destroying us would also destroy our defer-destroyed origin. */
	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}
741
742 int
743 dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
744 {
745         dsl_destroy_head_arg_t *ddha = arg;
746         dsl_pool_t *dp = dmu_tx_pool(tx);
747         dsl_dataset_t *ds;
748         int error;
749
750         error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
751         if (error != 0)
752                 return (error);
753
754         error = dsl_destroy_head_check_impl(ds, 0);
755         dsl_dataset_rele(ds, FTAG);
756         return (error);
757 }
758
/*
 * Free the on-disk state of an already-emptied dsl_dir: adjust the
 * parent's filesystem count, drop the reservation, destroy the child /
 * props / delegation ZAPs, unlink the dir from its parent's child-dir
 * ZAP, and free the dsl_dir object itself.
 */
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	/* The head dataset must already have been destroyed. */
	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	/* All space accounting must already be zero. */
	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	/* Tear down any encryption key material belonging to this dir. */
	if (dd->dd_crypto_obj != 0) {
		dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
		(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
	}

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}
810
/*
 * Syncing-context implementation of destroying a head (non-snapshot)
 * dataset: remove its on-disk state, free its blocks (synchronously on
 * old pools, or via the pool-wide bptree when async_destroy is enabled),
 * and finally destroy the now-empty dsl_dir.  Caller must hold the pool
 * config lock as writer (asserted below).
 */
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	/* At most one child: a clone's defer-destroyed origin snapshot. */
	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	/*
	 * Decide now whether to also destroy the origin snapshot: we do so
	 * when this is a clone whose origin is marked defer-destroy, has
	 * exactly two children remaining, and carries no user holds.
	 */
	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	/* Drop this dataset's refcounts on any in-use per-dataset features. */
	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	/* Notify the scanner that this dataset is going away. */
	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		/* Detach from the origin: drop it from its next-clones list. */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty since the dataset has no snapshots.
	 * (If it's a clone, it's safe to ignore the deadlist contents
	 * since they are still referenced by the origin snapshot.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	if (dsl_dataset_remap_deadlist_exists(ds))
		dsl_dataset_destroy_remap_deadlist(ds, tx);

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		/* Old pools free everything synchronously, in this txg. */
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			/*
			 * First async destroy on this pool: allocate the
			 * pool-wide bptree, record it in the MOS directory,
			 * and tell the scanner to start processing it.
			 */
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		/*
		 * Move the space charge from this dir to the pool's free
		 * dir until the async destroy actually frees the blocks.
		 */
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		/* Remember the origin's object so we can destroy it below. */
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	/* Destroy the bookmark ZAP, if any, and drop the feature refcount. */
	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	/* Clear the pool's bootfs property if it referred to this dataset. */
	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	/* The dir is now empty; destroy it as well. */
	dsl_dir_destroy_sync(ddobj, tx);

	/* Finally, destroy the defer-destroyed origin snapshot if needed. */
	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}
983
984 void
985 dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
986 {
987         dsl_destroy_head_arg_t *ddha = arg;
988         dsl_pool_t *dp = dmu_tx_pool(tx);
989         dsl_dataset_t *ds;
990
991         VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
992         dsl_destroy_head_sync_impl(ds, tx);
993         zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
994         dsl_dataset_rele(ds, FTAG);
995 }
996
997 static void
998 dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
999 {
1000         dsl_destroy_head_arg_t *ddha = arg;
1001         dsl_pool_t *dp = dmu_tx_pool(tx);
1002         dsl_dataset_t *ds;
1003
1004         VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
1005
1006         /* Mark it as inconsistent on-disk, in case we crash */
1007         dmu_buf_will_dirty(ds->ds_dbuf, tx);
1008         dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1009
1010         spa_history_log_internal_ds(ds, "destroy begin", tx, "");
1011         dsl_dataset_rele(ds, FTAG);
1012 }
1013
1014 int
1015 dsl_destroy_head(const char *name)
1016 {
1017         dsl_destroy_head_arg_t ddha;
1018         int error;
1019         spa_t *spa;
1020         boolean_t isenabled;
1021
1022 #ifdef _KERNEL
1023         zfs_destroy_unmount_origin(name);
1024 #endif
1025
1026         error = spa_open(name, &spa, FTAG);
1027         if (error != 0)
1028                 return (error);
1029         isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
1030         spa_close(spa, FTAG);
1031
1032         ddha.ddha_name = name;
1033
1034         if (!isenabled) {
1035                 objset_t *os;
1036
1037                 error = dsl_sync_task(name, dsl_destroy_head_check,
1038                     dsl_destroy_head_begin_sync, &ddha,
1039                     0, ZFS_SPACE_CHECK_DESTROY);
1040                 if (error != 0)
1041                         return (error);
1042
1043                 /*
1044                  * Head deletion is processed in one txg on old pools;
1045                  * remove the objects from open context so that the txg sync
1046                  * is not too long.
1047                  */
1048                 error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
1049                     FTAG, &os);
1050                 if (error == 0) {
1051                         uint64_t prev_snap_txg =
1052                             dsl_dataset_phys(dmu_objset_ds(os))->
1053                             ds_prev_snap_txg;
1054                         for (uint64_t obj = 0; error == 0;
1055                             error = dmu_object_next(os, &obj, FALSE,
1056                             prev_snap_txg))
1057                                 (void) dmu_free_long_object(os, obj);
1058                         /* sync out all frees */
1059                         txg_wait_synced(dmu_objset_pool(os), 0);
1060                         dmu_objset_disown(os, B_FALSE, FTAG);
1061                 }
1062         }
1063
1064         return (dsl_sync_task(name, dsl_destroy_head_check,
1065             dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
1066 }
1067
1068 /*
1069  * Note, this function is used as the callback for dmu_objset_find().  We
1070  * always return 0 so that we will continue to find and process
1071  * inconsistent datasets, even if we encounter an error trying to
1072  * process one of them.
1073  */
1074 /* ARGSUSED */
1075 int
1076 dsl_destroy_inconsistent(const char *dsname, void *arg)
1077 {
1078         objset_t *os;
1079
1080         if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
1081                 boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
1082
1083                 /*
1084                  * If the dataset is inconsistent because a resumable receive
1085                  * has failed, then do not destroy it.
1086                  */
1087                 if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
1088                         need_destroy = B_FALSE;
1089
1090                 dmu_objset_rele(os, FTAG);
1091                 if (need_destroy)
1092                         (void) dsl_destroy_head(dsname);
1093         }
1094         return (0);
1095 }
1096
1097
#if defined(_KERNEL)
/* Linux kernel-module symbol exports for this file's public interfaces. */
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif