/*
 * Source provenance (scraped-page navigation residue replaced):
 * FreeBSD/stable/9, sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
 * (includes MFC of r240870 by pjd).
 */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25  * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
26  * All rights reserved.
27  * Portions Copyright (c) 2011 Martin Matuska <mm@FreeBSD.org>
28  */
29
30 #include <sys/dmu_objset.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_dir.h>
33 #include <sys/dsl_prop.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/arc.h>
39 #include <sys/zio.h>
40 #include <sys/zap.h>
41 #include <sys/zfeature.h>
42 #include <sys/unique.h>
43 #include <sys/zfs_context.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/spa.h>
46 #include <sys/zfs_znode.h>
47 #include <sys/zfs_onexit.h>
48 #include <sys/zvol.h>
49 #include <sys/dsl_scan.h>
50 #include <sys/dsl_deadlist.h>
51
/*
 * Magic cookie stored in ds_owner while a dataset is in the final stage
 * of destruction; tested via DSL_DATASET_IS_DESTROYED() below.
 */
static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

/* Swap two uint64_t lvalues in place. */
#define SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

/* Upper bound on dataset reference counts. */
#define DS_REF_MAX	(1ULL << 62)

#define DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

/*
 * True when the destroy code has claimed the dataset by setting ds_owner
 * to the reaper cookie; holders must back off (see dsl_dataset_hold_ref()).
 */
#define DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)

71
72 /*
73  * Figure out how much of this delta should be propogated to the dsl_dir
74  * layer.  If there's a refreservation, that space has already been
75  * partially accounted for in our ancestors.
76  */
77 static int64_t
78 parent_delta(dsl_dataset_t *ds, int64_t delta)
79 {
80         uint64_t old_bytes, new_bytes;
81
82         if (ds->ds_reserved == 0)
83                 return (delta);
84
85         old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
86         new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
87
88         ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
89         return (new_bytes - old_bytes);
90 }
91
/*
 * Account for a newly-born block pointer in dataset "ds" (or in the MOS
 * when ds == NULL).  Runs in syncing context; updates the dataset's
 * referenced/compressed/uncompressed/unique byte counts and pushes the
 * appropriate delta up to the dsl_dir layer.
 */
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "ds=%p", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
	if (ds == NULL) {
		/* Block belongs to the meta-objset; charge the pool. */
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    used, compressed, uncompressed);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/* Lock order: dd_lock before ds_lock. */
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_referenced_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	/*
	 * The portion of "used" already covered by the refreservation
	 * (used - delta) moves from the REFRSRV bucket to HEAD.
	 */
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
129
/*
 * Release a block pointer belonging to dataset "ds" (or the MOS when
 * ds == NULL) in syncing context.  If the block was born after the most
 * recent snapshot it is freed immediately; otherwise it is still
 * referenced by a snapshot and goes on the dataset's deadlist.  Returns
 * the number of "dsize" bytes accounted for the block.
 *
 * "async" is set when called from a zio done callback (interrupt
 * context), in which case deadlist insertion is deferred.
 */
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(bp->blk_birth <= tx->tx_txg);

	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(used > 0);
	if (ds == NULL) {
		/* MOS block: free now and credit the pool accounting. */
		dsl_free(tx->tx_pool, tx->tx_txg, bp);
		dsl_pool_mos_diduse_space(tx->tx_pool,
		    -used, -compressed, -uncompressed);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int64_t delta;

		/* Born after the last snapshot: unique to us, free it. */
		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
		dsl_free(tx->tx_pool, tx->tx_txg, bp);

		/* Lock order: dd_lock before ds_lock. */
		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		/* Still referenced by a snapshot: record on the deadlist. */
		dprintf_bp(bp, "putting on dead list: %s", "");
		if (async) {
			/*
			 * We are here as part of zio's write done callback,
			 * which means we're a zio interrupt thread.  We can't
			 * call dsl_deadlist_insert() now because it may block
			 * waiting for I/O.  Instead, put bp on the deferred
			 * queue and let dsl_pool_sync() finish the job.
			 */
			bplist_append(&ds->ds_pending_deadlist, bp);
		} else {
			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
		}
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
			/* Space moves from the head to snapshot accounting. */
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
	ds->ds_phys->ds_referenced_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
216
217 uint64_t
218 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
219 {
220         uint64_t trysnap = 0;
221
222         if (ds == NULL)
223                 return (0);
224         /*
225          * The snapshot creation could fail, but that would cause an
226          * incorrect FALSE return, which would only result in an
227          * overestimation of the amount of space that an operation would
228          * consume, which is OK.
229          *
230          * There's also a small window where we could miss a pending
231          * snapshot, because we could set the sync task in the quiescing
232          * phase.  So this should only be used as a guess.
233          */
234         if (ds->ds_trysnap_txg >
235             spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
236                 trysnap = ds->ds_trysnap_txg;
237         return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
238 }
239
240 boolean_t
241 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
242     uint64_t blk_birth)
243 {
244         if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
245                 return (B_FALSE);
246
247         ddt_prefetch(dsl_dataset_get_spa(ds), bp);
248
249         return (B_TRUE);
250 }
251
/* ARGSUSED */
/*
 * dbuf user-eviction callback: tear down the in-core dsl_dataset_t when
 * its bonus buffer is evicted.  Also called directly with db == NULL
 * from dsl_dataset_disown() for a dataset that has lost its dbuf during
 * destruction.
 */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	/* Only unowned or already-destroyed datasets may be evicted. */
	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_objset != NULL)
		dmu_objset_evict(ds->ds_objset);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_destroy(&ds->ds_pending_deadlist);
	if (db != NULL) {
		dsl_deadlist_close(&ds->ds_deadlist);
	} else {
		/* Destroy path already closed the deadlist. */
		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
		ASSERT(!ds->ds_deadlist.dl_oldfmt);
	}
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	/* Release any locks we still hold before destroying them. */
	if (mutex_owned(&ds->ds_lock))
		mutex_exit(&ds->ds_lock);
	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_recvlock);
	if (mutex_owned(&ds->ds_opening_lock))
		mutex_exit(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_opening_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
294
295 static int
296 dsl_dataset_get_snapname(dsl_dataset_t *ds)
297 {
298         dsl_dataset_phys_t *headphys;
299         int err;
300         dmu_buf_t *headdbuf;
301         dsl_pool_t *dp = ds->ds_dir->dd_pool;
302         objset_t *mos = dp->dp_meta_objset;
303
304         if (ds->ds_snapname[0])
305                 return (0);
306         if (ds->ds_phys->ds_next_snap_obj == 0)
307                 return (0);
308
309         err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
310             FTAG, &headdbuf);
311         if (err)
312                 return (err);
313         headphys = headdbuf->db_data;
314         err = zap_value_search(dp->dp_meta_objset,
315             headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
316         dmu_buf_rele(headdbuf, FTAG);
317         return (err);
318 }
319
320 static int
321 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
322 {
323         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
324         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
325         matchtype_t mt;
326         int err;
327
328         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
329                 mt = MT_FIRST;
330         else
331                 mt = MT_EXACT;
332
333         err = zap_lookup_norm(mos, snapobj, name, 8, 1,
334             value, mt, NULL, 0, NULL);
335         if (err == ENOTSUP && mt == MT_FIRST)
336                 err = zap_lookup(mos, snapobj, name, 8, 1, value);
337         return (err);
338 }
339
340 static int
341 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
342 {
343         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
344         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
345         matchtype_t mt;
346         int err;
347
348         dsl_dir_snap_cmtime_update(ds->ds_dir);
349
350         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
351                 mt = MT_FIRST;
352         else
353                 mt = MT_EXACT;
354
355         err = zap_remove_norm(mos, snapobj, name, mt, tx);
356         if (err == ENOTSUP && mt == MT_FIRST)
357                 err = zap_remove(mos, snapobj, name, tx);
358         return (err);
359 }
360
361 static int
362 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
363     dsl_dataset_t **dsp)
364 {
365         objset_t *mos = dp->dp_meta_objset;
366         dmu_buf_t *dbuf;
367         dsl_dataset_t *ds;
368         int err;
369         dmu_object_info_t doi;
370
371         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
372             dsl_pool_sync_context(dp));
373
374         err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
375         if (err)
376                 return (err);
377
378         /* Make sure dsobj has the correct object type. */
379         dmu_object_info_from_db(dbuf, &doi);
380         if (doi.doi_type != DMU_OT_DSL_DATASET)
381                 return (EINVAL);
382
383         ds = dmu_buf_get_user(dbuf);
384         if (ds == NULL) {
385                 dsl_dataset_t *winner = NULL;
386
387                 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
388                 ds->ds_dbuf = dbuf;
389                 ds->ds_object = dsobj;
390                 ds->ds_phys = dbuf->db_data;
391
392                 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
393                 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
394                 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
395                 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
396
397                 rw_init(&ds->ds_rwlock, 0, 0, 0);
398                 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
399
400                 bplist_create(&ds->ds_pending_deadlist);
401                 dsl_deadlist_open(&ds->ds_deadlist,
402                     mos, ds->ds_phys->ds_deadlist_obj);
403
404                 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
405                     offsetof(dmu_sendarg_t, dsa_link));
406
407                 if (err == 0) {
408                         err = dsl_dir_open_obj(dp,
409                             ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
410                 }
411                 if (err) {
412                         mutex_destroy(&ds->ds_lock);
413                         mutex_destroy(&ds->ds_recvlock);
414                         mutex_destroy(&ds->ds_opening_lock);
415                         rw_destroy(&ds->ds_rwlock);
416                         cv_destroy(&ds->ds_exclusive_cv);
417                         bplist_destroy(&ds->ds_pending_deadlist);
418                         dsl_deadlist_close(&ds->ds_deadlist);
419                         kmem_free(ds, sizeof (dsl_dataset_t));
420                         dmu_buf_rele(dbuf, tag);
421                         return (err);
422                 }
423
424                 if (!dsl_dataset_is_snapshot(ds)) {
425                         ds->ds_snapname[0] = '\0';
426                         if (ds->ds_phys->ds_prev_snap_obj) {
427                                 err = dsl_dataset_get_ref(dp,
428                                     ds->ds_phys->ds_prev_snap_obj,
429                                     ds, &ds->ds_prev);
430                         }
431                 } else {
432                         if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
433                                 err = dsl_dataset_get_snapname(ds);
434                         if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
435                                 err = zap_count(
436                                     ds->ds_dir->dd_pool->dp_meta_objset,
437                                     ds->ds_phys->ds_userrefs_obj,
438                                     &ds->ds_userrefs);
439                         }
440                 }
441
442                 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
443                         /*
444                          * In sync context, we're called with either no lock
445                          * or with the write lock.  If we're not syncing,
446                          * we're always called with the read lock held.
447                          */
448                         boolean_t need_lock =
449                             !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
450                             dsl_pool_sync_context(dp);
451
452                         if (need_lock)
453                                 rw_enter(&dp->dp_config_rwlock, RW_READER);
454
455                         err = dsl_prop_get_ds(ds,
456                             "refreservation", sizeof (uint64_t), 1,
457                             &ds->ds_reserved, NULL);
458                         if (err == 0) {
459                                 err = dsl_prop_get_ds(ds,
460                                     "refquota", sizeof (uint64_t), 1,
461                                     &ds->ds_quota, NULL);
462                         }
463
464                         if (need_lock)
465                                 rw_exit(&dp->dp_config_rwlock);
466                 } else {
467                         ds->ds_reserved = ds->ds_quota = 0;
468                 }
469
470                 if (err != 0 || (winner = dmu_buf_set_user_ie(dbuf, ds,
471                     &ds->ds_phys, dsl_dataset_evict)) != NULL) {
472                         bplist_destroy(&ds->ds_pending_deadlist);
473                         dsl_deadlist_close(&ds->ds_deadlist);
474                         if (ds->ds_prev)
475                                 dsl_dataset_drop_ref(ds->ds_prev, ds);
476                         dsl_dir_close(ds->ds_dir, ds);
477                         mutex_destroy(&ds->ds_lock);
478                         mutex_destroy(&ds->ds_recvlock);
479                         mutex_destroy(&ds->ds_opening_lock);
480                         rw_destroy(&ds->ds_rwlock);
481                         cv_destroy(&ds->ds_exclusive_cv);
482                         kmem_free(ds, sizeof (dsl_dataset_t));
483                         if (err) {
484                                 dmu_buf_rele(dbuf, tag);
485                                 return (err);
486                         }
487                         ds = winner;
488                 } else {
489                         ds->ds_fsid_guid =
490                             unique_insert(ds->ds_phys->ds_fsid_guid);
491                 }
492         }
493         ASSERT3P(ds->ds_dbuf, ==, dbuf);
494         ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
495         ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
496             spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
497             dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
498         mutex_enter(&ds->ds_lock);
499         if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
500                 mutex_exit(&ds->ds_lock);
501                 dmu_buf_rele(ds->ds_dbuf, tag);
502                 return (ENOENT);
503         }
504         mutex_exit(&ds->ds_lock);
505         *dsp = ds;
506         return (0);
507 }
508
/*
 * Acquire the ds_rwlock as READER on a dataset we already hold a
 * reference on (from dsl_dataset_get_ref()).  Returns 0 on success or
 * ENOENT (with the reference dropped) if the dataset is being destroyed.
 */
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		/* A writer holds ds_rwlock; wait for it to signal us. */
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		/*
		 * The dp_config_rwlock lives above the ds_lock. And
		 * we need to check DSL_DATASET_IS_DESTROYED() while
		 * holding the ds_lock, so we have to drop and reacquire
		 * the ds_lock here.
		 */
		mutex_exit(&ds->ds_lock);
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		mutex_enter(&ds->ds_lock);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
562
563 int
564 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
565     dsl_dataset_t **dsp)
566 {
567         int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
568
569         if (err)
570                 return (err);
571         return (dsl_dataset_hold_ref(*dsp, tag));
572 }
573
574 int
575 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
576     void *tag, dsl_dataset_t **dsp)
577 {
578         int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
579         if (err)
580                 return (err);
581         if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
582                 dsl_dataset_rele(*dsp, tag);
583                 *dsp = NULL;
584                 return (EBUSY);
585         }
586         return (0);
587 }
588
/*
 * Hold the dataset named "name" (which may be "pool/fs" or
 * "pool/fs@snap").  On success *dsp is set with the ds_rwlock held as
 * READER; release with dsl_dataset_rele().
 */
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	/* Resolves the dir portion; snapname points past any '@'. */
	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			/* Malformed name; drop the head hold. */
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		/* Head hold is no longer needed; keep only the snapshot. */
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			/* Cache the snapshot name on the in-core dataset. */
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
647
648 int
649 dsl_dataset_own(const char *name, boolean_t inconsistentok,
650     void *tag, dsl_dataset_t **dsp)
651 {
652         int err = dsl_dataset_hold(name, tag, dsp);
653         if (err)
654                 return (err);
655         if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
656                 dsl_dataset_rele(*dsp, tag);
657                 return (EBUSY);
658         }
659         return (0);
660 }
661
/*
 * Write the full name of "ds" ("pool/fs" or "pool/fs@snap", or "mos"
 * for ds == NULL) into "name".  The buffer is assumed large enough to
 * hold a maximum-length dataset name.
 */
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}
686
687 static int
688 dsl_dataset_namelen(dsl_dataset_t *ds)
689 {
690         int result;
691
692         if (ds == NULL) {
693                 result = 3;     /* "mos" */
694         } else {
695                 result = dsl_dir_namelen(ds->ds_dir);
696                 VERIFY(0 == dsl_dataset_get_snapname(ds));
697                 if (ds->ds_snapname[0]) {
698                         ++result;       /* adding one for the @-sign */
699                         if (!MUTEX_HELD(&ds->ds_lock)) {
700                                 mutex_enter(&ds->ds_lock);
701                                 result += strlen(ds->ds_snapname);
702                                 mutex_exit(&ds->ds_lock);
703                         } else {
704                                 result += strlen(ds->ds_snapname);
705                         }
706                 }
707         }
708
709         return (result);
710 }
711
/*
 * Drop a reference taken by dsl_dataset_get_ref() (a hold on the
 * dataset's bonus dbuf).
 */
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}
717
/*
 * Release a hold from dsl_dataset_hold{,_obj,_ref}(): drop the READER
 * rwlock (which is not taken in syncing context) and the reference.
 */
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}
726
/*
 * Relinquish ownership taken by dsl_dataset_{own,own_obj,tryown}().
 * Wakes any threads blocked in dsl_dataset_hold_ref() waiting for the
 * exclusive lock.  For a destroyed dataset (no dbuf left), finish the
 * teardown directly via dsl_dataset_evict().
 */
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		/* We held it exclusively; let waiters retry. */
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, tag);
	else
		dsl_dataset_evict(NULL, ds);
}
745
746 boolean_t
747 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
748 {
749         boolean_t gotit = FALSE;
750
751         mutex_enter(&ds->ds_lock);
752         if (ds->ds_owner == NULL &&
753             (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
754                 ds->ds_owner = tag;
755                 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
756                         rw_exit(&ds->ds_rwlock);
757                 gotit = TRUE;
758         }
759         mutex_exit(&ds->ds_lock);
760         return (gotit);
761 }
762
/*
 * Upgrade an owner's access to exclusive (WRITER on ds_rwlock), blocking
 * out all other holders.  The caller must already own the dataset.
 */
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
770
/*
 * Allocate and initialize the on-disk dataset object for dsl_dir 'dd',
 * in syncing context.  If 'origin' is non-NULL (or the pool has an
 * origin snapshot), the new dataset is a clone of it: it inherits the
 * origin's block pointer and space accounting, becomes one of the
 * origin's children, and is registered in the origin's next_clones /
 * dd_clones ZAPs when the pool version supports them.  Returns the
 * object number of the new dataset.
 */
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	/* the dir must not already have a head dataset */
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	/* a guid of 0 is reserved; keep drawing random bytes until nonzero */
	do {
		(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
		    sizeof (dsphys->ds_guid));
	} while (dsphys->ds_guid == 0);
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		/* a clone starts out sharing everything with its origin */
		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_referenced_bytes =
		    origin->ds_phys->ds_referenced_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		/* clone the deadlist of the origin's head dataset */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			/* lazily create the origin's next_clones ZAP */
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			/* lazily create the origin dir's dd_clones ZAP */
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	/* finally, point the dir at its new head dataset */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
869
/*
 * Create a new dataset named 'lastname' (a single path component, never
 * a snapshot name) under parent dir 'pdd', in syncing context.  When
 * 'origin' is non-NULL the result is a clone of it.  Creation-time
 * delegation permissions are seeded from 'cr'.  Returns the object
 * number of the new dataset.
 */
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshots zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
906
907 #ifdef __FreeBSD__
908 /* FreeBSD ioctl compat begin */
/* Carries the snapshot name into dsl_check_snap_cb(). */
struct destroyarg {
	nvlist_t *nvl;		/* receives "<fs>@<snapname>" entries */
	const char *snapname;	/* snapshot name appended to each fs name */
};
913
914 static int
915 dsl_check_snap_cb(const char *name, void *arg)
916 {
917         struct destroyarg *da = arg;
918         dsl_dataset_t *ds;
919         char *dsname;
920
921         dsname = kmem_asprintf("%s@%s", name, da->snapname);
922         fnvlist_add_boolean(da->nvl, dsname);
923         kmem_free(dsname, strlen(dsname) + 1);
924
925         return (0);
926 }
927
928 int
929 dmu_get_recursive_snaps_nvl(const char *fsname, const char *snapname,
930     nvlist_t *snaps)
931 {
932         struct destroyarg *da;
933         int err;
934
935         da = kmem_zalloc(sizeof (struct destroyarg), KM_SLEEP);
936         da->nvl = snaps;
937         da->snapname = snapname;
938         err = dmu_objset_find(fsname, dsl_check_snap_cb, da,
939             DS_FIND_CHILDREN);
940         kmem_free(da, sizeof (struct destroyarg));
941
942         return (err);
943 }
944 /* FreeBSD ioctl compat end */
945 #endif /* __FreeBSD__ */
946
/*
 * Destroy every snapshot named in 'snaps' as a single sync task group.
 * The snapshots must all be in the same pool (named by the first
 * nvpair).  ENOENT for an individual snapshot is ignored; on any other
 * failure 'failed' receives the name of the dataset that triggered the
 * error.  Each successfully-owned dataset is disowned and its destroy
 * argument freed before returning.
 */
int
dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
{
	int err;
	dsl_sync_task_t *dst;
	spa_t *spa;
	nvpair_t *pair;
	dsl_sync_task_group_t *dstg;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	err = spa_open(nvpair_name(pair), &spa, FTAG);
	if (err)
		return (err);
	dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(snaps, pair)) {
		dsl_dataset_t *ds;

		err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
		if (err == 0) {
			struct dsl_ds_destroyarg *dsda;

			dsl_dataset_make_exclusive(ds, dstg);
			dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
			    KM_SLEEP);
			dsda->ds = ds;
			dsda->defer = defer;
			dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
			    dsl_dataset_destroy_sync, dsda, dstg, 0);
		} else if (err == ENOENT) {
			/* already gone; not an error for a destroy */
			err = 0;
		} else {
			/*
			 * NOTE(review): unbounded strcpy — assumes 'failed'
			 * is at least as large as a dataset name buffer
			 * (presumably MAXPATHLEN); confirm with callers.
			 */
			(void) strcpy(failed, nvpair_name(pair));
			break;
		}
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(dstg);

	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, failed);
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(dstg);
	spa_close(spa, FTAG);
	return (err);

}
1015
1016 static boolean_t
1017 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
1018 {
1019         boolean_t might_destroy = B_FALSE;
1020
1021         mutex_enter(&ds->ds_lock);
1022         if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
1023             DS_IS_DEFER_DESTROY(ds))
1024                 might_destroy = B_TRUE;
1025         mutex_exit(&ds->ds_lock);
1026
1027         return (might_destroy);
1028 }
1029
/*
 * If we're removing a clone, and these three conditions are true:
 *      1) the clone's origin has no other children
 *      2) the clone's origin has no user references
 *      3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 *
 * Preparation means: unmount the origin snapshot (kernel only), take an
 * owner hold on it under 'tag', record it in dsda->rm_origin, and make
 * the hold exclusive.  Returns 0 on success or if no prep was needed.
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		/* the snapshot must be unmounted before it can be destroyed */
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		/* re-acquire the origin by name, this time as owner */
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}
1068
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 *
 * Destroys a snapshot (possibly deferred) or a head dataset together
 * with its dsl_dir.  For a head dataset on pools without the
 * async_destroy feature, the objects are freed here in open context
 * first to minimize work in syncing context.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	/* stand-in dataset handed to dsl_dir_destroy_check/_sync */
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		/* deferred destroy is only supported on snapshots */
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		/*
		 * Check for errors and mark this ds as inconsistent, in
		 * case we crash while freeing the objects.
		 */
		err = dsl_sync_task_do(dd->dd_pool,
		    dsl_dataset_destroy_begin_check,
		    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
		if (err)
			goto out;

		err = dmu_objset_from_ds(ds, &os);
		if (err)
			goto out;

		/*
		 * Remove all objects while in the open context so that
		 * there is less work to do in the syncing context.
		 */
		for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
		    ds->ds_phys->ds_prev_snap_txg)) {
			/*
			 * Ignore errors, if there is not enough disk space
			 * we will deal with it in dsl_dataset_destroy_sync().
			 */
			(void) dmu_free_object(os, obj);
		}
		/* dmu_object_next() returns ESRCH when no objects remain */
		if (err != ESRCH)
			goto out;

		/*
		 * Sync out all in-flight IO.
		 */
		txg_wait_synced(dd->dd_pool, 0);

		/*
		 * If we managed to free all the objects in open
		 * context, the user space accounting should be zero.
		 */
		if (ds->ds_phys->ds_bp.blk_fill == 0 &&
		    dmu_objset_userused_enabled(os)) {
			uint64_t count;

			ASSERT(zap_count(os, DMU_USERUSED_OBJECT,
			    &count) != 0 || count == 0);
			ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT,
			    &count) != 0 || count == 0);
		}
	}

	/* take our own hold on the dsl_dir; released below or by the sync */
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	/* release the origin hold if the sync task didn't consume it */
	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
1212
1213 blkptr_t *
1214 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1215 {
1216         return (&ds->ds_phys->ds_bp);
1217 }
1218
1219 void
1220 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1221 {
1222         ASSERT(dmu_tx_is_syncing(tx));
1223         /* If it's the meta-objset, set dp_meta_rootbp */
1224         if (ds == NULL) {
1225                 tx->tx_pool->dp_meta_rootbp = *bp;
1226         } else {
1227                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1228                 ds->ds_phys->ds_bp = *bp;
1229         }
1230 }
1231
1232 spa_t *
1233 dsl_dataset_get_spa(dsl_dataset_t *ds)
1234 {
1235         return (ds->ds_dir->dd_pool->dp_spa);
1236 }
1237
1238 void
1239 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1240 {
1241         dsl_pool_t *dp;
1242
1243         if (ds == NULL) /* this is the meta-objset */
1244                 return;
1245
1246         ASSERT(ds->ds_objset != NULL);
1247
1248         if (ds->ds_phys->ds_next_snap_obj != 0)
1249                 panic("dirtying snapshot!");
1250
1251         dp = ds->ds_dir->dd_pool;
1252
1253         if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1254                 /* up the hold count until we can be written out */
1255                 dmu_buf_add_ref(ds->ds_dbuf, ds);
1256         }
1257 }
1258
1259 boolean_t
1260 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1261 {
1262         for (int t = 0; t < TXG_SIZE; t++) {
1263                 if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1264                     ds, t))
1265                         return (B_TRUE);
1266         }
1267         return (B_FALSE);
1268 }
1269
1270 /*
1271  * The unique space in the head dataset can be calculated by subtracting
1272  * the space used in the most recent snapshot, that is still being used
1273  * in this file system, from the space currently in use.  To figure out
1274  * the space in the most recent snapshot still in use, we need to take
1275  * the total space used in the snapshot and subtract out the space that
1276  * has been freed up since the snapshot was taken.
1277  */
1278 static void
1279 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1280 {
1281         uint64_t mrs_used;
1282         uint64_t dlused, dlcomp, dluncomp;
1283
1284         ASSERT(!dsl_dataset_is_snapshot(ds));
1285
1286         if (ds->ds_phys->ds_prev_snap_obj != 0)
1287                 mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1288         else
1289                 mrs_used = 0;
1290
1291         dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1292
1293         ASSERT3U(dlused, <=, mrs_used);
1294         ds->ds_phys->ds_unique_bytes =
1295             ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1296
1297         if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1298             SPA_VERSION_UNIQUE_ACCURATE)
1299                 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1300 }
1301
/* Arguments threaded through kill_blkptr() by traverse_dataset(). */
struct killarg {
	dsl_dataset_t *ds;	/* dataset whose blocks are being killed */
	dmu_tx_t *tx;		/* transaction the frees are charged to */
};
1306
/*
 * traverse_dataset() callback used by old_synchronous_dataset_destroy():
 * frees intent-log blocks directly (they have no accounting) and runs
 * every other block through dsl_dataset_block_kill() so space
 * accounting is updated.  Always returns 0 to keep the traversal going.
 */
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		/* only blocks born after the previous snapshot are killed */
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
1333
1334 /* ARGSUSED */
1335 static int
1336 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1337 {
1338         dsl_dataset_t *ds = arg1;
1339         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1340         uint64_t count;
1341         int err;
1342
1343         /*
1344          * Can't delete a head dataset if there are snapshots of it.
1345          * (Except if the only snapshots are from the branch we cloned
1346          * from.)
1347          */
1348         if (ds->ds_prev != NULL &&
1349             ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1350                 return (EBUSY);
1351
1352         /*
1353          * This is really a dsl_dir thing, but check it here so that
1354          * we'll be less likely to leave this dataset inconsistent &
1355          * nearly destroyed.
1356          */
1357         err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1358         if (err)
1359                 return (err);
1360         if (count != 0)
1361                 return (EEXIST);
1362
1363         return (0);
1364 }
1365
1366 /* ARGSUSED */
1367 static void
1368 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1369 {
1370         dsl_dataset_t *ds = arg1;
1371         dsl_pool_t *dp = ds->ds_dir->dd_pool;
1372
1373         /* Mark it as inconsistent on-disk, in case we crash */
1374         dmu_buf_will_dirty(ds->ds_dbuf, tx);
1375         ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1376
1377         spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1378             "dataset = %llu", ds->ds_object);
1379 }
1380
/*
 * Called from dsl_dataset_destroy_check() when destroying a clone.
 * If the clone's origin might also be destroyed, either run the destroy
 * check on the origin as well (when the open-context prep has been
 * done), or set need_prep and return EBUSY so the caller can retry
 * after dsl_dataset_origin_rm_prep().  If the origin turns out not to
 * be destroyable after all, the prep hold is released here.
 */
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
1416
/*
 * Sync-task check for dataset destroy: validates that dsda->ds can be
 * destroyed in this txg.  Returns 0 if so, or an errno describing why
 * not (ENOTSUP, EBUSY, EAGAIN, EEXIST).
 *
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so noone else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point. However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
1488
/* Synchronization state used by dsl_dataset_drain_refs(). */
struct refsarg {
	kmutex_t lock;		/* protects 'gone' */
	boolean_t gone;		/* set once the dbuf user is evicted */
	kcondvar_t cv;		/* signaled by dsl_dataset_refs_gone() */
};
1494
1495 /* ARGSUSED */
1496 static void
1497 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1498 {
1499         struct refsarg *arg = argv;
1500
1501         mutex_enter(&arg->lock);
1502         arg->gone = TRUE;
1503         cv_signal(&arg->cv);
1504         mutex_exit(&arg->lock);
1505 }
1506
/*
 * Release our dbuf hold on 'ds' and block until all other references
 * are gone: dsl_dataset_refs_gone() is installed as the dbuf's
 * user-eviction callback and signals the condvar we wait on.  On
 * return ds->ds_dbuf and ds->ds_phys are NULL and must no longer be
 * dereferenced.
 */
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	bzero(&arg, sizeof(arg));
	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
1529
/*
 * Remove dataset object 'obj' from ds's ds_next_clones_obj ZAP.
 * ENOENT is tolerated (see below); any other failure asserts.
 */
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY0(err);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
1556
/*
 * Recursively remove the deadlist key 'mintxg' from every clone of
 * 'ds' (found via the dir's dd_clones ZAP) whose origin txg is newer
 * than mintxg, and from those clones' clones in turn.
 */
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
1588
/* State shared by process_old_cb() across an old-format deadlist walk. */
struct process_old_arg {
	dsl_dataset_t *ds;		/* dataset being destroyed */
	dsl_dataset_t *ds_prev;		/* its previous snapshot, if any */
	boolean_t after_branch_point;
	zio_t *pio;			/* root zio for the async frees */
	uint64_t used, comp, uncomp;	/* running totals of freed blocks */
};
1596
/*
 * bpobj_iterate() callback for process_old_deadlist(): blocks born at
 * or before ds's previous snapshot move onto ds's deadlist (and, when
 * appropriate, are credited to ds_prev's unique bytes); younger blocks
 * are freed asynchronously and their sizes accumulated in poa.
 */
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			/* block is now unique to the previous snapshot */
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
1619
/*
 * Old-format deadlist handling when destroying snapshot 'ds': walk
 * ds_next's deadlist with process_old_cb() (moving still-referenced
 * blocks to ds's deadlist and freeing the rest), charge the freed
 * space against DD_USED_SNAP, then swap the two datasets' deadlist
 * objects so ds_next inherits the merged list.
 */
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	/* everything freed should equal what was unique to 'ds' */
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
1653
/*
 * Pre-async_destroy destroy path: traverse every block 'ds' points to
 * that was born after its previous snapshot and kill it via
 * kill_blkptr().  Returns the traverse_dataset() error (asserted 0).
 */
static int
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	int err;
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	err = traverse_dataset(ds,
	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka);
	ASSERT0(err);
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);

	return (err);
}
1677
/*
 * Sync task that destroys dataset dsda->ds (a snapshot, a head
 * filesystem/volume, or a clone).  If the destroy was deferred
 * (dsda->defer) and the dataset still has user holds or children, the
 * dataset is only marked DS_FLAG_DEFER_DESTROY and we return.  Otherwise
 * all on-disk state is torn down: the snapshot list is spliced, the
 * deadlist is merged or freed, space accounting is adjusted, and the
 * dataset object itself is freed.  If the clone's origin was set up for
 * removal (dsda->rm_origin), it is destroyed recursively at the end.
 */
void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	boolean_t wont_destroy;
	uint64_t obj;

	/*
	 * A deferred destroy with outstanding user holds or children
	 * only marks the dataset; real destruction happens later.
	 */
	wont_destroy = (dsda->defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

	ASSERT(ds->ds_owner || wont_destroy);
	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	if (wont_destroy) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		return;
	}

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		dsl_prop_setarg_t psa;
		uint64_t value = 0;

		dsl_prop_setarg_init_uint64(&psa, "refreservation",
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    &value);
		psa.psa_effective_value = 0;	/* predict default value */

		dsl_dataset_set_reservation_sync(ds, &psa, tx);
		ASSERT0(ds->ds_reserved);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	/*
	 * Unlink this dataset from its previous snapshot and fix up the
	 * previous snapshot's clone bookkeeping.
	 */
	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		/* Branch point: prev's "next" is some other dataset. */
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			/* Our next snapshot replaces us in prev's clone list. */
			remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;

			/*
			 * If the clone's origin has no other clones, no
			 * user holds, and has been marked for deferred
			 * deletion, then we should have done the necessary
			 * destroy setup for it.
			 */
			if (ds_prev->ds_phys->ds_num_children == 1 &&
			    ds_prev->ds_userrefs == 0 &&
			    DS_IS_DEFER_DESTROY(ds_prev)) {
				ASSERT3P(dsda->rm_origin, !=, NULL);
			} else {
				ASSERT3P(dsda->rm_origin, ==, NULL);
			}
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * Destroying a snapshot: splice it out of the snapshot
		 * list and merge its deadlist into the next snapshot's.
		 */
		dsl_dataset_t *ds_next;
		uint64_t old_unique;
		uint64_t used = 0, comp = 0, uncomp = 0;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = ds_next->ds_phys->ds_unique_bytes;

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);


		if (ds_next->ds_deadlist.dl_oldfmt) {
			process_old_deadlist(ds, ds_prev, ds_next,
			    after_branch_point, tx);
		} else {
			/* Adjust prev's unique space. */
			if (ds_prev && !after_branch_point) {
				dsl_deadlist_space_range(&ds_next->ds_deadlist,
				    ds_prev->ds_phys->ds_prev_snap_txg,
				    ds->ds_phys->ds_prev_snap_txg,
				    &used, &comp, &uncomp);
				ds_prev->ds_phys->ds_unique_bytes += used;
			}

			/* Adjust snapused. */
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
			    &used, &comp, &uncomp);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
			    -used, -comp, -uncomp, tx);

			/* Move blocks to be freed to pool's free list. */
			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
			    tx);
			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
			    DD_USED_HEAD, used, comp, uncomp, tx);

			/* Merge our deadlist into next's and free it. */
			dsl_deadlist_merge(&ds_next->ds_deadlist,
			    ds->ds_phys->ds_deadlist_obj, tx);
		}
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

		/* Collapse range in clone heads */
		dsl_dataset_remove_clones_key(ds,
		    ds->ds_phys->ds_creation_txg, tx);

		if (dsl_dataset_is_snapshot(ds_next)) {
			dsl_dataset_t *ds_nextnext;

			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (ie. be on the snap after next's
			 * deadlist).
			 */
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_nextnext));
			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
			    ds->ds_phys->ds_prev_snap_txg,
			    ds->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
			ds_next->ds_phys->ds_unique_bytes += used;
			dsl_dataset_rele(ds_nextnext, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);

			/* Collapse range in this head. */
			dsl_dataset_t *hds;
			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
			    FTAG, &hds));
			dsl_deadlist_remove_key(&hds->ds_deadlist,
			    ds->ds_phys->ds_creation_txg, tx);
			dsl_dataset_rele(hds, FTAG);

		} else {
			/* ds_next is the head; repoint its ds_prev past us. */
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir,
				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);
	} else {
		zfeature_info_t *async_destroy =
		    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
		objset_t *os;

		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		dsl_deadlist_close(&ds->ds_deadlist);
		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));

		if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
			/* Legacy pools free everything synchronously here. */
			err = old_synchronous_dataset_destroy(ds, tx);
		} else {
			/*
			 * Move the bptree into the pool's list of trees to
			 * clean up and update space accounting information.
			 */
			uint64_t used, comp, uncomp;

			zil_destroy_sync(dmu_objset_zil(os), tx);

			if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
				/* First async destroy: activate the feature. */
				spa_feature_incr(dp->dp_spa, async_destroy, tx);
				dp->dp_bptree_obj = bptree_alloc(mos, tx);
				VERIFY(zap_add(mos,
				    DMU_POOL_DIRECTORY_OBJECT,
				    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
				    &dp->dp_bptree_obj, tx) == 0);
			}

			used = ds->ds_dir->dd_phys->dd_used_bytes;
			comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
			uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;

			ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
			    ds->ds_phys->ds_unique_bytes == used);

			/* Transfer the space usage to the pool's free dir. */
			bptree_add(mos, dp->dp_bptree_obj,
			    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
			    used, comp, uncomp, tx);
			dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
			    -used, -comp, -uncomp, tx);
			dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
			    used, comp, uncomp, tx);
		}

		if (ds->ds_prev != NULL) {
			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
				VERIFY3U(0, ==, zap_remove_int(mos,
				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
				    ds->ds_object, tx));
			}
			dsl_dataset_rele(ds->ds_prev, ds);
			ds->ds_prev = ds_prev = NULL;
		}
	}

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT0(err);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);

	/* Free the dataset's auxiliary MOS objects, then the object itself. */
	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	if (ds->ds_phys->ds_props_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
	if (ds->ds_phys->ds_userrefs_obj != 0)
		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));

	if (dsda->rm_origin) {
		/*
		 * Remove the origin of the clone we just destroyed.
		 */
		struct dsl_ds_destroyarg ndsda = {0};

		ndsda.ds = dsda->rm_origin;
		dsl_dataset_destroy_sync(&ndsda, tag, tx);
	}
}
2033
2034 static int
2035 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
2036 {
2037         uint64_t asize;
2038
2039         if (!dmu_tx_is_syncing(tx))
2040                 return (0);
2041
2042         /*
2043          * If there's an fs-only reservation, any blocks that might become
2044          * owned by the snapshot dataset must be accommodated by space
2045          * outside of the reservation.
2046          */
2047         ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2048         asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2049         if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2050                 return (ENOSPC);
2051
2052         /*
2053          * Propogate any reserved space for this snapshot to other
2054          * snapshot checks in this sync group.
2055          */
2056         if (asize > 0)
2057                 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2058
2059         return (0);
2060 }
2061
2062 int
2063 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
2064 {
2065         dsl_dataset_t *ds = arg1;
2066         const char *snapname = arg2;
2067         int err;
2068         uint64_t value;
2069
2070         /*
2071          * We don't allow multiple snapshots of the same txg.  If there
2072          * is already one, try again.
2073          */
2074         if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2075                 return (EAGAIN);
2076
2077         /*
2078          * Check for conflicting name snapshot name.
2079          */
2080         err = dsl_dataset_snap_lookup(ds, snapname, &value);
2081         if (err == 0)
2082                 return (EEXIST);
2083         if (err != ENOENT)
2084                 return (err);
2085
2086         /*
2087          * Check that the dataset's name is not too long.  Name consists
2088          * of the dataset's length + 1 for the @-sign + snapshot name's length
2089          */
2090         if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2091                 return (ENAMETOOLONG);
2092
2093         err = dsl_dataset_snapshot_reserve_space(ds, tx);
2094         if (err)
2095                 return (err);
2096
2097         ds->ds_trysnap_txg = tx->tx_txg;
2098         return (0);
2099 }
2100
/*
 * Sync task that creates snapshot "snapname" (arg2) of dataset arg1.
 * Allocates the snapshot's dataset object, copies the live dataset's
 * current state into it, splices it into the snapshot list, hands the
 * current deadlist to the snapshot (cloning a fresh one for the live
 * dataset), and resets the live dataset's unique space to zero.
 */
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	/* Allocate the snapshot's dataset object and fill in its phys. */
	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	do {
		(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
		    sizeof (dsphys->ds_guid));
	} while (dsphys->ds_guid == 0);	/* retry: guid 0 is not allowed */
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	/* Link the new snapshot in after the previous snapshot, if any. */
	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			/*
			 * The new snapshot takes ds's place in the
			 * origin's next_clones list.
			 */
			remove_from_next_clones(ds->ds_prev,
			    dsphys->ds_next_snap_obj, tx);
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t delta;
		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    delta, 0, 0, tx);
	}

	/*
	 * The snapshot inherited the current deadlist object above;
	 * clone a fresh deadlist for the live dataset.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
	    ds->ds_dir->dd_myname, snapname, dsobj,
	    ds->ds_phys->ds_prev_snap_txg);
	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_add_key(&ds->ds_deadlist,
	    ds->ds_phys->ds_prev_snap_txg, tx);

	/* The new snapshot becomes the live dataset's previous snapshot. */
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	/* Enter the snapshot into the dataset's snapshot-name ZAP. */
	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_scan_ds_snapshotted(ds, tx);

	dsl_dir_snap_cmtime_update(ds->ds_dir);

	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
	    "dataset = %llu", dsobj);
}
2216
/*
 * Write out this dataset's dirty data during txg sync.  Must be called
 * in syncing context with the objset open; never called on snapshots
 * (ds_next_snap_obj == 0 is asserted).
 */
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_objset != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dmu_objset_sync(ds->ds_objset, zio, tx);
}
2233
/*
 * Add the ZFS_PROP_CLONES property (the list of this snapshot's clone
 * names) to nv.  If the next_clones ZAP looks inconsistent with
 * ds_num_children, the property is silently omitted.
 */
static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t count = 0;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	nvlist_t *propval;
	nvlist_t *val;

	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * There may be missing entries in ds_next_clones_obj
	 * due to a bug in a previous version of the code.
	 * Only trust it if it has the right number of entries.
	 */
	if (ds->ds_phys->ds_next_clones_obj != 0) {
		ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
		    &count));
	}
	if (count != ds->ds_phys->ds_num_children - 1) {
		goto fail;
	}
	for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;
		char buf[ZFS_MAXNAMELEN];
		/*
		 * Even though we hold the dp_config_rwlock, the dataset
		 * may fail to open, returning ENOENT.  If there is a
		 * thread concurrently attempting to destroy this
		 * dataset, it will have the ds_rwlock held for
		 * RW_WRITER.  Our call to dsl_dataset_hold_obj() ->
		 * dsl_dataset_hold_ref() will fail its
		 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
		 * dp_config_rwlock, and wait for the destroy progress
		 * and signal ds_exclusive_cv.  If the destroy was
		 * successful, we will see that
		 * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
		 */
		if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone) != 0)
			continue;
		dsl_dir_name(clone->ds_dir, buf);
		VERIFY(nvlist_add_boolean(val, buf) == 0);
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
	VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
	VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
	    propval) == 0);
fail:
	/* Common exit: nvlist contents were copied by nvlist_add_nvlist. */
	nvlist_free(val);
	nvlist_free(propval);
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
2294
/*
 * Add this dataset's property values (space figures, timestamps, guid,
 * refratio, etc.) to the stats nvlist nv.  For snapshots, "used" and
 * compressratio are overridden with snapshot-specific values and the
 * clone list is added.
 */
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs, ratio;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
	    ds->ds_phys->ds_unique_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
	    ds->ds_object);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
	    ds->ds_userrefs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

	/* "written" = space written since the previous snapshot, if any. */
	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		uint64_t written, comp, uncomp;
		dsl_pool_t *dp = ds->ds_dir->dd_pool;
		dsl_dataset_t *prev;

		rw_enter(&dp->dp_config_rwlock, RW_READER);
		int err = dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
		rw_exit(&dp->dp_config_rwlock);
		if (err == 0) {
			err = dsl_dataset_space_written(prev, ds, &written,
			    &comp, &uncomp);
			dsl_dataset_rele(prev, FTAG);
			if (err == 0) {
				dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
				    written);
			}
		}
	}
	/* Compression ratio as a percentage (100 = uncompressed). */
	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
	    ds->ds_phys->ds_compressed_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
	    ds->ds_phys->ds_uncompressed_bytes);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);

		get_clones_stat(ds, nv);
	}
}
2363
2364 void
2365 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2366 {
2367         stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2368         stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2369         stat->dds_guid = ds->ds_phys->ds_guid;
2370         if (ds->ds_phys->ds_next_snap_obj) {
2371                 stat->dds_is_snapshot = B_TRUE;
2372                 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2373         } else {
2374                 stat->dds_is_snapshot = B_FALSE;
2375                 stat->dds_num_clones = 0;
2376         }
2377
2378         /* clone origin is really a dsl_dir thing... */
2379         rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2380         if (dsl_dir_is_clone(ds->ds_dir)) {
2381                 dsl_dataset_t *ods;
2382
2383                 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2384                     ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2385                 dsl_dataset_name(ods, stat->dds_origin);
2386                 dsl_dataset_drop_ref(ods, FTAG);
2387         } else {
2388                 stat->dds_origin[0] = '\0';
2389         }
2390         rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2391 }
2392
2393 uint64_t
2394 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2395 {
2396         return (ds->ds_fsid_guid);
2397 }
2398
2399 void
2400 dsl_dataset_space(dsl_dataset_t *ds,
2401     uint64_t *refdbytesp, uint64_t *availbytesp,
2402     uint64_t *usedobjsp, uint64_t *availobjsp)
2403 {
2404         *refdbytesp = ds->ds_phys->ds_referenced_bytes;
2405         *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2406         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2407                 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2408         if (ds->ds_quota != 0) {
2409                 /*
2410                  * Adjust available bytes according to refquota
2411                  */
2412                 if (*refdbytesp < ds->ds_quota)
2413                         *availbytesp = MIN(*availbytesp,
2414                             ds->ds_quota - *refdbytesp);
2415                 else
2416                         *availbytesp = 0;
2417         }
2418         *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2419         *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2420 }
2421
2422 boolean_t
2423 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2424 {
2425         dsl_pool_t *dp = ds->ds_dir->dd_pool;
2426
2427         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2428             dsl_pool_sync_context(dp));
2429         if (ds->ds_prev == NULL)
2430                 return (B_FALSE);
2431         if (ds->ds_phys->ds_bp.blk_birth >
2432             ds->ds_prev->ds_phys->ds_creation_txg) {
2433                 objset_t *os, *os_prev;
2434                 /*
2435                  * It may be that only the ZIL differs, because it was
2436                  * reset in the head.  Don't count that as being
2437                  * modified.
2438                  */
2439                 if (dmu_objset_from_ds(ds, &os) != 0)
2440                         return (B_TRUE);
2441                 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2442                         return (B_TRUE);
2443                 return (bcmp(&os->os_phys->os_meta_dnode,
2444                     &os_prev->os_phys->os_meta_dnode,
2445                     sizeof (os->os_phys->os_meta_dnode)) != 0);
2446         }
2447         return (B_FALSE);
2448 }
2449
2450 /* ARGSUSED */
2451 static int
2452 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2453 {
2454         dsl_dataset_t *ds = arg1;
2455         char *newsnapname = arg2;
2456         dsl_dir_t *dd = ds->ds_dir;
2457         dsl_dataset_t *hds;
2458         uint64_t val;
2459         int err;
2460
2461         err = dsl_dataset_hold_obj(dd->dd_pool,
2462             dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2463         if (err)
2464                 return (err);
2465
2466         /* new name better not be in use */
2467         err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2468         dsl_dataset_rele(hds, FTAG);
2469
2470         if (err == 0)
2471                 err = EEXIST;
2472         else if (err == ENOENT)
2473                 err = 0;
2474
2475         /* dataset name + 1 for the "@" + the new snapshot name must fit */
2476         if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2477                 err = ENAMETOOLONG;
2478
2479         return (err);
2480 }
2481
/*
 * Sync task to rename snapshot @ds to newsnapname (arg2): remove the
 * old entry from the head dataset's snapnames ZAP, update the in-core
 * name under ds_lock, then insert the ZAP entry under the new name.
 */
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	char oldname[MAXPATHLEN], newname[MAXPATHLEN];
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	/* only snapshots (datasets with a "next" snapshot) are renamed here */
	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	/* refresh ds_snapname, then drop the old snapnames ZAP entry */
	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT0(err);
	dsl_dataset_name(ds, oldname);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	/* re-add the snapshot under its new name */
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT0(err);
	dsl_dataset_name(ds, newname);
#ifdef _KERNEL
	/* keep zvol minor device names in sync with the dataset name */
	zvol_rename_minors(oldname, newname);
#endif

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
2517
/* State shared by the dsl_snapshot_rename_one() callbacks. */
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;	/* group of one rename task per fs */
	char failed[MAXPATHLEN];	/* name of the snapshot that failed */
	char *oldsnap;			/* snapshot component being renamed */
	char *newsnap;			/* new snapshot component */
	int error;			/* ENOENT until one task is queued */
};
2525
/*
 * dmu_objset_find() callback: queue a rename sync task for snapshot
 * "<name>@oldsnap" of one filesystem in the recursion.  ENOENT is not
 * an error here — not every descendant necessarily has the snapshot.
 */
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *snapname;
	int err;

	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
	/* record the candidate so a failure can be reported by name */
	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
	if (err != 0) {
		strfree(snapname);
		return (err == ENOENT ? 0 : err);
	}

#ifdef _KERNEL
	/*
	 * For all filesystems undergoing rename, we'll need to unmount it.
	 */
	(void) zfs_unmount_snap(snapname, NULL);
#endif
	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
	strfree(snapname);
	if (err != 0)
		return (err == ENOENT ? 0 : err);

	/* held dataset is released in dsl_recursive_rename() */
	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	/* First successful rename clears the error. */
	ra->error = 0;

	return (0);
}
2566
/*
 * Rename the snapshot component of "oldname" (fs@snap) to that of
 * "newname" in this filesystem and all of its descendants, as one
 * sync task group.  On failure, oldname is overwritten with the name
 * of the snapshot that could not be renamed.
 */
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname) + 1;

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');	/* caller guarantees an '@' */
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';
	/* remains ENOENT unless at least one rename task gets queued */
	ra->error = ENOENT;

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len);
	if (err == 0)
		err = ra->error;

	if (err == 0)
		err = dsl_sync_task_group_wait(ra->dstg);

	/* release every held dataset; remember the failing name, if any */
	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
			(void) strlcat(ra->failed, ra->newsnap,
			    sizeof (ra->failed));
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		/*
		 * NOTE(review): the size bound is sizeof (ra->failed), but
		 * the destination is the caller's oldname buffer —
		 * presumably callers pass a MAXPATHLEN buffer; confirm.
		 */
		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}
2623
2624 static int
2625 dsl_valid_rename(const char *oldname, void *arg)
2626 {
2627         int delta = *(int *)arg;
2628
2629         if (strlen(oldname) + delta >= MAXNAMELEN)
2630                 return (ENAMETOOLONG);
2631
2632         return (0);
2633 }
2634
#pragma weak dmu_objset_rename = dsl_dataset_rename
/*
 * Rename a dataset.  If oldname names a filesystem/volume (no snapshot
 * component), the whole dsl_dir is renamed, after validating child name
 * lengths when the name is growing.  If oldname is a snapshot
 * ("fs@snap"), newname must be a snapshot of the same filesystem; with
 * ZFS_RENAME_RECURSIVE the snapshot is renamed in all descendants as
 * well (and on failure oldname is overwritten with the failing name).
 */
int
dsl_dataset_rename(char *oldname, const char *newname, int flags)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);

	if (tail == NULL) {
		/* renaming a filesystem/volume, not a snapshot */
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (err == 0)
			err = dsl_dir_rename(dd, newname, flags);
		dsl_dir_close(dd, FTAG);
		return (err);
	}

	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	/* compare the "fs@" prefixes of old and new names */
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (flags & ZFS_RENAME_RECURSIVE) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
2694
/* One snapshot on a snaplist built by snaplist_make(). */
struct promotenode {
	list_node_t link;	/* linkage in the snaplist */
	dsl_dataset_t *ds;	/* held (or owned) snapshot */
};
2699
/* In/out state shared by the promote check and sync tasks. */
struct promotearg {
	/* snapshot chains involved in the promotion (see snaplist_make) */
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin;	/* origin's own origin; NULL if none */
	/* space accounting computed by dsl_dataset_promote_check() */
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
	char *err_ds;		/* conflicting snapshot name on EEXIST */
};
2706
/* forward declarations for the promote helpers defined below */
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
/* NOTE(review): no definition of snaplist_unstable() is visible in this
 * section of the file — confirm it is still referenced anywhere. */
static boolean_t snaplist_unstable(list_t *l);
2709
/*
 * Sync-task check for "zfs promote": validate that clone hds can be
 * promoted and precompute the space accounting deltas into *pa.
 * On a snapshot-name collision, returns EEXIST with pa->err_ds set to
 * the conflicting snapshot's name.
 */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;
	uint64_t unused;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &pa->unique, &unused, &unused);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	pa->used = origin_ds->ds_phys->ds_referenced_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0) {
			/* name already exists in the clone */
			err = EEXIST;
			goto out;
		}
		if (err != ENOENT)
			goto out;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		dsl_deadlist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so dd_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> dsl_deadlist_space_range()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_dir->dd_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
out:
	pa->err_ds = snap->ds->ds_snapname;
	return (err);
}
2838
/*
 * Sync task for "zfs promote": make clone hds trade places with the
 * head of its origin.  Re-parents the shared snapshot chain under the
 * clone's dsl_dir, rewires origin links and dd_clones ZAP entries,
 * and transfers the space accounting precomputed by
 * dsl_dataset_promote_check() into *pa.
 */
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_dir->dd_origin_txg =
	    origin_ds->ds_phys->ds_creation_txg;

	/* change dd_clone entries */
	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    odd->dd_phys->dd_clones, hds->ds_object, tx));
		/*
		 * NOTE(review): pa->origin_origin is dereferenced here
		 * without a NULL check, although dsl_dataset_promote()
		 * only holds it when the origin is itself a clone, and
		 * dsl_dataset_promote_check() guards its own use with
		 * "if (pa->origin_origin)".  Presumably an invariant makes
		 * this safe on >= SPA_VERSION_DIR_CLONES pools — confirm
		 * against upstream illumos.
		 */
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    hds->ds_object, tx));

		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
		    origin_head->ds_object, tx));
		if (dd->dd_phys->dd_clones == 0) {
			/* lazily create the clone's dd_clones ZAP */
			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));

	}

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_objset) {
			dmu_objset_evict(ds->ds_objset);
			ds->ds_objset = NULL;
		}
		/* move snap name entry */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));

		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		/* move any clone references */
		if (ds->ds_phys->ds_next_clones_obj &&
		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			zap_cursor_t zc;
			zap_attribute_t za;

			for (zap_cursor_init(&zc, dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				dsl_dataset_t *cnds;
				uint64_t o;

				if (za.za_first_integer == oldnext_obj) {
					/*
					 * We've already moved the
					 * origin's reference.
					 */
					continue;
				}

				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &cnds));
				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;

				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
				    odd->dd_phys->dd_clones, o, tx), ==, 0);
				VERIFY3U(zap_add_int(dp->dp_meta_objset,
				    dd->dd_phys->dd_clones, o, tx), ==, 0);
				dsl_dataset_rele(cnds, FTAG);
			}
			zap_cursor_fini(&zc);
		}

		ASSERT0(dsl_prop_numcb(ds));
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */

	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
3005
/* hold/ownership tag for all snaplist references */
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	/* walk backwards from last_obj following ds_prev_snap_obj */
	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			/* take ownership (exclusive) rather than a hold */
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			/* caller tears down the partial list */
			return (err);
		}

		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
3058
3059 static int
3060 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3061 {
3062         struct promotenode *snap;
3063
3064         *spacep = 0;
3065         for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3066                 uint64_t used, comp, uncomp;
3067                 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3068                     mintxg, UINT64_MAX, &used, &comp, &uncomp);
3069                 *spacep += used;
3070         }
3071         return (0);
3072 }
3073
3074 static void
3075 snaplist_destroy(list_t *l, boolean_t own)
3076 {
3077         struct promotenode *snap;
3078
3079         if (!l || !list_link_active(&l->list_head))
3080                 return;
3081
3082         while ((snap = list_tail(l)) != NULL) {
3083                 list_remove(l, snap);
3084                 if (own)
3085                         dsl_dataset_disown(snap->ds, snaplist_tag);
3086                 else
3087                         dsl_dataset_rele(snap->ds, snaplist_tag);
3088                 kmem_free(snap, sizeof (struct promotenode));
3089         }
3090         list_destroy(l);
3091 }
3092
3093 /*
3094  * Promote a clone.  Nomenclature note:
3095  * "clone" or "cds": the original clone which is being promoted
3096  * "origin" or "ods": the snapshot which is originally clone's origin
3097  * "origin head" or "ohds": the dataset which is the head
3098  * (filesystem/volume) for the origin
3099  * "origin origin": the origin of the origin's filesystem (typically
3100  * NULL, indicating that the clone is not a clone of a clone).
3101  */
3102 int
3103 dsl_dataset_promote(const char *name, char *conflsnap)
3104 {
3105         dsl_dataset_t *ds;
3106         dsl_dir_t *dd;
3107         dsl_pool_t *dp;
3108         dmu_object_info_t doi;
3109         struct promotearg pa = { 0 };
3110         struct promotenode *snap;
3111         int err;
3112
3113         err = dsl_dataset_hold(name, FTAG, &ds);
3114         if (err)
3115                 return (err);
3116         dd = ds->ds_dir;
3117         dp = dd->dd_pool;
3118
3119         err = dmu_object_info(dp->dp_meta_objset,
3120             ds->ds_phys->ds_snapnames_zapobj, &doi);
3121         if (err) {
3122                 dsl_dataset_rele(ds, FTAG);
3123                 return (err);
3124         }
3125
3126         if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3127                 dsl_dataset_rele(ds, FTAG);
3128                 return (EINVAL);
3129         }
3130
3131         /*
3132          * We are going to inherit all the snapshots taken before our
3133          * origin (i.e., our new origin will be our parent's origin).
3134          * Take ownership of them so that we can rename them into our
3135          * namespace.
3136          */
3137         rw_enter(&dp->dp_config_rwlock, RW_READER);
3138
3139         err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3140             &pa.shared_snaps);
3141         if (err != 0)
3142                 goto out;
3143
3144         err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3145         if (err != 0)
3146                 goto out;
3147
3148         snap = list_head(&pa.shared_snaps);
3149         ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3150         err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3151             snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3152         if (err != 0)
3153                 goto out;
3154
3155         if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3156                 err = dsl_dataset_hold_obj(dp,
3157                     snap->ds->ds_dir->dd_phys->dd_origin_obj,
3158                     FTAG, &pa.origin_origin);
3159                 if (err != 0)
3160                         goto out;
3161         }
3162
3163 out:
3164         rw_exit(&dp->dp_config_rwlock);
3165
3166         /*
3167          * Add in 128x the snapnames zapobj size, since we will be moving
3168          * a bunch of snapnames to the promoted ds, and dirtying their
3169          * bonus buffers.
3170          */
3171         if (err == 0) {
3172                 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3173                     dsl_dataset_promote_sync, ds, &pa,
3174                     2 + 2 * doi.doi_physical_blocks_512);
3175                 if (err && pa.err_ds && conflsnap)
3176                         (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3177         }
3178
3179         snaplist_destroy(&pa.shared_snaps, B_TRUE);
3180         snaplist_destroy(&pa.clone_snaps, B_FALSE);
3181         snaplist_destroy(&pa.origin_snaps, B_FALSE);
3182         if (pa.origin_origin)
3183                 dsl_dataset_rele(pa.origin_origin, FTAG);
3184         dsl_dataset_rele(ds, FTAG);
3185         return (err);
3186 }
3187
/* Argument bundle for the clone <-> origin-head swap sync task. */
struct cloneswaparg {
        dsl_dataset_t *cds; /* clone dataset */
        dsl_dataset_t *ohds; /* origin's head dataset */
        boolean_t force; /* swap even if ohds changed since its last snap */
        int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
3194
/* ARGSUSED */
/*
 * Sync-task check for dsl_dataset_clone_swap(): validate that the clone
 * (csa->cds) and the origin head (csa->ohds) may have their contents
 * exchanged, and precompute the change in unconsumed refreservation
 * (csa->unused_refres_delta) for the sync function.  Returns EINVAL for
 * structural problems, ETXTBSY if ohds was modified since its last
 * snapshot and 'force' is not set, ENOSPC/EDQUOT for space failures.
 */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        struct cloneswaparg *csa = arg1;

        /* they should both be heads */
        if (dsl_dataset_is_snapshot(csa->cds) ||
            dsl_dataset_is_snapshot(csa->ohds))
                return (EINVAL);

        /* the branch point should be just before them */
        if (csa->cds->ds_prev != csa->ohds->ds_prev)
                return (EINVAL);

        /* cds should be the clone (unless they are unrelated) */
        if (csa->cds->ds_prev != NULL &&
            csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
            csa->ohds->ds_object !=
            csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
                return (EINVAL);

        /* the clone should be a child of the origin */
        if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
                return (EINVAL);

        /* ohds shouldn't be modified unless 'force' */
        if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
                return (ETXTBSY);

        /* adjust amount of any unconsumed refreservation */
        csa->unused_refres_delta =
            (int64_t)MIN(csa->ohds->ds_reserved,
            csa->ohds->ds_phys->ds_unique_bytes) -
            (int64_t)MIN(csa->ohds->ds_reserved,
            csa->cds->ds_phys->ds_unique_bytes);

        /* any growth in refreservation use must fit in available space */
        if (csa->unused_refres_delta > 0 &&
            csa->unused_refres_delta >
            dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
                return (ENOSPC);

        /* the clone's contents must fit under the head's refquota */
        if (csa->ohds->ds_quota != 0 &&
            csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
                return (EDQUOT);

        return (0);
}
3243
/* ARGSUSED */
/*
 * Sync-task apply function for dsl_dataset_clone_swap(): exchange the
 * contents (block pointers, byte counts, deadlists) of the clone and
 * the origin head, and rewire all the dsl_dir space accounting to
 * match.  Must run in syncing context after the _check function above;
 * the statement order here is load-bearing (evict before swapping,
 * deadlist-derived deltas computed before the SWITCH64s).
 */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        struct cloneswaparg *csa = arg1;
        dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

        ASSERT(csa->cds->ds_reserved == 0);
        ASSERT(csa->ohds->ds_quota == 0 ||
            csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

        dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
        dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

        /* Drop any cached objset state; it describes the pre-swap trees. */
        if (csa->cds->ds_objset != NULL) {
                dmu_objset_evict(csa->cds->ds_objset);
                csa->cds->ds_objset = NULL;
        }

        if (csa->ohds->ds_objset != NULL) {
                dmu_objset_evict(csa->ohds->ds_objset);
                csa->ohds->ds_objset = NULL;
        }

        /*
         * Reset origin's unique bytes, if it exists.
         */
        if (csa->cds->ds_prev) {
                dsl_dataset_t *origin = csa->cds->ds_prev;
                uint64_t comp, uncomp;

                dmu_buf_will_dirty(origin->ds_dbuf, tx);
                dsl_deadlist_space_range(&csa->cds->ds_deadlist,
                    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
                    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
        }

        /* swap blkptrs */
        {
                blkptr_t tmp;
                tmp = csa->ohds->ds_phys->ds_bp;
                csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
                csa->cds->ds_phys->ds_bp = tmp;
        }

        /* set dd_*_bytes */
        {
                int64_t dused, dcomp, duncomp;
                uint64_t cdl_used, cdl_comp, cdl_uncomp;
                uint64_t odl_used, odl_comp, odl_uncomp;

                ASSERT3U(csa->cds->ds_dir->dd_phys->
                    dd_used_breakdown[DD_USED_SNAP], ==, 0);

                dsl_deadlist_space(&csa->cds->ds_deadlist,
                    &cdl_used, &cdl_comp, &cdl_uncomp);
                dsl_deadlist_space(&csa->ohds->ds_deadlist,
                    &odl_used, &odl_comp, &odl_uncomp);

                /* deltas are (clone + its deadlist) - (head + its deadlist) */
                dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
                    (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
                dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
                    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
                duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
                    cdl_uncomp -
                    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

                dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
                    dused, dcomp, duncomp, tx);
                dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
                    -dused, -dcomp, -duncomp, tx);

                /*
                 * The difference in the space used by snapshots is the
                 * difference in snapshot space due to the head's
                 * deadlist (since that's the only thing that's
                 * changing that affects the snapused).
                 */
                dsl_deadlist_space_range(&csa->cds->ds_deadlist,
                    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
                    &cdl_used, &cdl_comp, &cdl_uncomp);
                dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
                    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
                    &odl_used, &odl_comp, &odl_uncomp);
                dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
                    DD_USED_HEAD, DD_USED_SNAP, tx);
        }

        /* swap ds_*_bytes */
        SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
            csa->cds->ds_phys->ds_referenced_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
            csa->cds->ds_phys->ds_compressed_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
            csa->cds->ds_phys->ds_uncompressed_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
            csa->cds->ds_phys->ds_unique_bytes);

        /* apply any parent delta for change in unconsumed refreservation */
        dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
            csa->unused_refres_delta, 0, 0, tx);

        /*
         * Swap deadlists.
         */
        dsl_deadlist_close(&csa->cds->ds_deadlist);
        dsl_deadlist_close(&csa->ohds->ds_deadlist);
        SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
            csa->cds->ds_phys->ds_deadlist_obj);
        dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
            csa->cds->ds_phys->ds_deadlist_obj);
        dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
            csa->ohds->ds_phys->ds_deadlist_obj);

        /* let the scrub/resilver code know the two datasets swapped */
        dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
3360
3361 /*
3362  * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
3363  * recv" into an existing fs to swizzle the file system to the new
3364  * version, and by "zfs rollback".  Can also be used to swap two
3365  * independent head datasets if neither has any snapshots.
3366  */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
        struct cloneswaparg csa;
        int error;

        /* both datasets must be owned (not merely held) by the caller */
        ASSERT(clone->ds_owner);
        ASSERT(origin_head->ds_owner);
retry:
        /*
         * Need exclusive access for the swap. If we're swapping these
         * datasets back after an error, we already hold the locks.
         */
        if (!RW_WRITE_HELD(&clone->ds_rwlock))
                rw_enter(&clone->ds_rwlock, RW_WRITER);
        if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
            !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
                /*
                 * Couldn't get the second lock without blocking: drop the
                 * first, take them in the opposite order, and retry the
                 * whole dance if that fails too.  This avoids deadlocking
                 * against a thread acquiring the same pair the other way.
                 */
                rw_exit(&clone->ds_rwlock);
                rw_enter(&origin_head->ds_rwlock, RW_WRITER);
                if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
                        rw_exit(&origin_head->ds_rwlock);
                        goto retry;
                }
        }
        csa.cds = clone;
        csa.ohds = origin_head;
        csa.force = force;
        /*
         * NOTE(review): the ds_rwlock writer locks are not dropped here;
         * presumably the owning caller releases them (e.g. via
         * dsl_dataset_disown()) -- confirm against callers.
         */
        error = dsl_sync_task_do(clone->ds_dir->dd_pool,
            dsl_dataset_clone_swap_check,
            dsl_dataset_clone_swap_sync, &csa, NULL, 9);
        return (error);
}
3400
3401 /*
3402  * Given a pool name and a dataset object number in that pool,
3403  * return the name of that dataset.
3404  */
3405 int
3406 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3407 {
3408         spa_t *spa;
3409         dsl_pool_t *dp;
3410         dsl_dataset_t *ds;
3411         int error;
3412
3413         if ((error = spa_open(pname, &spa, FTAG)) != 0)
3414                 return (error);
3415         dp = spa_get_dsl(spa);
3416         rw_enter(&dp->dp_config_rwlock, RW_READER);
3417         if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3418                 dsl_dataset_name(ds, buf);
3419                 dsl_dataset_rele(ds, FTAG);
3420         }
3421         rw_exit(&dp->dp_config_rwlock);
3422         spa_close(spa, FTAG);
3423
3424         return (error);
3425 }
3426
/*
 * Check whether writing 'asize' more bytes (with 'inflight' bytes
 * already in flight) to 'ds' would exceed its refquota.  On entry *used
 * is the caller's charged space; it is adjusted down by any
 * refreservation the write will consume, and *ref_rsrv is set to the
 * portion of asize covered by unconsumed refreservation.  Returns 0,
 * ERESTART (estimate over quota but pending frees may make room --
 * caller should retry), or EDQUOT (hard over-quota).
 */
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
        int error = 0;

        ASSERT3S(asize, >, 0);

        /*
         * *ref_rsrv is the portion of asize that will come from any
         * unconsumed refreservation space.
         */
        *ref_rsrv = 0;

        mutex_enter(&ds->ds_lock);
        /*
         * Make a space adjustment for reserved bytes.
         */
        if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
                ASSERT3U(*used, >=,
                    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
                *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
                *ref_rsrv =
                    asize - MIN(asize, parent_delta(ds, asize + inflight));
        }

        if (!check_quota || ds->ds_quota == 0) {
                mutex_exit(&ds->ds_lock);
                return (0);
        }
        /*
         * If they are requesting more space, and our current estimate
         * is over quota, they get to try again unless the actual
         * on-disk is over quota and there are no pending changes (which
         * may free up space for us).
         */
        if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
                if (inflight > 0 ||
                    ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
                        error = ERESTART;
                else
                        error = EDQUOT;
        }
        mutex_exit(&ds->ds_lock);

        return (error);
}
3474
3475 /* ARGSUSED */
3476 static int
3477 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3478 {
3479         dsl_dataset_t *ds = arg1;
3480         dsl_prop_setarg_t *psa = arg2;
3481         int err;
3482
3483         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3484                 return (ENOTSUP);
3485
3486         if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3487                 return (err);
3488
3489         if (psa->psa_effective_value == 0)
3490                 return (0);
3491
3492         if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3493             psa->psa_effective_value < ds->ds_reserved)
3494                 return (ENOSPC);
3495
3496         return (0);
3497 }
3498
3499 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3500
3501 void
3502 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3503 {
3504         dsl_dataset_t *ds = arg1;
3505         dsl_prop_setarg_t *psa = arg2;
3506         uint64_t effective_value = psa->psa_effective_value;
3507
3508         dsl_prop_set_sync(ds, psa, tx);
3509         DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3510
3511         if (ds->ds_quota != effective_value) {
3512                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3513                 ds->ds_quota = effective_value;
3514         }
3515 }
3516
3517 int
3518 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3519 {
3520         dsl_dataset_t *ds;
3521         dsl_prop_setarg_t psa;
3522         int err;
3523
3524         dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3525
3526         err = dsl_dataset_hold(dsname, FTAG, &ds);
3527         if (err)
3528                 return (err);
3529
3530         /*
3531          * If someone removes a file, then tries to set the quota, we
3532          * want to make sure the file freeing takes effect.
3533          */
3534         txg_wait_open(ds->ds_dir->dd_pool, 0);
3535
3536         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3537             dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3538             ds, &psa, 0);
3539
3540         dsl_dataset_rele(ds, FTAG);
3541         return (err);
3542 }
3543
/*
 * Sync-task check for setting refreservation.  In open context only the
 * cheap validations run (space estimates may be stale); the space and
 * quota checks are repeated authoritatively in syncing context.
 */
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        dsl_prop_setarg_t *psa = arg2;
        uint64_t effective_value;
        uint64_t unique;
        int err;

        if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
            SPA_VERSION_REFRESERVATION)
                return (ENOTSUP);

        /* refreservation only makes sense on a head dataset */
        if (dsl_dataset_is_snapshot(ds))
                return (EINVAL);

        if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
                return (err);

        effective_value = psa->psa_effective_value;

        /*
         * If we are doing the preliminary check in open context, the
         * space estimates may be inaccurate.
         */
        if (!dmu_tx_is_syncing(tx))
                return (0);

        mutex_enter(&ds->ds_lock);
        if (!DS_UNIQUE_IS_ACCURATE(ds))
                dsl_dataset_recalc_head_uniq(ds);
        unique = ds->ds_phys->ds_unique_bytes;
        mutex_exit(&ds->ds_lock);

        /* only the reservation beyond what is already unique costs space */
        if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
                uint64_t delta = MAX(unique, effective_value) -
                    MAX(unique, ds->ds_reserved);

                if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
                        return (ENOSPC);
                if (ds->ds_quota > 0 &&
                    effective_value > ds->ds_quota)
                        return (ENOSPC);
        }

        return (0);
}
3591
/*
 * Sync-task apply function for setting refreservation: persist the
 * property, update the in-core ds_reserved, and charge/credit the
 * dsl_dir for the change in unconsumed reservation.  Lock order here is
 * dd_lock before ds_lock, and dd_lock is held across the
 * dsl_dir_diduse_space() call.
 */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        dsl_prop_setarg_t *psa = arg2;
        uint64_t effective_value = psa->psa_effective_value;
        uint64_t unique;
        int64_t delta;

        dsl_prop_set_sync(ds, psa, tx);
        DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

        dmu_buf_will_dirty(ds->ds_dbuf, tx);

        mutex_enter(&ds->ds_dir->dd_lock);
        mutex_enter(&ds->ds_lock);
        ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
        unique = ds->ds_phys->ds_unique_bytes;
        /* delta = new unconsumed reservation - old unconsumed reservation */
        delta = MAX(0, (int64_t)(effective_value - unique)) -
            MAX(0, (int64_t)(ds->ds_reserved - unique));
        ds->ds_reserved = effective_value;
        mutex_exit(&ds->ds_lock);

        dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
        mutex_exit(&ds->ds_dir->dd_lock);
}
3618
3619 int
3620 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3621     uint64_t reservation)
3622 {
3623         dsl_dataset_t *ds;
3624         dsl_prop_setarg_t psa;
3625         int err;
3626
3627         dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3628             &reservation);
3629
3630         err = dsl_dataset_hold(dsname, FTAG, &ds);
3631         if (err)
3632                 return (err);
3633
3634         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3635             dsl_dataset_set_reservation_check,
3636             dsl_dataset_set_reservation_sync, ds, &psa, 0);
3637
3638         dsl_dataset_rele(ds, FTAG);
3639         return (err);
3640 }
3641
/*
 * State captured so a temporary hold can be released when the process
 * that created it exits (see dsl_dataset_user_release_onexit()).
 */
typedef struct zfs_hold_cleanup_arg {
        dsl_pool_t *dp;         /* pool containing the held snapshot */
        uint64_t dsobj;         /* object number of the held snapshot */
        char htag[MAXNAMELEN];  /* tag of the hold to release */
} zfs_hold_cleanup_arg_t;
3647
3648 static void
3649 dsl_dataset_user_release_onexit(void *arg)
3650 {
3651         zfs_hold_cleanup_arg_t *ca = arg;
3652
3653         (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3654             B_TRUE);
3655         kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3656 }
3657
3658 void
3659 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3660     minor_t minor)
3661 {
3662         zfs_hold_cleanup_arg_t *ca;
3663
3664         ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3665         ca->dp = ds->ds_dir->dd_pool;
3666         ca->dsobj = ds->ds_object;
3667         (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3668         VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3669             dsl_dataset_user_release_onexit, ca, NULL));
3670 }
3671
3672 /*
3673  * If you add new checks here, you may need to add
3674  * additional checks to the "temporary" case in
3675  * snapshot_check() in dmu_objset.c.
3676  */
3677 static int
3678 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3679 {
3680         dsl_dataset_t *ds = arg1;
3681         struct dsl_ds_holdarg *ha = arg2;
3682         char *htag = ha->htag;
3683         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3684         int error = 0;
3685
3686         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3687                 return (ENOTSUP);
3688
3689         if (!dsl_dataset_is_snapshot(ds))
3690                 return (EINVAL);
3691
3692         /* tags must be unique */
3693         mutex_enter(&ds->ds_lock);
3694         if (ds->ds_phys->ds_userrefs_obj) {
3695                 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3696                     8, 1, tx);
3697                 if (error == 0)
3698                         error = EEXIST;
3699                 else if (error == ENOENT)
3700                         error = 0;
3701         }
3702         mutex_exit(&ds->ds_lock);
3703
3704         if (error == 0 && ha->temphold &&
3705             strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3706                 error = E2BIG;
3707
3708         return (error);
3709 }
3710
/*
 * Sync-task apply function for user holds: record 'htag' in the
 * snapshot's userrefs zap (creating the zap on the first hold), bump
 * the in-core ds_userrefs count, and for tempholds also register the
 * hold with the pool so it can be cleaned up later.
 */
void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        struct dsl_ds_holdarg *ha = arg2;
        char *htag = ha->htag;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        uint64_t now = gethrestime_sec();
        uint64_t zapobj;

        mutex_enter(&ds->ds_lock);
        if (ds->ds_phys->ds_userrefs_obj == 0) {
                /*
                 * This is the first user hold for this dataset.  Create
                 * the userrefs zap object.
                 */
                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                zapobj = ds->ds_phys->ds_userrefs_obj =
                    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
        } else {
                zapobj = ds->ds_phys->ds_userrefs_obj;
        }
        ds->ds_userrefs++;
        mutex_exit(&ds->ds_lock);

        /* the zap value is the time the hold was taken */
        VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

        if (ha->temphold) {
                VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
                    htag, &now, tx));
        }

        spa_history_log_internal(LOG_DS_USER_HOLD,
            dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
            (int)ha->temphold, ds->ds_object);
}
3748
3749 static int
3750 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3751 {
3752         struct dsl_ds_holdarg *ha = arg;
3753         dsl_dataset_t *ds;
3754         int error;
3755         char *name;
3756
3757         /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3758         name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3759         error = dsl_dataset_hold(name, ha->dstg, &ds);
3760         strfree(name);
3761         if (error == 0) {
3762                 ha->gotone = B_TRUE;
3763                 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3764                     dsl_dataset_user_hold_sync, ds, ha, 0);
3765         } else if (error == ENOENT && ha->recursive) {
3766                 error = 0;
3767         } else {
3768                 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3769         }
3770         return (error);
3771 }
3772
3773 int
3774 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3775     boolean_t temphold)
3776 {
3777         struct dsl_ds_holdarg *ha;
3778         int error;
3779
3780         ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3781         ha->htag = htag;
3782         ha->temphold = temphold;
3783         error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3784             dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3785             ds, ha, 0);
3786         kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3787
3788         return (error);
3789 }
3790
/*
 * Place a user hold named 'htag' on snapshot 'dsname@snapname' (and,
 * if 'recursive', on the same-named snapshot of every descendant
 * filesystem).  If 'temphold' is set the hold is temporary; if
 * 'cleanup_fd' is not -1 the temporary hold is released when that
 * onexit minor is closed.  On failure the name of the offending
 * filesystem is copied back into 'dsname'.
 */
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold, int cleanup_fd)
{
        struct dsl_ds_holdarg *ha;
        dsl_sync_task_t *dst;
        spa_t *spa;
        int error;
        minor_t minor = 0;

        if (cleanup_fd != -1) {
                /* Currently we only support cleanup-on-exit of tempholds. */
                if (!temphold)
                        return (EINVAL);
                error = zfs_onexit_fd_hold(cleanup_fd, &minor);
                if (error)
                        return (error);
        }

        ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

        (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

        error = spa_open(dsname, &spa, FTAG);
        if (error) {
                kmem_free(ha, sizeof (struct dsl_ds_holdarg));
                if (cleanup_fd != -1)
                        zfs_onexit_fd_rele(cleanup_fd);
                return (error);
        }

        ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
        ha->htag = htag;
        ha->snapname = snapname;
        ha->recursive = recursive;
        ha->temphold = temphold;

        /* queue one sync task per dataset, then run them as one group */
        if (recursive) {
                error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
                    ha, DS_FIND_CHILDREN);
        } else {
                error = dsl_dataset_user_hold_one(dsname, ha);
        }
        if (error == 0)
                error = dsl_sync_task_group_wait(ha->dstg);

        for (dst = list_head(&ha->dstg->dstg_tasks); dst;
            dst = list_next(&ha->dstg->dstg_tasks, dst)) {
                dsl_dataset_t *ds = dst->dst_arg1;

                if (dst->dst_err) {
                        /* report the filesystem (not the snapshot) name */
                        dsl_dataset_name(ds, ha->failed);
                        *strchr(ha->failed, '@') = '\0';
                } else if (error == 0 && minor != 0 && temphold) {
                        /*
                         * If this hold is to be released upon process exit,
                         * register that action now.
                         */
                        dsl_register_onexit_hold_cleanup(ds, htag, minor);
                }
                dsl_dataset_rele(ds, ha->dstg);
        }

        if (error == 0 && recursive && !ha->gotone)
                error = ENOENT;

        if (error)
                /*
                 * NOTE(review): the copy is bounded by the size of the
                 * source buffer (ha->failed); this assumes the caller's
                 * dsname buffer is at least that large -- confirm.
                 */
                (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

        dsl_sync_task_group_destroy(ha->dstg);

        kmem_free(ha, sizeof (struct dsl_ds_holdarg));
        spa_close(spa, FTAG);
        if (cleanup_fd != -1)
                zfs_onexit_fd_rele(cleanup_fd);
        return (error);
}
3868
/* Per-dataset argument for the user-hold release sync tasks. */
struct dsl_ds_releasearg {
        dsl_dataset_t *ds;      /* snapshot whose hold is being released */
        const char *htag;       /* tag of the hold to release */
        boolean_t own;          /* do we own or just hold ds? */
};
3874
/*
 * Verify that hold 'htag' exists on snapshot 'ds', and report via
 * *might_destroy whether releasing it could trigger a deferred destroy
 * (it is the last user ref and a destroy was deferred).  Returns ESRCH
 * if the tag does not exist.
 */
static int
dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
    boolean_t *might_destroy)
{
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        uint64_t zapobj;
        uint64_t tmp;
        int error;

        *might_destroy = B_FALSE;

        mutex_enter(&ds->ds_lock);
        zapobj = ds->ds_phys->ds_userrefs_obj;
        if (zapobj == 0) {
                /* The tag can't possibly exist */
                mutex_exit(&ds->ds_lock);
                return (ESRCH);
        }

        /* Make sure the tag exists */
        error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
        if (error) {
                mutex_exit(&ds->ds_lock);
                if (error == ENOENT)
                        error = ESRCH;
                return (error);
        }

        if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
            DS_IS_DEFER_DESTROY(ds))
                *might_destroy = B_TRUE;

        mutex_exit(&ds->ds_lock);
        return (0);
}
3910
/*
 * Sync-task check for releasing a user hold.  If releasing the hold
 * could trigger a deferred destroy, the caller must own the dataset
 * (required to destroy it from syncing context), and the destroy
 * itself must pass its own check.
 */
static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
        struct dsl_ds_releasearg *ra = arg1;
        dsl_dataset_t *ds = ra->ds;
        boolean_t might_destroy;
        int error;

        if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
                return (ENOTSUP);

        error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
        if (error)
                return (error);

        if (might_destroy) {
                struct dsl_ds_destroyarg dsda = {0};

                if (dmu_tx_is_syncing(tx)) {
                        /*
                         * If we're not prepared to remove the snapshot,
                         * we can't allow the release to happen right now.
                         */
                        if (!ra->own)
                                return (EBUSY);
                }
                dsda.ds = ds;
                dsda.releasing = B_TRUE;
                return (dsl_dataset_destroy_check(&dsda, tag, tx));
        }

        return (0);
}
3944
/*
 * Sync-task apply function for releasing a user hold: remove 'htag'
 * from the snapshot's userrefs zap, drop the in-core count, and, if
 * this was the last user ref on a defer-destroyed snapshot, destroy it
 * (the destroy check already ran in dsl_dataset_user_release_check()).
 */
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
        struct dsl_ds_releasearg *ra = arg1;
        dsl_dataset_t *ds = ra->ds;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        uint64_t zapobj;
        uint64_t dsobj = ds->ds_object;
        uint64_t refs;
        int error;

        mutex_enter(&ds->ds_lock);
        ds->ds_userrefs--;
        refs = ds->ds_userrefs;
        mutex_exit(&ds->ds_lock);
        /* remove any matching temporary-hold record; ENOENT is fine */
        error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
        VERIFY(error == 0 || error == ENOENT);
        zapobj = ds->ds_phys->ds_userrefs_obj;
        VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));

        spa_history_log_internal(LOG_DS_USER_RELEASE,
            dp->dp_spa, tx, "<%s> %lld dataset = %llu",
            ra->htag, (longlong_t)refs, dsobj);

        if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
            DS_IS_DEFER_DESTROY(ds)) {
                struct dsl_ds_destroyarg dsda = {0};

                ASSERT(ra->own);
                dsda.ds = ds;
                dsda.releasing = B_TRUE;
                /* We already did the destroy_check */
                dsl_dataset_destroy_sync(&dsda, tag, tx);
        }
}
3981
/*
 * dmu_objset_find() callback: arrange the release of hold ha->htag on
 * the snapshot "dsname@ha->snapname" by appending a sync task to
 * ha->dstg.  If dropping this hold could trigger a deferred destroy,
 * the snapshot is unmounted and exclusively owned here, in open
 * context, so the sync task is allowed to destroy it.
 *
 * Returns 0 on success (including a missing snapshot during a
 * recursive release, which is silently skipped), or an errno; on error
 * ha->failed records which dataset failed.
 */
static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
        struct dsl_ds_holdarg *ha = arg;
        struct dsl_ds_releasearg *ra;
        dsl_dataset_t *ds;
        int error;
        void *dtag = ha->dstg;
        char *name;
        boolean_t own = B_FALSE;
        boolean_t might_destroy;

        /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
        name = kmem_asprintf("%s@%s", dsname, ha->snapname);
        error = dsl_dataset_hold(name, dtag, &ds);
        strfree(name);
        /*
         * During a recursive release not every descendant need have the
         * snapshot; a missing one is not an error.
         */
        if (error == ENOENT && ha->recursive)
                return (0);
        /* Record this dataset as the failure point for error reporting. */
        (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
        if (error)
                return (error);

        /* At least one snapshot matched (used to detect ENOENT overall). */
        ha->gotone = B_TRUE;

        ASSERT(dsl_dataset_is_snapshot(ds));

        error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
        if (error) {
                dsl_dataset_rele(ds, dtag);
                return (error);
        }

        if (might_destroy) {
#ifdef _KERNEL
                /* Unmount before destroying; needs the full name again. */
                name = kmem_asprintf("%s@%s", dsname, ha->snapname);
                error = zfs_unmount_snap(name, NULL);
                strfree(name);
                if (error) {
                        dsl_dataset_rele(ds, dtag);
                        return (error);
                }
#endif
                /*
                 * Take exclusive ownership so the sync task may destroy
                 * the snapshot; EBUSY here is retried by our caller.
                 */
                if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
                        dsl_dataset_rele(ds, dtag);
                        return (EBUSY);
                } else {
                        own = B_TRUE;
                        dsl_dataset_make_exclusive(ds, dtag);
                }
        }

        /* ra is freed by dsl_dataset_user_release() after the group runs. */
        ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
        ra->ds = ds;
        ra->htag = ha->htag;
        ra->own = own;
        dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
            dsl_dataset_user_release_sync, ra, dtag, 0);

        return (0);
}
4042
/*
 * Release the user hold "htag" on snapshot "dsname@snapname" (or, when
 * "recursive" is set, on that snapshot of dsname and every descendant
 * filesystem).  All releases are grouped into one sync task group so
 * they commit in a single txg.  On failure other than EBUSY, the name
 * of the dataset that failed is copied back into "dsname".  EBUSY is
 * treated as transient (see comment at bottom) and retried.
 */
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
        struct dsl_ds_holdarg *ha;
        dsl_sync_task_t *dst;
        spa_t *spa;
        int error;

top:
        ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

        (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

        error = spa_open(dsname, &spa, FTAG);
        if (error) {
                kmem_free(ha, sizeof (struct dsl_ds_holdarg));
                return (error);
        }

        ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
        ha->htag = htag;
        ha->snapname = snapname;
        ha->recursive = recursive;
        if (recursive) {
                error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
                    ha, DS_FIND_CHILDREN);
        } else {
                error = dsl_dataset_user_release_one(dsname, ha);
        }
        if (error == 0)
                error = dsl_sync_task_group_wait(ha->dstg);

        /*
         * Clean up every queued task: record the first failing dataset's
         * name, drop ownership or the plain hold as appropriate, and free
         * the per-task release arg allocated in *_release_one().
         */
        for (dst = list_head(&ha->dstg->dstg_tasks); dst;
            dst = list_next(&ha->dstg->dstg_tasks, dst)) {
                struct dsl_ds_releasearg *ra = dst->dst_arg1;
                dsl_dataset_t *ds = ra->ds;

                if (dst->dst_err)
                        dsl_dataset_name(ds, ha->failed);

                if (ra->own)
                        dsl_dataset_disown(ds, ha->dstg);
                else
                        dsl_dataset_rele(ds, ha->dstg);

                kmem_free(ra, sizeof (struct dsl_ds_releasearg));
        }

        /* A recursive release that matched no snapshot at all is ENOENT. */
        if (error == 0 && recursive && !ha->gotone)
                error = ENOENT;

        /*
         * NOTE(review): this assumes dsname points to a buffer of at
         * least sizeof (ha->failed) bytes — confirm with callers.
         */
        if (error && error != EBUSY)
                (void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

        dsl_sync_task_group_destroy(ha->dstg);
        kmem_free(ha, sizeof (struct dsl_ds_holdarg));
        spa_close(spa, FTAG);

        /*
         * We can get EBUSY if we were racing with deferred destroy and
         * dsl_dataset_user_release_check() hadn't done the necessary
         * open context setup.  We can also get EBUSY if we're racing
         * with destroy and that thread is the ds_owner.  Either way
         * the busy condition should be transient, and we should retry
         * the release operation.
         */
        if (error == EBUSY)
                goto top;

        return (error);
}
4115
/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
 *
 * Translates the object number "dsobj" back into a "fs@snap" name and
 * performs a normal (non-recursive) release of "htag" on it.
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
        dsl_dataset_t *ds;
        char *snap;
        char *name;
        int namelen;
        int error;

        do {
                /* Look up the dataset by object number to learn its name. */
                rw_enter(&dp->dp_config_rwlock, RW_READER);
                error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
                rw_exit(&dp->dp_config_rwlock);
                if (error)
                        return (error);
                namelen = dsl_dataset_namelen(ds)+1;
                name = kmem_alloc(namelen, KM_SLEEP);
                dsl_dataset_name(ds, name);
                dsl_dataset_rele(ds, FTAG);

                /*
                 * Split "fs@snap" at the '@'.
                 * NOTE(review): assumes dsobj always names a snapshot, so
                 * strchr() cannot return NULL here — confirm with callers.
                 */
                snap = strchr(name, '@');
                *snap = '\0';
                ++snap;
                error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
                kmem_free(name, namelen);

                /*
                 * The object can't have been destroyed because we have a hold,
                 * but it might have been renamed, resulting in ENOENT.  Retry
                 * if we've been requested to do so.
                 *
                 * It would be nice if we could use the dsobj all the way
                 * through and avoid ENOENT entirely.  But we might need to
                 * unmount the snapshot, and there's currently no way to lookup
                 * a vfsp using a ZFS object id.
                 */
        } while ((error == ENOENT) && retry);

        return (error);
}
4161
4162 int
4163 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4164 {
4165         dsl_dataset_t *ds;
4166         int err;
4167
4168         err = dsl_dataset_hold(dsname, FTAG, &ds);
4169         if (err)
4170                 return (err);
4171
4172         VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4173         if (ds->ds_phys->ds_userrefs_obj != 0) {
4174                 zap_attribute_t *za;
4175                 zap_cursor_t zc;
4176
4177                 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4178                 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4179                     ds->ds_phys->ds_userrefs_obj);
4180                     zap_cursor_retrieve(&zc, za) == 0;
4181                     zap_cursor_advance(&zc)) {
4182                         VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4183                             za->za_first_integer));
4184                 }
4185                 zap_cursor_fini(&zc);
4186                 kmem_free(za, sizeof (zap_attribute_t));
4187         }
4188         dsl_dataset_rele(ds, FTAG);
4189         return (0);
4190 }
4191
4192 /*
4193  * Note, this function is used as the callback for dmu_objset_find().  We
4194  * always return 0 so that we will continue to find and process
4195  * inconsistent datasets, even if we encounter an error trying to
4196  * process one of them.
4197  */
4198 /* ARGSUSED */
4199 int
4200 dsl_destroy_inconsistent(const char *dsname, void *arg)
4201 {
4202         dsl_dataset_t *ds;
4203
4204         if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4205                 if (DS_IS_INCONSISTENT(ds))
4206                         (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4207                 else
4208                         dsl_dataset_disown(ds, FTAG);
4209         }
4210         return (0);
4211 }
4212
/*
 * Return (in *usedp) the amount of space written in new that is not
 * present in oldsnap.  New may be a snapshot or the head.  Old must be
 * a snapshot before new, in new's filesystem (or its origin).  If not then
 * fail and return EINVAL.
 *
 * The written space is calculated by considering two components:  First, we
 * ignore any freed space, and calculate the written as new's used space
 * minus old's used space.  Next, we add in the amount of space that was freed
 * between the two snapshots, thus reducing new's used space relative to old's.
 * Specifically, this is the space that was born before old->ds_creation_txg,
 * and freed before new (ie. on new's deadlist or a previous deadlist).
 *
 * space freed                         [---------------------]
 * snapshots                       ---O-------O--------O-------O------
 *                                         oldsnap            new
 */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
        int err = 0;
        uint64_t snapobj;
        dsl_pool_t *dp = new->ds_dir->dd_pool;

        /*
         * These subtractions may temporarily wrap (uint64 modular
         * arithmetic); the deadlist space added back in the loop below
         * brings the totals to their final non-negative values.
         */
        *usedp = 0;
        *usedp += new->ds_phys->ds_referenced_bytes;
        *usedp -= oldsnap->ds_phys->ds_referenced_bytes;

        *compp = 0;
        *compp += new->ds_phys->ds_compressed_bytes;
        *compp -= oldsnap->ds_phys->ds_compressed_bytes;

        *uncompp = 0;
        *uncompp += new->ds_phys->ds_uncompressed_bytes;
        *uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;

        /*
         * Walk backward from new toward oldsnap via ds_prev_snap_obj,
         * summing the deadlist space born before oldsnap's creation txg.
         */
        rw_enter(&dp->dp_config_rwlock, RW_READER);
        snapobj = new->ds_object;
        while (snapobj != oldsnap->ds_object) {
                dsl_dataset_t *snap;
                uint64_t used, comp, uncomp;

                /* "new" is already held by the caller; don't re-hold it. */
                if (snapobj == new->ds_object) {
                        snap = new;
                } else {
                        err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
                        if (err != 0)
                                break;
                }

                if (snap->ds_phys->ds_prev_snap_txg ==
                    oldsnap->ds_phys->ds_creation_txg) {
                        /*
                         * The blocks in the deadlist can not be born after
                         * ds_prev_snap_txg, so get the whole deadlist space,
                         * which is more efficient (especially for old-format
                         * deadlists).  Unfortunately the deadlist code
                         * doesn't have enough information to make this
                         * optimization itself.
                         */
                        dsl_deadlist_space(&snap->ds_deadlist,
                            &used, &comp, &uncomp);
                } else {
                        dsl_deadlist_space_range(&snap->ds_deadlist,
                            0, oldsnap->ds_phys->ds_creation_txg,
                            &used, &comp, &uncomp);
                }
                *usedp += used;
                *compp += comp;
                *uncompp += uncomp;

                /*
                 * If we get to the beginning of the chain of snapshots
                 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
                 * was not a snapshot of/before new.
                 */
                snapobj = snap->ds_phys->ds_prev_snap_obj;
                if (snap != new)
                        dsl_dataset_rele(snap, FTAG);
                if (snapobj == 0) {
                        err = EINVAL;
                        break;
                }

        }
        rw_exit(&dp->dp_config_rwlock);
        return (err);
}
4302
4303 /*
4304  * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4305  * lastsnap, and all snapshots in between are deleted.
4306  *
4307  * blocks that would be freed            [---------------------------]
4308  * snapshots                       ---O-------O--------O-------O--------O
4309  *                                        firstsnap        lastsnap
4310  *
4311  * This is the set of blocks that were born after the snap before firstsnap,
4312  * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4313  * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4314  * We calculate this by iterating over the relevant deadlists (from the snap
4315  * after lastsnap, backward to the snap after firstsnap), summing up the
4316  * space on the deadlist that was born after the snap before firstsnap.
4317  */
4318 int
4319 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4320     dsl_dataset_t *lastsnap,
4321     uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4322 {
4323         int err = 0;
4324         uint64_t snapobj;
4325         dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4326
4327         ASSERT(dsl_dataset_is_snapshot(firstsnap));
4328         ASSERT(dsl_dataset_is_snapshot(lastsnap));
4329
4330         /*
4331          * Check that the snapshots are in the same dsl_dir, and firstsnap
4332          * is before lastsnap.
4333          */
4334         if (firstsnap->ds_dir != lastsnap->ds_dir ||
4335             firstsnap->ds_phys->ds_creation_txg >
4336             lastsnap->ds_phys->ds_creation_txg)
4337                 return (EINVAL);
4338
4339         *usedp = *compp = *uncompp = 0;
4340
4341         rw_enter(&dp->dp_config_rwlock, RW_READER);
4342         snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4343         while (snapobj != firstsnap->ds_object) {
4344                 dsl_dataset_t *ds;
4345                 uint64_t used, comp, uncomp;
4346
4347                 err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4348                 if (err != 0)
4349                         break;
4350
4351                 dsl_deadlist_space_range(&ds->ds_deadlist,
4352                     firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4353                     &used, &comp, &uncomp);
4354                 *usedp += used;
4355                 *compp += comp;
4356                 *uncompp += uncomp;
4357
4358                 snapobj = ds->ds_phys->ds_prev_snap_obj;
4359                 ASSERT3U(snapobj, !=, 0);
4360                 dsl_dataset_rele(ds, FTAG);
4361         }
4362         rw_exit(&dp->dp_config_rwlock);
4363         return (err);
4364 }