]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
Merge recent vendor changes:
[FreeBSD/FreeBSD.git] / sys / cddl / contrib / opensolaris / uts / common / fs / zfs / dsl_dataset.c
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25  * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
26  * All rights reserved.
27  * Portions Copyright (c) 2011 Martin Matuska <mm@FreeBSD.org>
28  */
29
30 #include <sys/dmu_objset.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_dir.h>
33 #include <sys/dsl_prop.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dmu_traverse.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/arc.h>
39 #include <sys/zio.h>
40 #include <sys/zap.h>
41 #include <sys/zfeature.h>
42 #include <sys/unique.h>
43 #include <sys/zfs_context.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/spa.h>
46 #include <sys/zfs_znode.h>
47 #include <sys/zfs_onexit.h>
48 #include <sys/zvol.h>
49 #include <sys/dsl_scan.h>
50 #include <sys/dsl_deadlist.h>
51
/*
 * Sentinel ds_owner value marking a dataset whose destruction is
 * assured; see DSL_DATASET_IS_DESTROYED() below.
 */
static char *dsl_reaper = "the grim reaper";

/* Forward declarations for sync-task callbacks defined later in this file. */
static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
57
/*
 * Swap two uint64_t lvalues in place.  Wrapped in do/while (0) so the
 * macro expands to a single statement and is safe to use with a
 * trailing semicolon inside an unbraced if/else (a bare { } block
 * followed by ';' would break the else association).  The arguments
 * are evaluated more than once, so they must be side-effect free.
 */
#define SWITCH64(x, y) \
        do { \
                uint64_t switch64_tmp = (x); \
                (x) = (y); \
                (y) = switch64_tmp; \
        } while (0)
64
/* Upper bound on dataset reference counts. */
#define DS_REF_MAX      (1ULL << 62)

/* Blocksize used for on-disk deadlist objects. */
#define DSL_DEADLIST_BLOCKSIZE  SPA_MAXBLOCKSIZE

/*
 * A dataset whose ds_owner has been set to the dsl_reaper sentinel is
 * committed to destruction; see dsl_dataset_hold_ref()/disown().
 */
#define DSL_DATASET_IS_DESTROYED(ds)    ((ds)->ds_owner == dsl_reaper)
71
/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
77 static int64_t
78 parent_delta(dsl_dataset_t *ds, int64_t delta)
79 {
80         uint64_t old_bytes, new_bytes;
81
82         if (ds->ds_reserved == 0)
83                 return (delta);
84
85         old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
86         new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
87
88         ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
89         return (new_bytes - old_bytes);
90 }
91
/*
 * Account for a newly allocated ("born") block in syncing context:
 * charge its space to the dataset's referenced/compressed/uncompressed/
 * unique byte counts and propagate the usage into the dsl_dir
 * accounting.  With ds == NULL the block belongs to the MOS and only
 * pool-wide MOS accounting is updated.
 */
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
        int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
        int compressed = BP_GET_PSIZE(bp);
        int uncompressed = BP_GET_UCSIZE(bp);
        int64_t delta;

        dprintf_bp(bp, "ds=%p", ds);

        ASSERT(dmu_tx_is_syncing(tx));
        /* It could have been compressed away to nothing */
        if (BP_IS_HOLE(bp))
                return;
        ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
        ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
        if (ds == NULL) {
                dsl_pool_mos_diduse_space(tx->tx_pool,
                    used, compressed, uncompressed);
                return;
        }
        dmu_buf_will_dirty(ds->ds_dbuf, tx);

        /* Lock order: dd_lock before ds_lock. */
        mutex_enter(&ds->ds_dir->dd_lock);
        mutex_enter(&ds->ds_lock);
        /*
         * With a refreservation, part of this space is already charged
         * to our ancestors; see parent_delta().
         */
        delta = parent_delta(ds, used);
        ds->ds_phys->ds_referenced_bytes += used;
        ds->ds_phys->ds_compressed_bytes += compressed;
        ds->ds_phys->ds_uncompressed_bytes += uncompressed;
        ds->ds_phys->ds_unique_bytes += used;
        mutex_exit(&ds->ds_lock);
        dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
            compressed, uncompressed, tx);
        /* The refreservation-covered portion just moves categories. */
        dsl_dir_transfer_space(ds->ds_dir, used - delta,
            DD_USED_REFRSRV, DD_USED_HEAD, tx);
        mutex_exit(&ds->ds_dir->dd_lock);
}
129
/*
 * Account for a block being removed from this dataset in syncing
 * context.  If the block was born after the most recent snapshot it is
 * actually freed and its space reclaimed; otherwise it is still
 * referenced by a snapshot and goes onto the dataset's deadlist
 * (deferred to dsl_pool_sync() when called from a zio interrupt
 * thread, i.e. async).  Returns the number of bytes accounted (the
 * block's dsize), or 0 for a hole.
 */
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
    boolean_t async)
{
        if (BP_IS_HOLE(bp))
                return (0);

        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(bp->blk_birth <= tx->tx_txg);

        int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
        int compressed = BP_GET_PSIZE(bp);
        int uncompressed = BP_GET_UCSIZE(bp);

        ASSERT(used > 0);
        if (ds == NULL) {
                /* MOS block: free it and adjust pool-wide accounting. */
                dsl_free(tx->tx_pool, tx->tx_txg, bp);
                dsl_pool_mos_diduse_space(tx->tx_pool,
                    -used, -compressed, -uncompressed);
                return (used);
        }
        ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

        ASSERT(!dsl_dataset_is_snapshot(ds));
        dmu_buf_will_dirty(ds->ds_dbuf, tx);

        if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
                int64_t delta;

                /* Born after the last snapshot: truly free it. */
                dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
                dsl_free(tx->tx_pool, tx->tx_txg, bp);

                /* Lock order: dd_lock before ds_lock. */
                mutex_enter(&ds->ds_dir->dd_lock);
                mutex_enter(&ds->ds_lock);
                ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
                    !DS_UNIQUE_IS_ACCURATE(ds));
                delta = parent_delta(ds, -used);
                ds->ds_phys->ds_unique_bytes -= used;
                mutex_exit(&ds->ds_lock);
                dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
                    delta, -compressed, -uncompressed, tx);
                dsl_dir_transfer_space(ds->ds_dir, -used - delta,
                    DD_USED_REFRSRV, DD_USED_HEAD, tx);
                mutex_exit(&ds->ds_dir->dd_lock);
        } else {
                dprintf_bp(bp, "putting on dead list: %s", "");
                if (async) {
                        /*
                         * We are here as part of zio's write done callback,
                         * which means we're a zio interrupt thread.  We can't
                         * call dsl_deadlist_insert() now because it may block
                         * waiting for I/O.  Instead, put bp on the deferred
                         * queue and let dsl_pool_sync() finish the job.
                         */
                        bplist_append(&ds->ds_pending_deadlist, bp);
                } else {
                        dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
                }
                ASSERT3U(ds->ds_prev->ds_object, ==,
                    ds->ds_phys->ds_prev_snap_obj);
                ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
                /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
                if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
                    ds->ds_object && bp->blk_birth >
                    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
                        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                        mutex_enter(&ds->ds_prev->ds_lock);
                        ds->ds_prev->ds_phys->ds_unique_bytes += used;
                        mutex_exit(&ds->ds_prev->ds_lock);
                }
                if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
                        /* Space charged to the head now belongs to snaps. */
                        dsl_dir_transfer_space(ds->ds_dir, used,
                            DD_USED_HEAD, DD_USED_SNAP, tx);
                }
        }
        /* Either way, the block no longer counts against this dataset. */
        mutex_enter(&ds->ds_lock);
        ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
        ds->ds_phys->ds_referenced_bytes -= used;
        ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
        ds->ds_phys->ds_compressed_bytes -= compressed;
        ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
        ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
        mutex_exit(&ds->ds_lock);

        return (used);
}
216
217 uint64_t
218 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
219 {
220         uint64_t trysnap = 0;
221
222         if (ds == NULL)
223                 return (0);
224         /*
225          * The snapshot creation could fail, but that would cause an
226          * incorrect FALSE return, which would only result in an
227          * overestimation of the amount of space that an operation would
228          * consume, which is OK.
229          *
230          * There's also a small window where we could miss a pending
231          * snapshot, because we could set the sync task in the quiescing
232          * phase.  So this should only be used as a guess.
233          */
234         if (ds->ds_trysnap_txg >
235             spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
236                 trysnap = ds->ds_trysnap_txg;
237         return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
238 }
239
240 boolean_t
241 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
242     uint64_t blk_birth)
243 {
244         if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
245                 return (B_FALSE);
246
247         ddt_prefetch(dsl_dataset_get_spa(ds), bp);
248
249         return (B_TRUE);
250 }
251
252 /* ARGSUSED */
253 static void
254 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
255 {
256         dsl_dataset_t *ds = dsv;
257
258         ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
259
260         unique_remove(ds->ds_fsid_guid);
261
262         if (ds->ds_objset != NULL)
263                 dmu_objset_evict(ds->ds_objset);
264
265         if (ds->ds_prev) {
266                 dsl_dataset_drop_ref(ds->ds_prev, ds);
267                 ds->ds_prev = NULL;
268         }
269
270         bplist_destroy(&ds->ds_pending_deadlist);
271         if (db != NULL) {
272                 dsl_deadlist_close(&ds->ds_deadlist);
273         } else {
274                 ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
275                 ASSERT(!ds->ds_deadlist.dl_oldfmt);
276         }
277         if (ds->ds_dir)
278                 dsl_dir_close(ds->ds_dir, ds);
279
280         ASSERT(!list_link_active(&ds->ds_synced_link));
281
282         if (mutex_owned(&ds->ds_lock))
283                 mutex_exit(&ds->ds_lock);
284         mutex_destroy(&ds->ds_lock);
285         mutex_destroy(&ds->ds_recvlock);
286         if (mutex_owned(&ds->ds_opening_lock))
287                 mutex_exit(&ds->ds_opening_lock);
288         mutex_destroy(&ds->ds_opening_lock);
289         rw_destroy(&ds->ds_rwlock);
290         cv_destroy(&ds->ds_exclusive_cv);
291
292         kmem_free(ds, sizeof (dsl_dataset_t));
293 }
294
295 static int
296 dsl_dataset_get_snapname(dsl_dataset_t *ds)
297 {
298         dsl_dataset_phys_t *headphys;
299         int err;
300         dmu_buf_t *headdbuf;
301         dsl_pool_t *dp = ds->ds_dir->dd_pool;
302         objset_t *mos = dp->dp_meta_objset;
303
304         if (ds->ds_snapname[0])
305                 return (0);
306         if (ds->ds_phys->ds_next_snap_obj == 0)
307                 return (0);
308
309         err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
310             FTAG, &headdbuf);
311         if (err)
312                 return (err);
313         headphys = headdbuf->db_data;
314         err = zap_value_search(dp->dp_meta_objset,
315             headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
316         dmu_buf_rele(headdbuf, FTAG);
317         return (err);
318 }
319
320 static int
321 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
322 {
323         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
324         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
325         matchtype_t mt;
326         int err;
327
328         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
329                 mt = MT_FIRST;
330         else
331                 mt = MT_EXACT;
332
333         err = zap_lookup_norm(mos, snapobj, name, 8, 1,
334             value, mt, NULL, 0, NULL);
335         if (err == ENOTSUP && mt == MT_FIRST)
336                 err = zap_lookup(mos, snapobj, name, 8, 1, value);
337         return (err);
338 }
339
340 static int
341 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
342 {
343         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
344         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
345         matchtype_t mt;
346         int err;
347
348         dsl_dir_snap_cmtime_update(ds->ds_dir);
349
350         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
351                 mt = MT_FIRST;
352         else
353                 mt = MT_EXACT;
354
355         err = zap_remove_norm(mos, snapobj, name, mt, tx);
356         if (err == ENOTSUP && mt == MT_FIRST)
357                 err = zap_remove(mos, snapobj, name, tx);
358         return (err);
359 }
360
361 static int
362 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
363     dsl_dataset_t **dsp)
364 {
365         objset_t *mos = dp->dp_meta_objset;
366         dmu_buf_t *dbuf;
367         dsl_dataset_t *ds;
368         int err;
369         dmu_object_info_t doi;
370
371         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
372             dsl_pool_sync_context(dp));
373
374         err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
375         if (err)
376                 return (err);
377
378         /* Make sure dsobj has the correct object type. */
379         dmu_object_info_from_db(dbuf, &doi);
380         if (doi.doi_type != DMU_OT_DSL_DATASET)
381                 return (EINVAL);
382
383         ds = dmu_buf_get_user(dbuf);
384         if (ds == NULL) {
385                 dsl_dataset_t *winner;
386
387                 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
388                 ds->ds_dbuf = dbuf;
389                 ds->ds_object = dsobj;
390                 ds->ds_phys = dbuf->db_data;
391
392                 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
393                 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
394                 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
395                 mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
396
397                 rw_init(&ds->ds_rwlock, 0, 0, 0);
398                 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
399
400                 bplist_create(&ds->ds_pending_deadlist);
401                 dsl_deadlist_open(&ds->ds_deadlist,
402                     mos, ds->ds_phys->ds_deadlist_obj);
403
404                 list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
405                     offsetof(dmu_sendarg_t, dsa_link));
406
407                 if (err == 0) {
408                         err = dsl_dir_open_obj(dp,
409                             ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
410                 }
411                 if (err) {
412                         mutex_destroy(&ds->ds_lock);
413                         mutex_destroy(&ds->ds_recvlock);
414                         mutex_destroy(&ds->ds_opening_lock);
415                         rw_destroy(&ds->ds_rwlock);
416                         cv_destroy(&ds->ds_exclusive_cv);
417                         bplist_destroy(&ds->ds_pending_deadlist);
418                         dsl_deadlist_close(&ds->ds_deadlist);
419                         kmem_free(ds, sizeof (dsl_dataset_t));
420                         dmu_buf_rele(dbuf, tag);
421                         return (err);
422                 }
423
424                 if (!dsl_dataset_is_snapshot(ds)) {
425                         ds->ds_snapname[0] = '\0';
426                         if (ds->ds_phys->ds_prev_snap_obj) {
427                                 err = dsl_dataset_get_ref(dp,
428                                     ds->ds_phys->ds_prev_snap_obj,
429                                     ds, &ds->ds_prev);
430                         }
431                 } else {
432                         if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
433                                 err = dsl_dataset_get_snapname(ds);
434                         if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
435                                 err = zap_count(
436                                     ds->ds_dir->dd_pool->dp_meta_objset,
437                                     ds->ds_phys->ds_userrefs_obj,
438                                     &ds->ds_userrefs);
439                         }
440                 }
441
442                 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
443                         /*
444                          * In sync context, we're called with either no lock
445                          * or with the write lock.  If we're not syncing,
446                          * we're always called with the read lock held.
447                          */
448                         boolean_t need_lock =
449                             !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
450                             dsl_pool_sync_context(dp);
451
452                         if (need_lock)
453                                 rw_enter(&dp->dp_config_rwlock, RW_READER);
454
455                         err = dsl_prop_get_ds(ds,
456                             "refreservation", sizeof (uint64_t), 1,
457                             &ds->ds_reserved, NULL);
458                         if (err == 0) {
459                                 err = dsl_prop_get_ds(ds,
460                                     "refquota", sizeof (uint64_t), 1,
461                                     &ds->ds_quota, NULL);
462                         }
463
464                         if (need_lock)
465                                 rw_exit(&dp->dp_config_rwlock);
466                 } else {
467                         ds->ds_reserved = ds->ds_quota = 0;
468                 }
469
470                 if (err == 0) {
471                         winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
472                             dsl_dataset_evict);
473                 }
474                 if (err || winner) {
475                         bplist_destroy(&ds->ds_pending_deadlist);
476                         dsl_deadlist_close(&ds->ds_deadlist);
477                         if (ds->ds_prev)
478                                 dsl_dataset_drop_ref(ds->ds_prev, ds);
479                         dsl_dir_close(ds->ds_dir, ds);
480                         mutex_destroy(&ds->ds_lock);
481                         mutex_destroy(&ds->ds_recvlock);
482                         mutex_destroy(&ds->ds_opening_lock);
483                         rw_destroy(&ds->ds_rwlock);
484                         cv_destroy(&ds->ds_exclusive_cv);
485                         kmem_free(ds, sizeof (dsl_dataset_t));
486                         if (err) {
487                                 dmu_buf_rele(dbuf, tag);
488                                 return (err);
489                         }
490                         ds = winner;
491                 } else {
492                         ds->ds_fsid_guid =
493                             unique_insert(ds->ds_phys->ds_fsid_guid);
494                 }
495         }
496         ASSERT3P(ds->ds_dbuf, ==, dbuf);
497         ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
498         ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
499             spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
500             dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
501         mutex_enter(&ds->ds_lock);
502         if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
503                 mutex_exit(&ds->ds_lock);
504                 dmu_buf_rele(ds->ds_dbuf, tag);
505                 return (ENOENT);
506         }
507         mutex_exit(&ds->ds_lock);
508         *dsp = ds;
509         return (0);
510 }
511
/*
 * Convert a bare reference (from dsl_dataset_get_ref()) into a held
 * dataset by taking ds_rwlock as READER, waiting out a destroy thread
 * that may hold it as WRITER.  Returns ENOENT -- and drops the
 * reference -- if the dataset is destroyed while we wait.  Caller
 * holds dp_config_rwlock as READER (dropped and reacquired while
 * waiting); no lock is taken at all in syncing context.
 */
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        /*
         * In syncing context we don't want the rwlock lock: there
         * may be an existing writer waiting for sync phase to
         * finish.  We don't need to worry about such writers, since
         * sync phase is single-threaded, so the writer can't be
         * doing anything while we are active.
         */
        if (dsl_pool_sync_context(dp)) {
                ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
                return (0);
        }

        /*
         * Normal users will hold the ds_rwlock as a READER until they
         * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
         * drop their READER lock after they set the ds_owner field.
         *
         * If the dataset is being destroyed, the destroy thread will
         * obtain a WRITER lock for exclusive access after it's done its
         * open-context work and then change the ds_owner to
         * dsl_reaper once destruction is assured.  So threads
         * may block here temporarily, until the "destructability" of
         * the dataset is determined.
         */
        ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
        mutex_enter(&ds->ds_lock);
        while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
                rw_exit(&dp->dp_config_rwlock);
                cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
                if (DSL_DATASET_IS_DESTROYED(ds)) {
                        mutex_exit(&ds->ds_lock);
                        dsl_dataset_drop_ref(ds, tag);
                        rw_enter(&dp->dp_config_rwlock, RW_READER);
                        return (ENOENT);
                }
                /*
                 * The dp_config_rwlock lives above the ds_lock. And
                 * we need to check DSL_DATASET_IS_DESTROYED() while
                 * holding the ds_lock, so we have to drop and reacquire
                 * the ds_lock here.
                 */
                mutex_exit(&ds->ds_lock);
                rw_enter(&dp->dp_config_rwlock, RW_READER);
                mutex_enter(&ds->ds_lock);
        }
        mutex_exit(&ds->ds_lock);
        return (0);
}
565
566 int
567 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
568     dsl_dataset_t **dsp)
569 {
570         int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
571
572         if (err)
573                 return (err);
574         return (dsl_dataset_hold_ref(*dsp, tag));
575 }
576
577 int
578 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
579     void *tag, dsl_dataset_t **dsp)
580 {
581         int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
582         if (err)
583                 return (err);
584         if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
585                 dsl_dataset_rele(*dsp, tag);
586                 *dsp = NULL;
587                 return (EBUSY);
588         }
589         return (0);
590 }
591
/*
 * Hold the dataset named "name".  A name of the form pool/fs@snap
 * resolves and holds the snapshot; otherwise the head dataset is held.
 * On success *dsp is the held dataset; release with
 * dsl_dataset_rele().
 */
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
        dsl_dir_t *dd;
        dsl_pool_t *dp;
        const char *snapname;
        uint64_t obj;
        int err = 0;

        /* snapname is left pointing at the '@' suffix, if any. */
        err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
        if (err)
                return (err);

        dp = dd->dd_pool;
        obj = dd->dd_phys->dd_head_dataset_obj;
        rw_enter(&dp->dp_config_rwlock, RW_READER);
        if (obj)
                err = dsl_dataset_get_ref(dp, obj, tag, dsp);
        else
                err = ENOENT;
        if (err)
                goto out;

        err = dsl_dataset_hold_ref(*dsp, tag);

        /* we may be looking for a snapshot */
        if (err == 0 && snapname != NULL) {
                dsl_dataset_t *ds = NULL;

                if (*snapname++ != '@') {
                        dsl_dataset_rele(*dsp, tag);
                        err = ENOENT;
                        goto out;
                }

                dprintf("looking for snapshot '%s'\n", snapname);
                err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
                if (err == 0)
                        err = dsl_dataset_get_ref(dp, obj, tag, &ds);
                /* The hold on the head dataset is no longer needed. */
                dsl_dataset_rele(*dsp, tag);

                ASSERT3U((err == 0), ==, (ds != NULL));

                if (ds) {
                        /* Cache the snapshot's name for later printing. */
                        mutex_enter(&ds->ds_lock);
                        if (ds->ds_snapname[0] == 0)
                                (void) strlcpy(ds->ds_snapname, snapname,
                                    sizeof (ds->ds_snapname));
                        mutex_exit(&ds->ds_lock);
                        err = dsl_dataset_hold_ref(ds, tag);
                        *dsp = err ? NULL : ds;
                }
        }
out:
        rw_exit(&dp->dp_config_rwlock);
        dsl_dir_close(dd, FTAG);
        return (err);
}
650
651 int
652 dsl_dataset_own(const char *name, boolean_t inconsistentok,
653     void *tag, dsl_dataset_t **dsp)
654 {
655         int err = dsl_dataset_hold(name, tag, dsp);
656         if (err)
657                 return (err);
658         if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
659                 dsl_dataset_rele(*dsp, tag);
660                 return (EBUSY);
661         }
662         return (0);
663 }
664
/*
 * Write this dataset's full name ("pool/fs[@snap]", or "mos" for
 * ds == NULL) into "name".  Uses unbounded strcpy/strcat, so the
 * buffer must be large enough for the longest possible dataset name
 * -- presumably MAXNAMELEN; confirm against callers.
 */
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
        if (ds == NULL) {
                (void) strcpy(name, "mos");
        } else {
                dsl_dir_name(ds->ds_dir, name);
                VERIFY(0 == dsl_dataset_get_snapname(ds));
                if (ds->ds_snapname[0]) {
                        (void) strcat(name, "@");
                        /*
                         * We use a "recursive" mutex so that we
                         * can call dprintf_ds() with ds_lock held.
                         */
                        if (!MUTEX_HELD(&ds->ds_lock)) {
                                mutex_enter(&ds->ds_lock);
                                (void) strcat(name, ds->ds_snapname);
                                mutex_exit(&ds->ds_lock);
                        } else {
                                (void) strcat(name, ds->ds_snapname);
                        }
                }
        }
}
689
690 static int
691 dsl_dataset_namelen(dsl_dataset_t *ds)
692 {
693         int result;
694
695         if (ds == NULL) {
696                 result = 3;     /* "mos" */
697         } else {
698                 result = dsl_dir_namelen(ds->ds_dir);
699                 VERIFY(0 == dsl_dataset_get_snapname(ds));
700                 if (ds->ds_snapname[0]) {
701                         ++result;       /* adding one for the @-sign */
702                         if (!MUTEX_HELD(&ds->ds_lock)) {
703                                 mutex_enter(&ds->ds_lock);
704                                 result += strlen(ds->ds_snapname);
705                                 mutex_exit(&ds->ds_lock);
706                         } else {
707                                 result += strlen(ds->ds_snapname);
708                         }
709                 }
710         }
711
712         return (result);
713 }
714
/*
 * Release a reference obtained from dsl_dataset_get_ref().  The
 * in-core dsl_dataset_t is torn down (via dsl_dataset_evict) when the
 * last hold on the bonus buffer is dropped.
 */
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
        dmu_buf_rele(ds->ds_dbuf, tag);
}
720
/*
 * Release a dataset held with dsl_dataset_hold*(): drop the READER
 * lock taken by dsl_dataset_hold_ref() (which is not taken in syncing
 * context) and then the underlying reference.
 */
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
        if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
                rw_exit(&ds->ds_rwlock);
        }
        dsl_dataset_drop_ref(ds, tag);
}
729
/*
 * Give up ownership of a dataset acquired with dsl_dataset_own*() or
 * dsl_dataset_tryown(), waking any threads blocked in
 * dsl_dataset_hold_ref() waiting for exclusive access to end.  If the
 * on-disk dataset has already been destroyed (ds_dbuf == NULL), tear
 * down the in-core state directly.
 */
void
dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
{
        ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
            (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

        mutex_enter(&ds->ds_lock);
        ds->ds_owner = NULL;
        if (RW_WRITE_HELD(&ds->ds_rwlock)) {
                /* Set via dsl_dataset_make_exclusive(); let waiters in. */
                rw_exit(&ds->ds_rwlock);
                cv_broadcast(&ds->ds_exclusive_cv);
        }
        mutex_exit(&ds->ds_lock);
        if (ds->ds_dbuf)
                dsl_dataset_drop_ref(ds, tag);
        else
                dsl_dataset_evict(NULL, ds);
}
748
749 boolean_t
750 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
751 {
752         boolean_t gotit = FALSE;
753
754         mutex_enter(&ds->ds_lock);
755         if (ds->ds_owner == NULL &&
756             (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
757                 ds->ds_owner = tag;
758                 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
759                         rw_exit(&ds->ds_rwlock);
760                 gotit = TRUE;
761         }
762         mutex_exit(&ds->ds_lock);
763         return (gotit);
764 }
765
/*
 * Upgrade the current owner's access to exclusive by taking ds_rwlock
 * as WRITER, blocking until all readers have drained.  Caller must be
 * the dataset's owner.
 */
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
        ASSERT3P(owner, ==, ds->ds_owner);
        if (!RW_WRITE_HELD(&ds->ds_rwlock))
                rw_enter(&ds->ds_rwlock, RW_WRITER);
}
773
/*
 * Allocate and initialize the on-disk state for a new head dataset in
 * dsl_dir "dd", optionally cloned from snapshot "origin" (NULL means
 * use the pool-wide origin snapshot, if any).  Must run in syncing
 * context.  Returns the object number of the new dsl_dataset_phys_t.
 */
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	/* The dsl_dir must not already have a head dataset. */
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	/* A guid of 0 is reserved; keep drawing random bits until nonzero. */
	do {
		(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
		    sizeof (dsphys->ds_guid));
	} while (dsphys->ds_guid == 0);
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;

	if (origin == NULL) {
		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
	} else {
		dsl_dataset_t *ohds;

		/* Inherit the origin's contents and space accounting. */
		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_referenced_bytes =
		    origin->ds_phys->ds_referenced_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		/*
		 * The new clone starts with a copy of the deadlist of the
		 * origin's head dataset, truncated at the branch point.
		 */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
		dsl_dataset_rele(ohds, FTAG);

		/* Record this clone in the origin's next_clones zap. */
		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
		/* Also record it in the origin dir's dd_clones zap. */
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			if (origin->ds_dir->dd_phys->dd_clones == 0) {
				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
				origin->ds_dir->dd_phys->dd_clones =
				    zap_create(mos,
				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY3U(0, ==, zap_add_int(mos,
			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
		}
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	/* Install the new dataset as the head of its dsl_dir. */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
872
/*
 * Create a new dataset named "lastname" under parent dir "pdd",
 * optionally cloned from "origin", and grant create-time permissions
 * for credential "cr".  Syncing context only; returns the new
 * dataset's object number.
 */
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	/* lastname is a filesystem component, never a snapshot name. */
	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	/*
	 * If we are creating a clone, make sure we zero out any stale
	 * data from the origin snapshots zil header.
	 */
	if (origin != NULL) {
		dsl_dataset_t *ds;
		objset_t *os;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
		dsl_dataset_dirty(ds, tx);
		dsl_dataset_rele(ds, FTAG);
	}

	return (dsobj);
}
909
910 #ifdef __FreeBSD__
911 /* FreeBSD ioctl compat begin */
/*
 * Callback state for dmu_get_recursive_snaps_nvl(): collects
 * "<fs>@<snapname>" names for each descendant filesystem into nvl.
 */
struct destroyarg {
	nvlist_t *nvl;		/* accumulates snapshot names */
	const char *snapname;	/* snapshot name appended to each fs name */
};
916
917 static int
918 dsl_check_snap_cb(const char *name, void *arg)
919 {
920         struct destroyarg *da = arg;
921         dsl_dataset_t *ds;
922         char *dsname;
923
924         dsname = kmem_asprintf("%s@%s", name, da->snapname);
925         VERIFY(nvlist_add_boolean(da->nvl, dsname) == 0);
926
927         return (0);
928 }
929
930 int
931 dmu_get_recursive_snaps_nvl(const char *fsname, const char *snapname,
932     nvlist_t *snaps)
933 {
934         struct destroyarg *da;
935         int err;
936
937         da = kmem_zalloc(sizeof (struct destroyarg), KM_SLEEP);
938         da->nvl = snaps;
939         da->snapname = snapname;
940         err = dmu_objset_find(fsname, dsl_check_snap_cb, da,
941             DS_FIND_CHILDREN);
942         kmem_free(da, sizeof (struct destroyarg));
943
944         return (err);
945 }
946 /* FreeBSD ioctl compat end */
947 #endif /* __FreeBSD__ */
948
/*
 * Destroy, in one sync task group, all the snapshots named in "snaps".
 * If "defer" is set, held snapshots are marked for deferred destroy
 * instead of failing.  On error, "failed" is filled in with the name
 * that triggered it.
 * The snapshots must all be in the same pool.
 */
int
dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer, char *failed)
{
	int err;
	dsl_sync_task_t *dst;
	spa_t *spa;
	nvpair_t *pair;
	dsl_sync_task_group_t *dstg;

	pair = nvlist_next_nvpair(snaps, NULL);
	if (pair == NULL)
		return (0);

	/* Any entry's name will do to locate the (single) pool. */
	err = spa_open(nvpair_name(pair), &spa, FTAG);
	if (err)
		return (err);
	dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	/* Own each snapshot (tagged with dstg) and queue a destroy task. */
	for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(snaps, pair)) {
		dsl_dataset_t *ds;

		err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
		if (err == 0) {
			struct dsl_ds_destroyarg *dsda;

			dsl_dataset_make_exclusive(ds, dstg);
			dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
			    KM_SLEEP);
			dsda->ds = ds;
			dsda->defer = defer;
			dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
			    dsl_dataset_destroy_sync, dsda, dstg, 0);
		} else if (err == ENOENT) {
			/* Already gone; not an error for a destroy. */
			err = 0;
		} else {
			(void) strcpy(failed, nvpair_name(pair));
			break;
		}
	}

	if (err == 0)
		err = dsl_sync_task_group_wait(dstg);

	/* Tear down each queued task, whether or not it ran successfully. */
	for (dst = list_head(&dstg->dstg_tasks); dst;
	    dst = list_next(&dstg->dstg_tasks, dst)) {
		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
		dsl_dataset_t *ds = dsda->ds;

		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, failed);
		}
		ASSERT3P(dsda->rm_origin, ==, NULL);
		dsl_dataset_disown(ds, dstg);
		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
	}

	dsl_sync_task_group_destroy(dstg);
	spa_close(spa, FTAG);
	return (err);

}
1017
1018 static boolean_t
1019 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
1020 {
1021         boolean_t might_destroy = B_FALSE;
1022
1023         mutex_enter(&ds->ds_lock);
1024         if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
1025             DS_IS_DEFER_DESTROY(ds))
1026                 might_destroy = B_TRUE;
1027         mutex_exit(&ds->ds_lock);
1028
1029         return (might_destroy);
1030 }
1031
/*
 * If we're removing a clone, and these three conditions are true:
 *      1) the clone's origin has no other children
 *      2) the clone's origin has no user references
 *      3) the clone's origin has been marked for deferred destruction
 * Then, prepare to remove the origin as part of this sync task group.
 *
 * On success, dsda->rm_origin holds an owned, exclusive reference to
 * the origin (or stays NULL if the origin won't be destroyed).
 */
static int
dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *origin = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(origin)) {
		char *name;
		int namelen;
		int error;

		namelen = dsl_dataset_namelen(origin) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(origin, name);
#ifdef _KERNEL
		/* The origin snapshot must be unmounted before destroy. */
		error = zfs_unmount_snap(name, NULL);
		if (error) {
			kmem_free(name, namelen);
			return (error);
		}
#endif
		/* Re-acquire the origin by name, this time as owner. */
		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
		kmem_free(name, namelen);
		if (error)
			return (error);
		dsda->rm_origin = origin;
		dsl_dataset_make_exclusive(origin, tag);
	}

	return (0);
}
1070
/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;
	struct dsl_ds_destroyarg dsda = { 0 };
	dsl_dataset_t dummy_ds = { 0 };

	dsda.ds = ds;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		dsda.defer = defer;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    &dsda, tag, 0);
		ASSERT3P(dsda.rm_origin, ==, NULL);
		goto out;
	} else if (defer) {
		/* Deferred destroy is only supported on snapshots. */
		err = EINVAL;
		goto out;
	}

	dd = ds->ds_dir;
	/* dummy_ds stands in for the head dataset in dsl_dir_destroy_*(). */
	dummy_ds.ds_dir = dd;
	dummy_ds.ds_object = ds->ds_object;

	if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		/*
		 * Check for errors and mark this ds as inconsistent, in
		 * case we crash while freeing the objects.
		 */
		err = dsl_sync_task_do(dd->dd_pool,
		    dsl_dataset_destroy_begin_check,
		    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
		if (err)
			goto out;

		err = dmu_objset_from_ds(ds, &os);
		if (err)
			goto out;

		/*
		 * Remove all objects while in the open context so that
		 * there is less work to do in the syncing context.
		 */
		for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
		    ds->ds_phys->ds_prev_snap_txg)) {
			/*
			 * Ignore errors, if there is not enough disk space
			 * we will deal with it in dsl_dataset_destroy_sync().
			 */
			(void) dmu_free_object(os, obj);
		}
		/* ESRCH means we walked off the end of the object set. */
		if (err != ESRCH)
			goto out;

		/*
		 * Sync out all in-flight IO.
		 */
		txg_wait_synced(dd->dd_pool, 0);

		/*
		 * If we managed to free all the objects in open
		 * context, the user space accounting should be zero.
		 */
		if (ds->ds_phys->ds_bp.blk_fill == 0 &&
		    dmu_objset_userused_enabled(os)) {
			uint64_t count;

			ASSERT(zap_count(os, DMU_USERUSED_OBJECT,
			    &count) != 0 || count == 0);
			ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT,
			    &count) != 0 || count == 0);
		}
	}

	/* Take an extra hold on the dsl_dir for the destroy tasks. */
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	/*
	 * If we're removing a clone, we might also need to remove its
	 * origin.
	 */
	do {
		dsda.need_prep = B_FALSE;
		if (dsl_dir_is_clone(dd)) {
			err = dsl_dataset_origin_rm_prep(&dsda, tag);
			if (err) {
				dsl_dir_close(dd, FTAG);
				goto out;
			}
		}

		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, &dsda, tag, 0);
		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
		err = dsl_sync_task_group_wait(dstg);
		dsl_sync_task_group_destroy(dstg);

		/*
		 * We could be racing against 'zfs release' or 'zfs destroy -d'
		 * on the origin snap, in which case we can get EBUSY if we
		 * needed to destroy the origin snap but were not ready to
		 * do so.
		 */
		if (dsda.need_prep) {
			ASSERT(err == EBUSY);
			ASSERT(dsl_dir_is_clone(dd));
			ASSERT(dsda.rm_origin == NULL);
		}
	} while (dsda.need_prep);

	/* Drop the origin ownership taken by origin_rm_prep(), if any. */
	if (dsda.rm_origin != NULL)
		dsl_dataset_disown(dsda.rm_origin, tag);

	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
1214
1215 blkptr_t *
1216 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1217 {
1218         return (&ds->ds_phys->ds_bp);
1219 }
1220
1221 void
1222 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1223 {
1224         ASSERT(dmu_tx_is_syncing(tx));
1225         /* If it's the meta-objset, set dp_meta_rootbp */
1226         if (ds == NULL) {
1227                 tx->tx_pool->dp_meta_rootbp = *bp;
1228         } else {
1229                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1230                 ds->ds_phys->ds_bp = *bp;
1231         }
1232 }
1233
1234 spa_t *
1235 dsl_dataset_get_spa(dsl_dataset_t *ds)
1236 {
1237         return (ds->ds_dir->dd_pool->dp_spa);
1238 }
1239
/*
 * Add "ds" to the current txg's dirty-dataset list so it is written
 * out at sync time.  The first dirtying in a txg takes an extra dbuf
 * hold, released when the dataset is synced.  ds may be NULL (the
 * meta-objset), which needs no dirtying here.
 */
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_objset != NULL);

	/* Snapshots are read-only; dirtying one indicates a bug. */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
1260
1261 boolean_t
1262 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1263 {
1264         for (int t = 0; t < TXG_SIZE; t++) {
1265                 if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1266                     ds, t))
1267                         return (B_TRUE);
1268         }
1269         return (B_FALSE);
1270 }
1271
1272 /*
1273  * The unique space in the head dataset can be calculated by subtracting
1274  * the space used in the most recent snapshot, that is still being used
1275  * in this file system, from the space currently in use.  To figure out
1276  * the space in the most recent snapshot still in use, we need to take
1277  * the total space used in the snapshot and subtract out the space that
1278  * has been freed up since the snapshot was taken.
1279  */
1280 static void
1281 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1282 {
1283         uint64_t mrs_used;
1284         uint64_t dlused, dlcomp, dluncomp;
1285
1286         ASSERT(!dsl_dataset_is_snapshot(ds));
1287
1288         if (ds->ds_phys->ds_prev_snap_obj != 0)
1289                 mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1290         else
1291                 mrs_used = 0;
1292
1293         dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1294
1295         ASSERT3U(dlused, <=, mrs_used);
1296         ds->ds_phys->ds_unique_bytes =
1297             ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1298
1299         if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1300             SPA_VERSION_UNIQUE_ACCURATE)
1301                 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1302 }
1303
/*
 * Argument bundle for kill_blkptr(): the dataset whose blocks are
 * being freed and the transaction to charge the frees against.
 */
struct killarg {
	dsl_dataset_t *ds;	/* dataset being destroyed */
	dmu_tx_t *tx;		/* open syncing transaction */
};
1308
/*
 * traverse_dataset() callback that frees each visited block pointer.
 * ZIL blocks carry no dataset accounting and are freed directly; all
 * other blocks go through dsl_dataset_block_kill() so space accounting
 * is updated.  Holes (bp == NULL) are skipped.
 */
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL)
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		/* Blocks born before the previous snapshot aren't ours. */
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}
1335
1336 /* ARGSUSED */
1337 static int
1338 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1339 {
1340         dsl_dataset_t *ds = arg1;
1341         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1342         uint64_t count;
1343         int err;
1344
1345         /*
1346          * Can't delete a head dataset if there are snapshots of it.
1347          * (Except if the only snapshots are from the branch we cloned
1348          * from.)
1349          */
1350         if (ds->ds_prev != NULL &&
1351             ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1352                 return (EBUSY);
1353
1354         /*
1355          * This is really a dsl_dir thing, but check it here so that
1356          * we'll be less likely to leave this dataset inconsistent &
1357          * nearly destroyed.
1358          */
1359         err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1360         if (err)
1361                 return (err);
1362         if (count != 0)
1363                 return (EEXIST);
1364
1365         return (0);
1366 }
1367
/*
 * Sync half of the destroy-begin task: flag the dataset inconsistent
 * on disk (so a crash mid-destroy is detectable) and log the event.
 */
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    "dataset = %llu", ds->ds_object);
}
1382
/*
 * When destroying a clone, decide whether its origin snapshot should
 * be destroyed along with it.  If the origin is eligible but has not
 * yet been prepared (owned via dsl_dataset_origin_rm_prep()), set
 * need_prep and return EBUSY so the caller can prepare and retry.
 */
static int
dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dsda->ds;
	dsl_dataset_t *ds_prev = ds->ds_prev;

	if (dsl_dataset_might_destroy_origin(ds_prev)) {
		struct dsl_ds_destroyarg ndsda = {0};

		/*
		 * If we're not prepared to remove the origin, don't remove
		 * the clone either.
		 */
		if (dsda->rm_origin == NULL) {
			dsda->need_prep = B_TRUE;
			return (EBUSY);
		}

		/* Verify that the origin itself passes the destroy checks. */
		ndsda.ds = ds_prev;
		ndsda.is_origin_rm = B_TRUE;
		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
	}

	/*
	 * If we're not going to remove the origin after all,
	 * undo the open context setup.
	 */
	if (dsda->rm_origin != NULL) {
		dsl_dataset_disown(dsda->rm_origin, tag);
		dsda->rm_origin = NULL;
	}

	return (0);
}
1418
/*
 * Check half of the dataset-destroy sync task: decide whether the
 * dataset in dsda->ds may be destroyed in this txg.
 *
 * If you add new checks here, you may need to add
 * additional checks to the "temporary" case in
 * snapshot_check() in dmu_objset.c.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct dsl_ds_destroyarg *dsda = arg1;
	dsl_dataset_t *ds = dsda->ds;

	/* we have an owner hold, so noone else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (dsda->defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (ENOTSUP);
		ASSERT(dsl_dataset_is_snapshot(ds));
		return (0);
	}

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EBUSY);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * If this snapshot has an elevated user reference count,
		 * we can't destroy it yet.
		 */
		if (ds->ds_userrefs > 0 && !dsda->releasing)
			return (EBUSY);

		mutex_enter(&ds->ds_lock);
		/*
		 * Can't delete a branch point. However, if we're destroying
		 * a clone and removing its origin due to it having a user
		 * hold count of 0 and having been marked for deferred destroy,
		 * it's OK for the origin to have a single clone.
		 */
		if (ds->ds_phys->ds_num_children >
		    (dsda->is_origin_rm ? 2 : 1)) {
			mutex_exit(&ds->ds_lock);
			return (EEXIST);
		}
		mutex_exit(&ds->ds_lock);
	} else if (dsl_dir_is_clone(ds->ds_dir)) {
		return (dsl_dataset_origin_check(dsda, arg2, tx));
	}

	/* XXX we should do some i/o error checking... */
	return (0);
}
1490
/*
 * Synchronization state for waiting until a dataset's dbuf references
 * have drained; see dsl_dataset_drain_refs().
 */
struct refsarg {
	kmutex_t lock;		/* protects gone */
	boolean_t gone;		/* set once the dbuf eviction callback fires */
	kcondvar_t cv;		/* signaled when gone becomes true */
};
1496
/*
 * dbuf user-eviction callback: the last reference to the dataset's
 * dbuf is gone, so wake the waiter in dsl_dataset_drain_refs().
 */
/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	/* Signal while holding the lock so the waiter can't miss it. */
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}
1508
/*
 * Block until every reference to ds's dbuf has been released: install
 * dsl_dataset_refs_gone() as the dbuf's user-eviction callback, drop
 * our own hold, then wait for the callback to fire.
 */
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	bzero(&arg, sizeof(arg));
	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	/* The dbuf has been evicted; clear the now-dangling pointers. */
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
1531
/*
 * Remove clone "obj" from snapshot ds's next_clones zap.  ENOENT is
 * tolerated (see comment below); any other failure is fatal.
 */
static void
remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	ASSERT(ds->ds_phys->ds_num_children >= 2);
	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
	/*
	 * The err should not be ENOENT, but a bug in a previous version
	 * of the code could cause upgrade_clones_cb() to not set
	 * ds_next_snap_obj when it should, leading to a missing entry.
	 * If we knew that the pool was created after
	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
	 * ENOENT.  However, at least we can check that we don't have
	 * too many entries in the next_clones_obj even after failing to
	 * remove this one.
	 */
	if (err != ENOENT) {
		VERIFY3U(err, ==, 0);
	}
	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
	    &count));
	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
}
1558
/*
 * Recursively remove deadlist key "mintxg" from every clone of "ds"
 * that branched off after that txg; used when the snapshot created at
 * mintxg is destroyed.
 */
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (ds->ds_dir->dd_phys->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}
1590
/*
 * State shared with process_old_cb() while processing a dying
 * snapshot's old-format deadlist; see process_old_deadlist().
 */
struct process_old_arg {
	dsl_dataset_t *ds;		/* snapshot being destroyed */
	dsl_dataset_t *ds_prev;		/* its previous snapshot, if any */
	boolean_t after_branch_point;
	zio_t *pio;			/* root zio for the async frees */
	uint64_t used, comp, uncomp;	/* totals for space freed outright */
};
1598
/*
 * bpobj_iterate() callback: for each block on the next snapshot's
 * deadlist, either keep it on our own deadlist (if it was born at or
 * before our previous-snapshot boundary) or free it now and tally the
 * reclaimed space in poa.
 */
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
			/* The block becomes unique to the previous snap. */
			poa->ds_prev->ds_phys->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}
1621
/*
 * Old-format deadlist handling for snapshot destroy: walk ds_next's
 * deadlist, freeing the blocks unique to the dying snapshot "ds" and
 * re-homing the rest, then swap the two deadlist objects so ds_next
 * inherits the merged result.
 */
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY3U(zio_wait(poa.pio), ==, 0);
	/* Everything we freed should have been ds's unique space. */
	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
	    ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    ds_next->ds_phys->ds_deadlist_obj);
}
1655
/*
 * Synchronously free every block the dataset points to, via a full
 * dataset traversal (used when the async_destroy feature is not
 * enabled).  Returns the traversal error (expected to be 0).
 */
static int
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	int err;
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone)
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	err = traverse_dataset(ds,
	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
	    kill_blkptr, &ka);
	ASSERT3U(err, ==, 0);
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);

	return (err);
}
1679
/*
 * Sync task that destroys a dataset (snapshot or head).  arg1 is a
 * struct dsl_ds_destroyarg describing the dataset, whether destruction
 * is deferred, and an optional origin to destroy afterwards.  If the
 * destroy is deferred and the dataset is still held (user refs or
 * multiple children), only the DEFER_DESTROY flag is set.  Otherwise
 * the dataset's on-disk linkage, deadlist, space accounting, and
 * namespace entries are all torn down in this txg.
 */
void
dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
        struct dsl_ds_destroyarg *dsda = arg1;
        dsl_dataset_t *ds = dsda->ds;
        int err;
        int after_branch_point = FALSE;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        dsl_dataset_t *ds_prev = NULL;
        boolean_t wont_destroy;
        uint64_t obj;

        /*
         * A deferred destroy only marks the dataset if it still has
         * user holds or more than one child (i.e. clones).
         */
        wont_destroy = (dsda->defer &&
            (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));

        ASSERT(ds->ds_owner || wont_destroy);
        ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
        ASSERT(ds->ds_prev == NULL ||
            ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
        ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

        if (wont_destroy) {
                /* Just flag it; actual destruction happens later. */
                ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
                return;
        }

        /* signal any waiters that this dataset is going away */
        mutex_enter(&ds->ds_lock);
        ds->ds_owner = dsl_reaper;
        cv_broadcast(&ds->ds_exclusive_cv);
        mutex_exit(&ds->ds_lock);

        /* Remove our reservation (refreservation goes to zero). */
        if (ds->ds_reserved != 0) {
                dsl_prop_setarg_t psa;
                uint64_t value = 0;

                dsl_prop_setarg_init_uint64(&psa, "refreservation",
                    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
                    &value);
                psa.psa_effective_value = 0;    /* predict default value */

                dsl_dataset_set_reservation_sync(ds, &psa, tx);
                ASSERT3U(ds->ds_reserved, ==, 0);
        }

        ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

        /* Let the scrub/resilver code forget about this dataset. */
        dsl_scan_ds_destroyed(ds, tx);

        obj = ds->ds_object;

        /*
         * Unlink ourselves from the previous snapshot's on-disk state.
         */
        if (ds->ds_phys->ds_prev_snap_obj != 0) {
                if (ds->ds_prev) {
                        ds_prev = ds->ds_prev;
                } else {
                        VERIFY(0 == dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
                }
                /*
                 * "after the branch point" means prev's next snapshot
                 * is not us, i.e. we are (or descend from) a clone.
                 */
                after_branch_point =
                    (ds_prev->ds_phys->ds_next_snap_obj != obj);

                dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
                if (after_branch_point &&
                    ds_prev->ds_phys->ds_next_clones_obj != 0) {
                        /* Replace us with our next snapshot (if any). */
                        remove_from_next_clones(ds_prev, obj, tx);
                        if (ds->ds_phys->ds_next_snap_obj != 0) {
                                VERIFY(0 == zap_add_int(mos,
                                    ds_prev->ds_phys->ds_next_clones_obj,
                                    ds->ds_phys->ds_next_snap_obj, tx));
                        }
                }
                if (after_branch_point &&
                    ds->ds_phys->ds_next_snap_obj == 0) {
                        /* This clone is toast. */
                        ASSERT(ds_prev->ds_phys->ds_num_children > 1);
                        ds_prev->ds_phys->ds_num_children--;

                        /*
                         * If the clone's origin has no other clones, no
                         * user holds, and has been marked for deferred
                         * deletion, then we should have done the necessary
                         * destroy setup for it.
                         */
                        if (ds_prev->ds_phys->ds_num_children == 1 &&
                            ds_prev->ds_userrefs == 0 &&
                            DS_IS_DEFER_DESTROY(ds_prev)) {
                                ASSERT3P(dsda->rm_origin, !=, NULL);
                        } else {
                                ASSERT3P(dsda->rm_origin, ==, NULL);
                        }
                } else if (!after_branch_point) {
                        /* Splice prev directly to our next snapshot. */
                        ds_prev->ds_phys->ds_next_snap_obj =
                            ds->ds_phys->ds_next_snap_obj;
                }
        }

        if (dsl_dataset_is_snapshot(ds)) {
                /* Destroying a snapshot: fold our state into next. */
                dsl_dataset_t *ds_next;
                uint64_t old_unique;
                uint64_t used = 0, comp = 0, uncomp = 0;

                VERIFY(0 == dsl_dataset_hold_obj(dp,
                    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
                ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

                old_unique = ds_next->ds_phys->ds_unique_bytes;

                /* Point next's prev-snapshot linkage past us. */
                dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
                ds_next->ds_phys->ds_prev_snap_obj =
                    ds->ds_phys->ds_prev_snap_obj;
                ds_next->ds_phys->ds_prev_snap_txg =
                    ds->ds_phys->ds_prev_snap_txg;
                ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
                    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);


                if (ds_next->ds_deadlist.dl_oldfmt) {
                        /* Old-format deadlist: iterate it explicitly. */
                        process_old_deadlist(ds, ds_prev, ds_next,
                            after_branch_point, tx);
                } else {
                        /* Adjust prev's unique space. */
                        if (ds_prev && !after_branch_point) {
                                dsl_deadlist_space_range(&ds_next->ds_deadlist,
                                    ds_prev->ds_phys->ds_prev_snap_txg,
                                    ds->ds_phys->ds_prev_snap_txg,
                                    &used, &comp, &uncomp);
                                ds_prev->ds_phys->ds_unique_bytes += used;
                        }

                        /* Adjust snapused. */
                        dsl_deadlist_space_range(&ds_next->ds_deadlist,
                            ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
                            &used, &comp, &uncomp);
                        dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
                            -used, -comp, -uncomp, tx);

                        /* Move blocks to be freed to pool's free list. */
                        dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
                            &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
                            tx);
                        dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
                            DD_USED_HEAD, used, comp, uncomp, tx);

                        /* Merge our deadlist into next's and free it. */
                        dsl_deadlist_merge(&ds_next->ds_deadlist,
                            ds->ds_phys->ds_deadlist_obj, tx);
                }
                dsl_deadlist_close(&ds->ds_deadlist);
                dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);

                /* Collapse range in clone heads */
                dsl_dataset_remove_clones_key(ds,
                    ds->ds_phys->ds_creation_txg, tx);

                if (dsl_dataset_is_snapshot(ds_next)) {
                        dsl_dataset_t *ds_nextnext;

                        /*
                         * Update next's unique to include blocks which
                         * were previously shared by only this snapshot
                         * and it.  Those blocks will be born after the
                         * prev snap and before this snap, and will have
                         * died after the next snap and before the one
                         * after that (ie. be on the snap after next's
                         * deadlist).
                         */
                        VERIFY(0 == dsl_dataset_hold_obj(dp,
                            ds_next->ds_phys->ds_next_snap_obj,
                            FTAG, &ds_nextnext));
                        dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
                            ds->ds_phys->ds_prev_snap_txg,
                            ds->ds_phys->ds_creation_txg,
                            &used, &comp, &uncomp);
                        ds_next->ds_phys->ds_unique_bytes += used;
                        dsl_dataset_rele(ds_nextnext, FTAG);
                        ASSERT3P(ds_next->ds_prev, ==, NULL);

                        /* Collapse range in this head. */
                        dsl_dataset_t *hds;
                        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
                            ds->ds_dir->dd_phys->dd_head_dataset_obj,
                            FTAG, &hds));
                        dsl_deadlist_remove_key(&hds->ds_deadlist,
                            ds->ds_phys->ds_creation_txg, tx);
                        dsl_dataset_rele(hds, FTAG);

                } else {
                        /* Next is the head: re-point its ds_prev ref. */
                        ASSERT3P(ds_next->ds_prev, ==, ds);
                        dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
                        ds_next->ds_prev = NULL;
                        if (ds_prev) {
                                VERIFY(0 == dsl_dataset_get_ref(dp,
                                    ds->ds_phys->ds_prev_snap_obj,
                                    ds_next, &ds_next->ds_prev));
                        }

                        dsl_dataset_recalc_head_uniq(ds_next);

                        /*
                         * Reduce the amount of our unconsumed refreservation
                         * being charged to our parent by the amount of
                         * new unique data we have gained.
                         */
                        if (old_unique < ds_next->ds_reserved) {
                                int64_t mrsdelta;
                                uint64_t new_unique =
                                    ds_next->ds_phys->ds_unique_bytes;

                                ASSERT(old_unique <= new_unique);
                                mrsdelta = MIN(new_unique - old_unique,
                                    ds_next->ds_reserved - old_unique);
                                dsl_dir_diduse_space(ds->ds_dir,
                                    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
                        }
                }
                dsl_dataset_rele(ds_next, FTAG);
        } else {
                zfeature_info_t *async_destroy =
                    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
                objset_t *os;

                /*
                 * There's no next snapshot, so this is a head dataset.
                 * Destroy the deadlist.  Unless it's a clone, the
                 * deadlist should be empty.  (If it's a clone, it's
                 * safe to ignore the deadlist contents.)
                 */
                dsl_deadlist_close(&ds->ds_deadlist);
                dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
                ds->ds_phys->ds_deadlist_obj = 0;

                VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));

                if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
                        /* Legacy path: free all blocks in this txg. */
                        err = old_synchronous_dataset_destroy(ds, tx);
                } else {
                        /*
                         * Move the bptree into the pool's list of trees to
                         * clean up and update space accounting information.
                         */
                        uint64_t used, comp, uncomp;

                        zil_destroy_sync(dmu_objset_zil(os), tx);

                        if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
                                /* First async destroy: create the bptree. */
                                spa_feature_incr(dp->dp_spa, async_destroy, tx);
                                dp->dp_bptree_obj = bptree_alloc(mos, tx);
                                VERIFY(zap_add(mos,
                                    DMU_POOL_DIRECTORY_OBJECT,
                                    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
                                    &dp->dp_bptree_obj, tx) == 0);
                        }

                        used = ds->ds_dir->dd_phys->dd_used_bytes;
                        comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
                        uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;

                        ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
                            ds->ds_phys->ds_unique_bytes == used);

                        /* Account the space as "free dir" until freed. */
                        bptree_add(mos, dp->dp_bptree_obj,
                            &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
                            used, comp, uncomp, tx);
                        dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
                            -used, -comp, -uncomp, tx);
                        dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
                            used, comp, uncomp, tx);
                }

                if (ds->ds_prev != NULL) {
                        /* Remove us from our origin's clone list. */
                        if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
                                VERIFY3U(0, ==, zap_remove_int(mos,
                                    ds->ds_prev->ds_dir->dd_phys->dd_clones,
                                    ds->ds_object, tx));
                        }
                        dsl_dataset_rele(ds->ds_prev, ds);
                        ds->ds_prev = ds_prev = NULL;
                }
        }

        /*
         * This must be done after the dsl_traverse(), because it will
         * re-open the objset.
         */
        if (ds->ds_objset) {
                dmu_objset_evict(ds->ds_objset);
                ds->ds_objset = NULL;
        }

        if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
                /* Erase the link in the dir */
                dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
                ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
                ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
                err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
                ASSERT(err == 0);
        } else {
                /* remove from snapshot namespace */
                dsl_dataset_t *ds_head;
                ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
                VERIFY(0 == dsl_dataset_hold_obj(dp,
                    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
                VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
                {
                        /* Sanity: name must map back to our object. */
                        uint64_t val;

                        err = dsl_dataset_snap_lookup(ds_head,
                            ds->ds_snapname, &val);
                        ASSERT3U(err, ==, 0);
                        ASSERT3U(val, ==, obj);
                }
#endif
                err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
                ASSERT(err == 0);
                dsl_dataset_rele(ds_head, FTAG);
        }

        /* Drop the prev hold we took locally (if not the cached one). */
        if (ds_prev && ds->ds_prev != ds_prev)
                dsl_dataset_rele(ds_prev, FTAG);

        spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
        spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
            "dataset = %llu", ds->ds_object);

        /* Free the auxiliary MOS objects hanging off this dataset. */
        if (ds->ds_phys->ds_next_clones_obj != 0) {
                uint64_t count;
                ASSERT(0 == zap_count(mos,
                    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
                VERIFY(0 == dmu_object_free(mos,
                    ds->ds_phys->ds_next_clones_obj, tx));
        }
        if (ds->ds_phys->ds_props_obj != 0)
                VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
        if (ds->ds_phys->ds_userrefs_obj != 0)
                VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
        dsl_dir_close(ds->ds_dir, ds);
        ds->ds_dir = NULL;
        dsl_dataset_drain_refs(ds, tag);
        VERIFY(0 == dmu_object_free(mos, obj, tx));

        if (dsda->rm_origin) {
                /*
                 * Remove the origin of the clone we just destroyed.
                 */
                struct dsl_ds_destroyarg ndsda = {0};

                ndsda.ds = dsda->rm_origin;
                dsl_dataset_destroy_sync(&ndsda, tag, tx);
        }
}
2035
2036 static int
2037 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
2038 {
2039         uint64_t asize;
2040
2041         if (!dmu_tx_is_syncing(tx))
2042                 return (0);
2043
2044         /*
2045          * If there's an fs-only reservation, any blocks that might become
2046          * owned by the snapshot dataset must be accommodated by space
2047          * outside of the reservation.
2048          */
2049         ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
2050         asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2051         if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2052                 return (ENOSPC);
2053
2054         /*
2055          * Propogate any reserved space for this snapshot to other
2056          * snapshot checks in this sync group.
2057          */
2058         if (asize > 0)
2059                 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2060
2061         return (0);
2062 }
2063
2064 int
2065 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
2066 {
2067         dsl_dataset_t *ds = arg1;
2068         const char *snapname = arg2;
2069         int err;
2070         uint64_t value;
2071
2072         /*
2073          * We don't allow multiple snapshots of the same txg.  If there
2074          * is already one, try again.
2075          */
2076         if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2077                 return (EAGAIN);
2078
2079         /*
2080          * Check for conflicting name snapshot name.
2081          */
2082         err = dsl_dataset_snap_lookup(ds, snapname, &value);
2083         if (err == 0)
2084                 return (EEXIST);
2085         if (err != ENOENT)
2086                 return (err);
2087
2088         /*
2089          * Check that the dataset's name is not too long.  Name consists
2090          * of the dataset's length + 1 for the @-sign + snapshot name's length
2091          */
2092         if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2093                 return (ENAMETOOLONG);
2094
2095         err = dsl_dataset_snapshot_reserve_space(ds, tx);
2096         if (err)
2097                 return (err);
2098
2099         ds->ds_trysnap_txg = tx->tx_txg;
2100         return (0);
2101 }
2102
/*
 * Sync-task that creates a snapshot of the dataset arg1 with the name
 * arg2.  Allocates a new dsl_dataset MOS object holding a copy of the
 * dataset's current on-disk state, splices it into the snapshot chain,
 * gives the live dataset a fresh deadlist, and records the snapshot in
 * the head's snapnames ZAP.
 */
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        const char *snapname = arg2;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        dmu_buf_t *dbuf;
        dsl_dataset_phys_t *dsphys;
        uint64_t dsobj, crtxg;
        objset_t *mos = dp->dp_meta_objset;
        int err;

        ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

        /*
         * The origin's ds_creation_txg has to be < TXG_INITIAL
         */
        if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
                crtxg = 1;
        else
                crtxg = tx->tx_txg;

        /* Allocate and initialize the new snapshot's phys object. */
        dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
            DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
        VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
        dmu_buf_will_dirty(dbuf, tx);
        dsphys = dbuf->db_data;
        bzero(dsphys, sizeof (dsl_dataset_phys_t));
        dsphys->ds_dir_obj = ds->ds_dir->dd_object;
        dsphys->ds_fsid_guid = unique_create();
        /* Retry until we get a nonzero guid (0 is reserved). */
        do {
                (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
                    sizeof (dsphys->ds_guid));
        } while (dsphys->ds_guid == 0);
        /* The snapshot inherits the dataset's current on-disk state. */
        dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
        dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
        dsphys->ds_next_snap_obj = ds->ds_object;
        dsphys->ds_num_children = 1;
        dsphys->ds_creation_time = gethrestime_sec();
        dsphys->ds_creation_txg = crtxg;
        dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
        dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
        dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
        dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
        dsphys->ds_flags = ds->ds_phys->ds_flags;
        dsphys->ds_bp = ds->ds_phys->ds_bp;
        dmu_buf_rele(dbuf, FTAG);

        /*
         * Splice the new snapshot between the previous snapshot and
         * this dataset in the on-disk chain.
         */
        ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
        if (ds->ds_prev) {
                uint64_t next_clones_obj =
                    ds->ds_prev->ds_phys->ds_next_clones_obj;
                ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
                    ds->ds_object ||
                    ds->ds_prev->ds_phys->ds_num_children > 1);
                if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
                        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                        ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
                            ds->ds_prev->ds_phys->ds_creation_txg);
                        ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
                } else if (next_clones_obj != 0) {
                        /* We're a clone: update prev's next_clones ZAP. */
                        remove_from_next_clones(ds->ds_prev,
                            dsphys->ds_next_snap_obj, tx);
                        VERIFY3U(0, ==, zap_add_int(mos,
                            next_clones_obj, dsobj, tx));
                }
        }

        /*
         * If we have a reference-reservation on this dataset, we will
         * need to increase the amount of refreservation being charged
         * since our unique space is going to zero.
         */
        if (ds->ds_reserved) {
                int64_t delta;
                ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
                delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
                dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
                    delta, 0, 0, tx);
        }

        /*
         * Give the live dataset a new deadlist (a clone of the current
         * one); the old deadlist object now belongs to the snapshot.
         */
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
            ds->ds_dir->dd_myname, snapname, dsobj,
            ds->ds_phys->ds_prev_snap_txg);
        ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
            UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
        dsl_deadlist_close(&ds->ds_deadlist);
        dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
        dsl_deadlist_add_key(&ds->ds_deadlist,
            ds->ds_phys->ds_prev_snap_txg, tx);

        /* The new snapshot becomes our previous snapshot. */
        ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
        ds->ds_phys->ds_prev_snap_obj = dsobj;
        ds->ds_phys->ds_prev_snap_txg = crtxg;
        ds->ds_phys->ds_unique_bytes = 0;
        if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
                ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

        /* Record the snapshot name in the snapnames ZAP. */
        err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
            snapname, 8, 1, &dsobj, tx);
        ASSERT(err == 0);

        /* Swap the in-core ds_prev reference to the new snapshot. */
        if (ds->ds_prev)
                dsl_dataset_drop_ref(ds->ds_prev, ds);
        VERIFY(0 == dsl_dataset_get_ref(dp,
            ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

        dsl_scan_ds_snapshotted(ds, tx);

        dsl_dir_snap_cmtime_update(ds->ds_dir);

        spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
            "dataset = %llu", dsobj);
}
2218
2219 void
2220 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2221 {
2222         ASSERT(dmu_tx_is_syncing(tx));
2223         ASSERT(ds->ds_objset != NULL);
2224         ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2225
2226         /*
2227          * in case we had to change ds_fsid_guid when we opened it,
2228          * sync it out now.
2229          */
2230         dmu_buf_will_dirty(ds->ds_dbuf, tx);
2231         ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2232
2233         dmu_objset_sync(ds->ds_objset, zio, tx);
2234 }
2235
/*
 * Add the "clones" property (the list of clone names of snapshot "ds")
 * to the stats nvlist "nv".  If the next_clones ZAP looks untrustworthy
 * (see below), the property is silently omitted.
 */
static void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
        uint64_t count = 0;
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;
        nvlist_t *propval;
        nvlist_t *val;

        rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
        VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);

        /*
         * There may be missing entries in ds_next_clones_obj
         * due to a bug in a previous version of the code.
         * Only trust it if it has the right number of entries.
         */
        if (ds->ds_phys->ds_next_clones_obj != 0) {
                ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
                    &count));
        }
        /*
         * NOTE(review): if ds_next_clones_obj == 0 and num_children == 1,
         * this comparison passes and the cursor below is initialized on
         * object 0 — presumably the cursor retrieve fails benignly then;
         * confirm against zap_cursor_retrieve() semantics.
         */
        if (count != ds->ds_phys->ds_num_children - 1) {
                goto fail;
        }
        for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                dsl_dataset_t *clone;
                char buf[ZFS_MAXNAMELEN];
                /*
                 * Even though we hold the dp_config_rwlock, the dataset
                 * may fail to open, returning ENOENT.  If there is a
                 * thread concurrently attempting to destroy this
                 * dataset, it will have the ds_rwlock held for
                 * RW_WRITER.  Our call to dsl_dataset_hold_obj() ->
                 * dsl_dataset_hold_ref() will fail its
                 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
                 * dp_config_rwlock, and wait for the destroy progress
                 * and signal ds_exclusive_cv.  If the destroy was
                 * successful, we will see that
                 * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
                 */
                if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
                    za.za_first_integer, FTAG, &clone) != 0)
                        continue;
                dsl_dataset_rele(clone, FTAG);
                dsl_dir_name(clone->ds_dir, buf);
                VERIFY(nvlist_add_boolean(val, buf) == 0);
                dsl_dataset_rele(clone, FTAG);
        }
        zap_cursor_fini(&zc);
        VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
        VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
            propval) == 0);
fail:
        /* nvlist_add_nvlist() copies, so our local lists can go. */
        nvlist_free(val);
        nvlist_free(propval);
        rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
2296
/*
 * Populate "nv" with this dataset's property values: the dsl_dir
 * stats, space figures, creation info, guid, hold counts, the
 * "written" delta from the previous snapshot (if any), and the
 * refratio.  For snapshots, "used"/"compressratio" are overridden
 * with snapshot-specific values and the clone list is added.
 */
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
        uint64_t refd, avail, uobjs, aobjs, ratio;

        dsl_dir_stats(ds->ds_dir, nv);

        dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
            ds->ds_phys->ds_creation_time);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
            ds->ds_phys->ds_creation_txg);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
            ds->ds_quota);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
            ds->ds_reserved);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
            ds->ds_phys->ds_guid);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
            ds->ds_phys->ds_unique_bytes);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
            ds->ds_object);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
            ds->ds_userrefs);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
            DS_IS_DEFER_DESTROY(ds) ? 1 : 0);

        /* "written": space written since the previous snapshot. */
        if (ds->ds_phys->ds_prev_snap_obj != 0) {
                uint64_t written, comp, uncomp;
                dsl_pool_t *dp = ds->ds_dir->dd_pool;
                dsl_dataset_t *prev;

                rw_enter(&dp->dp_config_rwlock, RW_READER);
                int err = dsl_dataset_hold_obj(dp,
                    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
                rw_exit(&dp->dp_config_rwlock);
                /* Best-effort: omit the property if the hold fails. */
                if (err == 0) {
                        err = dsl_dataset_space_written(prev, ds, &written,
                            &comp, &uncomp);
                        dsl_dataset_rele(prev, FTAG);
                        if (err == 0) {
                                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
                                    written);
                        }
                }
        }

        /* refratio = uncompressed/compressed, scaled by 100. */
        ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
            (ds->ds_phys->ds_uncompressed_bytes * 100 /
            ds->ds_phys->ds_compressed_bytes);
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);

        if (ds->ds_phys->ds_next_snap_obj) {
                /*
                 * This is a snapshot; override the dd's space used with
                 * our unique space and compression ratio.
                 */
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
                    ds->ds_phys->ds_unique_bytes);
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);

                get_clones_stat(ds, nv);
        }
}
2364
/*
 * Fill in the cheap-to-compute fields of *stat directly from the dataset's
 * in-core phys block; this path does not walk deadlists or compute space
 * usage, hence "fast".
 */
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
        stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
        stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
        stat->dds_guid = ds->ds_phys->ds_guid;
        /* a non-zero next-snap object identifies this dataset as a snapshot */
        if (ds->ds_phys->ds_next_snap_obj) {
                stat->dds_is_snapshot = B_TRUE;
                /*
                 * clones = ds_num_children - 1; NOTE(review): presumably the
                 * extra child is the snapshot's own next dataset — confirm
                 * against dsl_dataset.h.
                 */
                stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
        } else {
                stat->dds_is_snapshot = B_FALSE;
                stat->dds_num_clones = 0;
        }

        /* clone origin is really a dsl_dir thing... */
        rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
        if (dsl_dir_is_clone(ds->ds_dir)) {
                dsl_dataset_t *ods;

                /* look up the origin snapshot and render its full name */
                VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
                    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
                dsl_dataset_name(ods, stat->dds_origin);
                dsl_dataset_drop_ref(ods, FTAG);
        } else {
                /* not a clone: report an empty origin name */
                stat->dds_origin[0] = '\0';
        }
        rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
2393
2394 uint64_t
2395 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2396 {
2397         return (ds->ds_fsid_guid);
2398 }
2399
2400 void
2401 dsl_dataset_space(dsl_dataset_t *ds,
2402     uint64_t *refdbytesp, uint64_t *availbytesp,
2403     uint64_t *usedobjsp, uint64_t *availobjsp)
2404 {
2405         *refdbytesp = ds->ds_phys->ds_referenced_bytes;
2406         *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2407         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2408                 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2409         if (ds->ds_quota != 0) {
2410                 /*
2411                  * Adjust available bytes according to refquota
2412                  */
2413                 if (*refdbytesp < ds->ds_quota)
2414                         *availbytesp = MIN(*availbytesp,
2415                             ds->ds_quota - *refdbytesp);
2416                 else
2417                         *availbytesp = 0;
2418         }
2419         *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2420         *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2421 }
2422
2423 boolean_t
2424 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2425 {
2426         dsl_pool_t *dp = ds->ds_dir->dd_pool;
2427
2428         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2429             dsl_pool_sync_context(dp));
2430         if (ds->ds_prev == NULL)
2431                 return (B_FALSE);
2432         if (ds->ds_phys->ds_bp.blk_birth >
2433             ds->ds_prev->ds_phys->ds_creation_txg) {
2434                 objset_t *os, *os_prev;
2435                 /*
2436                  * It may be that only the ZIL differs, because it was
2437                  * reset in the head.  Don't count that as being
2438                  * modified.
2439                  */
2440                 if (dmu_objset_from_ds(ds, &os) != 0)
2441                         return (B_TRUE);
2442                 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2443                         return (B_TRUE);
2444                 return (bcmp(&os->os_phys->os_meta_dnode,
2445                     &os_prev->os_phys->os_meta_dnode,
2446                     sizeof (os->os_phys->os_meta_dnode)) != 0);
2447         }
2448         return (B_FALSE);
2449 }
2450
2451 /* ARGSUSED */
2452 static int
2453 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2454 {
2455         dsl_dataset_t *ds = arg1;
2456         char *newsnapname = arg2;
2457         dsl_dir_t *dd = ds->ds_dir;
2458         dsl_dataset_t *hds;
2459         uint64_t val;
2460         int err;
2461
2462         err = dsl_dataset_hold_obj(dd->dd_pool,
2463             dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2464         if (err)
2465                 return (err);
2466
2467         /* new name better not be in use */
2468         err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2469         dsl_dataset_rele(hds, FTAG);
2470
2471         if (err == 0)
2472                 err = EEXIST;
2473         else if (err == ENOENT)
2474                 err = 0;
2475
2476         /* dataset name + 1 for the "@" + the new snapshot name must fit */
2477         if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2478                 err = ENAMETOOLONG;
2479
2480         return (err);
2481 }
2482
/*
 * Sync phase of the snapshot-rename task: remove the old name from the
 * head's snapnames ZAP, update the in-core name, and add the new entry.
 * Runs in syncing context, so the VERIFYs/ASSERTs enforce invariants the
 * check phase already guaranteed.
 */
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
        char oldname[MAXPATHLEN], newname[MAXPATHLEN];
        dsl_dataset_t *ds = arg1;
        const char *newsnapname = arg2;
        dsl_dir_t *dd = ds->ds_dir;
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        dsl_dataset_t *hds;
        int err;

        /* must be a snapshot (heads have ds_next_snap_obj == 0) */
        ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

        VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
            dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

        /* refresh ds_snapname, then drop the old ZAP entry */
        VERIFY(0 == dsl_dataset_get_snapname(ds));
        err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
        ASSERT3U(err, ==, 0);
        /* capture the full old name before ds_snapname changes */
        dsl_dataset_name(ds, oldname);
        mutex_enter(&ds->ds_lock);
        /* safe: the check phase verified the new name fits in MAXNAMELEN */
        (void) strcpy(ds->ds_snapname, newsnapname);
        mutex_exit(&ds->ds_lock);
        err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
            ds->ds_snapname, 8, 1, &ds->ds_object, tx);
        ASSERT3U(err, ==, 0);
        dsl_dataset_name(ds, newname);
#ifdef _KERNEL
        /* keep /dev/zvol minor-node names in sync with the dataset name */
        zvol_rename_minors(oldname, newname);
#endif

        spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
            "dataset = %llu", ds->ds_object);
        dsl_dataset_rele(hds, FTAG);
}
2518
/*
 * Shared state for a recursive snapshot rename; one sync task per
 * affected dataset is queued on dstg (see dsl_recursive_rename()).
 */
struct renamesnaparg {
        dsl_sync_task_group_t *dstg;    /* group of per-dataset rename tasks */
        char failed[MAXPATHLEN];        /* name of the snapshot that failed */
        char *oldsnap;                  /* old snapshot component (after '@') */
        char *newsnap;                  /* new snapshot component (after '@') */
};
2525
/*
 * dmu_objset_find() callback for recursive rename: for each filesystem
 * "name", queue a rename sync task for its "<name>@oldsnap" snapshot.
 * A missing snapshot (ENOENT) is not an error — not every descendant
 * necessarily has the snapshot being renamed.
 */
static int
dsl_snapshot_rename_one(const char *name, void *arg)
{
        struct renamesnaparg *ra = arg;
        dsl_dataset_t *ds = NULL;
        char *snapname;
        int err;

        snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
        /* record the candidate name so callers can report where we failed */
        (void) strlcpy(ra->failed, snapname, sizeof (ra->failed));

        /*
         * For recursive snapshot renames the parent won't be changing
         * so we just pass name for both the to/from argument.
         */
        err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
        if (err != 0) {
                strfree(snapname);
                return (err == ENOENT ? 0 : err);
        }

#ifdef _KERNEL
        /*
         * For all filesystems undergoing rename, we'll need to unmount it.
         */
        (void) zfs_unmount_snap(snapname, NULL);
#endif
        err = dsl_dataset_hold(snapname, ra->dstg, &ds);
        strfree(snapname);
        if (err != 0)
                return (err == ENOENT ? 0 : err);

        /* ds stays held by the task group; released in dsl_recursive_rename */
        dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
            dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

        return (0);
}
2563
/*
 * Rename snapshot "oldname" (fs@snap1 -> @snap2 from newname) on the
 * named filesystem and all of its descendants, as one sync task group.
 * On failure, oldname is overwritten with the name of the snapshot that
 * caused the failure.
 */
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
        int err;
        struct renamesnaparg *ra;
        dsl_sync_task_t *dst;
        spa_t *spa;
        char *cp, *fsname = spa_strdup(oldname);
        int len = strlen(oldname) + 1;

        /* truncate the snapshot name to get the fsname */
        cp = strchr(fsname, '@');
        *cp = '\0';

        err = spa_open(fsname, &spa, FTAG);
        if (err) {
                kmem_free(fsname, len);
                return (err);
        }
        ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
        ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

        /* point at the snapshot components (the text after each '@') */
        ra->oldsnap = strchr(oldname, '@') + 1;
        ra->newsnap = strchr(newname, '@') + 1;
        *ra->failed = '\0';

        err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
            DS_FIND_CHILDREN);
        kmem_free(fsname, len);

        if (err == 0) {
                err = dsl_sync_task_group_wait(ra->dstg);
        }

        /* record which task (if any) failed, and drop the per-task holds */
        for (dst = list_head(&ra->dstg->dstg_tasks); dst;
            dst = list_next(&ra->dstg->dstg_tasks, dst)) {
                dsl_dataset_t *ds = dst->dst_arg1;
                if (dst->dst_err) {
                        dsl_dir_name(ds->ds_dir, ra->failed);
                        (void) strlcat(ra->failed, "@", sizeof (ra->failed));
                        (void) strlcat(ra->failed, ra->newsnap,
                            sizeof (ra->failed));
                }
                dsl_dataset_rele(ds, ra->dstg);
        }

        /*
         * NOTE(review): the bound below is sizeof (ra->failed), not the size
         * of the caller's oldname buffer — assumes oldname is at least
         * MAXPATHLEN bytes; confirm against callers.
         */
        if (err)
                (void) strlcpy(oldname, ra->failed, sizeof (ra->failed));

        dsl_sync_task_group_destroy(ra->dstg);
        kmem_free(ra, sizeof (struct renamesnaparg));
        spa_close(spa, FTAG);
        return (err);
}
2618
2619 static int
2620 dsl_valid_rename(const char *oldname, void *arg)
2621 {
2622         int delta = *(int *)arg;
2623
2624         if (strlen(oldname) + delta >= MAXNAMELEN)
2625                 return (ENAMETOOLONG);
2626
2627         return (0);
2628 }
2629
#pragma weak dmu_objset_rename = dsl_dataset_rename
/*
 * Rename a dataset.  Three cases:
 *  - no trailing component (tail == NULL): rename the filesystem itself
 *    via dsl_dir_rename(), validating descendant name lengths first;
 *  - tail starts with '@': rename a snapshot (optionally recursively);
 *  - anything else: the name referred to a nonexistent component.
 * Snapshot renames may not move across filesystems (EXDEV).
 */
int
dsl_dataset_rename(char *oldname, const char *newname, int flags)
{
        dsl_dir_t *dd;
        dsl_dataset_t *ds;
        const char *tail;
        int err;

        err = dsl_dir_open(oldname, FTAG, &dd, &tail);
        if (err)
                return (err);

        if (tail == NULL) {
                /* renaming the filesystem/volume itself */
                int delta = strlen(newname) - strlen(oldname);

                /* if we're growing, validate child name lengths */
                if (delta > 0)
                        err = dmu_objset_find(oldname, dsl_valid_rename,
                            &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

                if (err == 0)
                        err = dsl_dir_rename(dd, newname, flags);
                dsl_dir_close(dd, FTAG);
                return (err);
        }

        if (tail[0] != '@') {
                /* the name ended in a nonexistent component */
                dsl_dir_close(dd, FTAG);
                return (ENOENT);
        }

        dsl_dir_close(dd, FTAG);

        /* new name must be snapshot in same filesystem */
        tail = strchr(newname, '@');
        if (tail == NULL)
                return (EINVAL);
        tail++;
        /* compare the filesystem components up to and including the '@' */
        if (strncmp(oldname, newname, tail - newname) != 0)
                return (EXDEV);

        if (flags & ZFS_RENAME_RECURSIVE) {
                err = dsl_recursive_rename(oldname, newname);
        } else {
                err = dsl_dataset_hold(oldname, FTAG, &ds);
                if (err)
                        return (err);

                err = dsl_sync_task_do(ds->ds_dir->dd_pool,
                    dsl_dataset_snapshot_rename_check,
                    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

                dsl_dataset_rele(ds, FTAG);
        }

        return (err);
}
2689
/* One snapshot on a promotearg snapshot list. */
struct promotenode {
        list_node_t link;
        dsl_dataset_t *ds;
};

/*
 * Check/sync argument for clone promotion (built in dsl_dataset_promote()):
 *  shared_snaps  - snapshots to be moved from the origin to the clone
 *  origin_snaps  - snapshots of the origin's head filesystem
 *  clone_snaps   - snapshots of the clone being promoted
 *  origin_origin - origin of the origin's filesystem; NULL unless the
 *                  clone is a clone of a clone
 * The used/comp/uncomp/unique/*usedsnap fields carry space-accounting
 * amounts from the check phase to the sync phase; err_ds reports a
 * conflicting snapshot name on EEXIST.
 */
struct promotearg {
        list_t shared_snaps, origin_snaps, clone_snaps;
        dsl_dataset_t *origin_origin;
        uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
        char *err_ds;
};

static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static boolean_t snaplist_unstable(list_t *l);
2704
/*
 * Check phase for clone promotion.  Verifies that hds is a promotable
 * clone, that none of the snapshots being moved conflict with hds's
 * existing snapshot names, that the destination dsl_dir can absorb the
 * transferred space, and precomputes the space amounts (pa->used, comp,
 * uncomp, unique, *usedsnap) consumed by dsl_dataset_promote_sync().
 */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *hds = arg1;
        struct promotearg *pa = arg2;
        struct promotenode *snap = list_head(&pa->shared_snaps);
        dsl_dataset_t *origin_ds = snap->ds;
        int err;
        uint64_t unused;

        /* Check that it is a real clone */
        if (!dsl_dir_is_clone(hds->ds_dir))
                return (EINVAL);

        /* Since this is so expensive, don't do the preliminary check */
        if (!dmu_tx_is_syncing(tx))
                return (0);

        if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
                return (EXDEV);

        /* compute origin's new unique space */
        snap = list_tail(&pa->clone_snaps);
        ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
        dsl_deadlist_space_range(&snap->ds->ds_deadlist,
            origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
            &pa->unique, &unused, &unused);

        /*
         * Walk the snapshots that we are moving
         *
         * Compute space to transfer.  Consider the incremental changes
         * to used for each snapshot:
         * (my used) = (prev's used) + (blocks born) - (blocks killed)
         * So each snapshot gave birth to:
         * (blocks born) = (my used) - (prev's used) + (blocks killed)
         * So a sequence would look like:
         * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
         * Which simplifies to:
         * uN + kN + kN-1 + ... + k1 + k0
         * Note however, if we stop before we reach the ORIGIN we get:
         * uN + kN + kN-1 + ... + kM - uM-1
         */
        pa->used = origin_ds->ds_phys->ds_referenced_bytes;
        pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
        pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
        for (snap = list_head(&pa->shared_snaps); snap;
            snap = list_next(&pa->shared_snaps, snap)) {
                uint64_t val, dlused, dlcomp, dluncomp;
                dsl_dataset_t *ds = snap->ds;

                /* Check that the snapshot name does not conflict */
                VERIFY(0 == dsl_dataset_get_snapname(ds));
                err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
                if (err == 0) {
                        /* lookup success means the name already exists */
                        err = EEXIST;
                        goto out;
                }
                if (err != ENOENT)
                        goto out;

                /* The very first snapshot does not have a deadlist */
                if (ds->ds_phys->ds_prev_snap_obj == 0)
                        continue;

                dsl_deadlist_space(&ds->ds_deadlist,
                    &dlused, &dlcomp, &dluncomp);
                pa->used += dlused;
                pa->comp += dlcomp;
                pa->uncomp += dluncomp;
        }

        /*
         * If we are a clone of a clone then we never reached ORIGIN,
         * so we need to subtract out the clone origin's used space.
         */
        if (pa->origin_origin) {
                pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
                pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
                pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
        }

        /* Check that there is enough space here */
        err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
            pa->used);
        if (err)
                return (err);

        /*
         * Compute the amounts of space that will be used by snapshots
         * after the promotion (for both origin and clone).  For each,
         * it is the amount of space that will be on all of their
         * deadlists (that was not born before their new origin).
         */
        if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                uint64_t space;

                /*
                 * Note, typically this will not be a clone of a clone,
                 * so dd_origin_txg will be < TXG_INITIAL, so
                 * these snaplist_space() -> dsl_deadlist_space_range()
                 * calls will be fast because they do not have to
                 * iterate over all bps.
                 */
                snap = list_head(&pa->origin_snaps);
                err = snaplist_space(&pa->shared_snaps,
                    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
                if (err)
                        return (err);

                err = snaplist_space(&pa->clone_snaps,
                    snap->ds->ds_dir->dd_origin_txg, &space);
                if (err)
                        return (err);
                pa->cloneusedsnap += space;
        }
        if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                err = snaplist_space(&pa->origin_snaps,
                    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
                if (err)
                        return (err);
        }

        return (0);
out:
        /* report which snapshot name caused the failure */
        pa->err_ds =  snap->ds->ds_snapname;
        return (err);
}
2833
2834 static void
2835 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2836 {
2837         dsl_dataset_t *hds = arg1;
2838         struct promotearg *pa = arg2;
2839         struct promotenode *snap = list_head(&pa->shared_snaps);
2840         dsl_dataset_t *origin_ds = snap->ds;
2841         dsl_dataset_t *origin_head;
2842         dsl_dir_t *dd = hds->ds_dir;
2843         dsl_pool_t *dp = hds->ds_dir->dd_pool;
2844         dsl_dir_t *odd = NULL;
2845         uint64_t oldnext_obj;
2846         int64_t delta;
2847
2848         ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2849
2850         snap = list_head(&pa->origin_snaps);
2851         origin_head = snap->ds;
2852
2853         /*
2854          * We need to explicitly open odd, since origin_ds's dd will be
2855          * changing.
2856          */
2857         VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2858             NULL, FTAG, &odd));
2859
2860         /* change origin's next snap */
2861         dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2862         oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2863         snap = list_tail(&pa->clone_snaps);
2864         ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2865         origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2866
2867         /* change the origin's next clone */
2868         if (origin_ds->ds_phys->ds_next_clones_obj) {
2869                 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2870                 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2871                     origin_ds->ds_phys->ds_next_clones_obj,
2872                     oldnext_obj, tx));
2873         }
2874
2875         /* change origin */
2876         dmu_buf_will_dirty(dd->dd_dbuf, tx);
2877         ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2878         dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2879         dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2880         dmu_buf_will_dirty(odd->dd_dbuf, tx);
2881         odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2882         origin_head->ds_dir->dd_origin_txg =
2883             origin_ds->ds_phys->ds_creation_txg;
2884
2885         /* change dd_clone entries */
2886         if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2887                 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2888                     odd->dd_phys->dd_clones, hds->ds_object, tx));
2889                 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2890                     pa->origin_origin->ds_dir->dd_phys->dd_clones,
2891                     hds->ds_object, tx));
2892
2893                 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2894                     pa->origin_origin->ds_dir->dd_phys->dd_clones,
2895                     origin_head->ds_object, tx));
2896                 if (dd->dd_phys->dd_clones == 0) {
2897                         dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2898                             DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2899                 }
2900                 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2901                     dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2902
2903         }
2904
2905         /* move snapshots to this dir */
2906         for (snap = list_head(&pa->shared_snaps); snap;
2907             snap = list_next(&pa->shared_snaps, snap)) {
2908                 dsl_dataset_t *ds = snap->ds;
2909
2910                 /* unregister props as dsl_dir is changing */
2911                 if (ds->ds_objset) {
2912                         dmu_objset_evict(ds->ds_objset);
2913                         ds->ds_objset = NULL;
2914                 }
2915                 /* move snap name entry */
2916                 VERIFY(0 == dsl_dataset_get_snapname(ds));
2917                 VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2918                     ds->ds_snapname, tx));
2919                 VERIFY(0 == zap_add(dp->dp_meta_objset,
2920                     hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2921                     8, 1, &ds->ds_object, tx));
2922
2923                 /* change containing dsl_dir */
2924                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2925                 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2926                 ds->ds_phys->ds_dir_obj = dd->dd_object;
2927                 ASSERT3P(ds->ds_dir, ==, odd);
2928                 dsl_dir_close(ds->ds_dir, ds);
2929                 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2930                     NULL, ds, &ds->ds_dir));
2931
2932                 /* move any clone references */
2933                 if (ds->ds_phys->ds_next_clones_obj &&
2934                     spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2935                         zap_cursor_t zc;
2936                         zap_attribute_t za;
2937
2938                         for (zap_cursor_init(&zc, dp->dp_meta_objset,
2939                             ds->ds_phys->ds_next_clones_obj);
2940                             zap_cursor_retrieve(&zc, &za) == 0;
2941                             zap_cursor_advance(&zc)) {
2942                                 dsl_dataset_t *cnds;
2943                                 uint64_t o;
2944
2945                                 if (za.za_first_integer == oldnext_obj) {
2946                                         /*
2947                                          * We've already moved the
2948                                          * origin's reference.
2949                                          */
2950                                         continue;
2951                                 }
2952
2953                                 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2954                                     za.za_first_integer, FTAG, &cnds));
2955                                 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2956
2957                                 VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2958                                     odd->dd_phys->dd_clones, o, tx), ==, 0);
2959                                 VERIFY3U(zap_add_int(dp->dp_meta_objset,
2960                                     dd->dd_phys->dd_clones, o, tx), ==, 0);
2961                                 dsl_dataset_rele(cnds, FTAG);
2962                         }
2963                         zap_cursor_fini(&zc);
2964                 }
2965
2966                 ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2967         }
2968
2969         /*
2970          * Change space accounting.
2971          * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2972          * both be valid, or both be 0 (resulting in delta == 0).  This
2973          * is true for each of {clone,origin} independently.
2974          */
2975
2976         delta = pa->cloneusedsnap -
2977             dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2978         ASSERT3S(delta, >=, 0);
2979         ASSERT3U(pa->used, >=, delta);
2980         dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2981         dsl_dir_diduse_space(dd, DD_USED_HEAD,
2982             pa->used - delta, pa->comp, pa->uncomp, tx);
2983
2984         delta = pa->originusedsnap -
2985             odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2986         ASSERT3S(delta, <=, 0);
2987         ASSERT3U(pa->used, >=, -delta);
2988         dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2989         dsl_dir_diduse_space(odd, DD_USED_HEAD,
2990             -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2991
2992         origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2993
2994         /* log history record */
2995         spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2996             "dataset = %llu", hds->ds_object);
2997
2998         dsl_dir_close(odd, FTAG);
2999 }
3000
/* hold/own tag used for every dataset placed on a snaplist */
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
        uint64_t obj = last_obj;

        ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

        list_create(l, sizeof (struct promotenode),
            offsetof(struct promotenode, link));

        /* walk backward along the ds_prev_snap_obj chain */
        while (obj != first_obj) {
                dsl_dataset_t *ds;
                struct promotenode *snap;
                int err;

                if (own) {
                        /* take exclusive ownership (caller will disown) */
                        err = dsl_dataset_own_obj(dp, obj,
                            0, snaplist_tag, &ds);
                        if (err == 0)
                                dsl_dataset_make_exclusive(ds, snaplist_tag);
                } else {
                        err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
                }
                if (err == ENOENT) {
                        /* lost race with snapshot destroy */
                        /*
                         * NOTE(review): assumes at least one snapshot was
                         * already listed (last != NULL); confirm last_obj
                         * cannot race away on the first iteration.
                         */
                        struct promotenode *last = list_tail(l);
                        ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
                        obj = last->ds->ds_phys->ds_prev_snap_obj;
                        continue;
                } else if (err) {
                        /* caller unwinds the partial list via snaplist_destroy */
                        return (err);
                }

                /* first_obj == 0 means "stop at this dataset's origin" */
                if (first_obj == 0)
                        first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

                snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
                snap->ds = ds;
                list_insert_tail(l, snap);
                obj = ds->ds_phys->ds_prev_snap_obj;
        }

        return (0);
}
3053
3054 static int
3055 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
3056 {
3057         struct promotenode *snap;
3058
3059         *spacep = 0;
3060         for (snap = list_head(l); snap; snap = list_next(l, snap)) {
3061                 uint64_t used, comp, uncomp;
3062                 dsl_deadlist_space_range(&snap->ds->ds_deadlist,
3063                     mintxg, UINT64_MAX, &used, &comp, &uncomp);
3064                 *spacep += used;
3065         }
3066         return (0);
3067 }
3068
3069 static void
3070 snaplist_destroy(list_t *l, boolean_t own)
3071 {
3072         struct promotenode *snap;
3073
3074         if (!l || !list_link_active(&l->list_head))
3075                 return;
3076
3077         while ((snap = list_tail(l)) != NULL) {
3078                 list_remove(l, snap);
3079                 if (own)
3080                         dsl_dataset_disown(snap->ds, snaplist_tag);
3081                 else
3082                         dsl_dataset_rele(snap->ds, snaplist_tag);
3083                 kmem_free(snap, sizeof (struct promotenode));
3084         }
3085         list_destroy(l);
3086 }
3087
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 *
 * On EEXIST (snapshot name conflict) the conflicting snapshot name is
 * copied into conflsnap, if non-NULL.
 */
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
        dsl_dataset_t *ds;
        dsl_dir_t *dd;
        dsl_pool_t *dp;
        dmu_object_info_t doi;
        struct promotearg pa = { 0 };
        struct promotenode *snap;
        int err;

        err = dsl_dataset_hold(name, FTAG, &ds);
        if (err)
                return (err);
        dd = ds->ds_dir;
        dp = dd->dd_pool;

        /* size of the snapnames ZAP, used below to size the tx reservation */
        err = dmu_object_info(dp->dp_meta_objset,
            ds->ds_phys->ds_snapnames_zapobj, &doi);
        if (err) {
                dsl_dataset_rele(ds, FTAG);
                return (err);
        }

        /* only a (head) clone can be promoted */
        if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
                dsl_dataset_rele(ds, FTAG);
                return (EINVAL);
        }

        /*
         * We are going to inherit all the snapshots taken before our
         * origin (i.e., our new origin will be our parent's origin).
         * Take ownership of them so that we can rename them into our
         * namespace.
         */
        rw_enter(&dp->dp_config_rwlock, RW_READER);

        err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
            &pa.shared_snaps);
        if (err != 0)
                goto out;

        err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
        if (err != 0)
                goto out;

        snap = list_head(&pa.shared_snaps);
        ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
        err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
            snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
        if (err != 0)
                goto out;

        /* clone of a clone: also hold the origin's own origin snapshot */
        if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
                err = dsl_dataset_hold_obj(dp,
                    snap->ds->ds_dir->dd_phys->dd_origin_obj,
                    FTAG, &pa.origin_origin);
                if (err != 0)
                        goto out;
        }

out:
        rw_exit(&dp->dp_config_rwlock);

        /*
         * Add in 128x the snapnames zapobj size, since we will be moving
         * a bunch of snapnames to the promoted ds, and dirtying their
         * bonus buffers.
         *
         * NOTE(review): the comment says 128x but the code reserves
         * 2 + 2 * doi_physical_blocks_512 — confirm which is intended.
         */
        if (err == 0) {
                err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
                    dsl_dataset_promote_sync, ds, &pa,
                    2 + 2 * doi.doi_physical_blocks_512);
                /*
                 * NOTE(review): strncpy may leave conflsnap without a NUL
                 * terminator if err_ds is MAXNAMELEN bytes — verify callers
                 * pre-zero or terminate the buffer.
                 */
                if (err && pa.err_ds && conflsnap)
                        (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
        }

        /* unwind the snapshot lists and holds in all cases */
        snaplist_destroy(&pa.shared_snaps, B_TRUE);
        snaplist_destroy(&pa.clone_snaps, B_FALSE);
        snaplist_destroy(&pa.origin_snaps, B_FALSE);
        if (pa.origin_origin)
                dsl_dataset_rele(pa.origin_origin, FTAG);
        dsl_dataset_rele(ds, FTAG);
        return (err);
}
3182
/* Arguments for the clone-swap sync task (see dsl_dataset_clone_swap()). */
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force; /* swap even if ohds changed since its last snap */
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
3189
/*
 * Sync-task check function for dsl_dataset_clone_swap(): verify that the
 * clone (cds) and the origin head (ohds) may be swapped, and precompute
 * csa->unused_refres_delta for the sync function.
 */
/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone (unless they are unrelated) */
	if (csa->cds->ds_prev != NULL &&
	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
	    csa->ohds->ds_object !=
	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/*
	 * adjust amount of any unconsumed refreservation; note that both
	 * MIN()s deliberately use ohds's reservation, comparing it against
	 * ohds's current unique bytes and cds's (soon-to-be ohds's) unique
	 * bytes.
	 */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* if the reservation's footprint grows, it must fit in free space */
	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	/* the swapped-in data must not exceed ohds's refquota */
	if (csa->ohds->ds_quota != 0 &&
	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
		return (EDQUOT);

	return (0);
}
3238
/*
 * Sync-task apply function for dsl_dataset_clone_swap(): exchange the
 * on-disk contents (block pointer, space accounting, deadlists) of the
 * clone and the origin head.  Runs in syncing context; the order of
 * operations below (evict objsets, fix origin's unique bytes, swap
 * blkptrs, transfer space, swap deadlists) must be preserved.
 */
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->ohds->ds_quota == 0 ||
	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);

	/*
	 * Evict any cached objsets for both datasets; their contents are
	 * about to change underneath them.
	 */
	if (csa->cds->ds_objset != NULL) {
		dmu_objset_evict(csa->cds->ds_objset);
		csa->cds->ds_objset = NULL;
	}

	if (csa->ohds->ds_objset != NULL) {
		dmu_objset_evict(csa->ohds->ds_objset);
		csa->ohds->ds_objset = NULL;
	}

	/*
	 * Reset origin's unique bytes, if it exists.
	 */
	if (csa->cds->ds_prev) {
		dsl_dataset_t *origin = csa->cds->ds_prev;
		uint64_t comp, uncomp;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		/* recompute from the clone's deadlist past the branch txg */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
	}

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		dsl_deadlist_space(&csa->cds->ds_deadlist,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space(&csa->ohds->ds_deadlist,
		    &odl_used, &odl_comp, &odl_uncomp);

		/* net change in space charged to each dsl_dir */
		dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &cdl_used, &cdl_comp, &cdl_uncomp);
		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
		    &odl_used, &odl_comp, &odl_uncomp);
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
	    csa->cds->ds_phys->ds_referenced_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/*
	 * Swap deadlists.
	 */
	dsl_deadlist_close(&csa->cds->ds_deadlist);
	dsl_deadlist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj);
	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj);

	/* let the scrub/resilver code know the two datasets traded places */
	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
3355
/*
 * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
 * recv" into an existing fs to swizzle the file system to the new
 * version, and by "zfs rollback".  Can also be used to swap two
 * independent head datasets if neither has any snapshots.
 *
 * Both datasets must already be owned by the caller.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/*
	 * Need exclusive access for the swap. If we're swapping these
	 * datasets back after an error, we already hold the locks.
	 */
	if (!RW_WRITE_HELD(&clone->ds_rwlock))
		rw_enter(&clone->ds_rwlock, RW_WRITER);
	/*
	 * Take origin_head's lock opportunistically; if that fails, drop
	 * clone's lock, take origin_head's the blocking way, and then
	 * try clone again, retrying from scratch on failure (this avoids
	 * holding one lock while blocking on the other).
	 */
	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}
3395
3396 /*
3397  * Given a pool name and a dataset object number in that pool,
3398  * return the name of that dataset.
3399  */
3400 int
3401 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3402 {
3403         spa_t *spa;
3404         dsl_pool_t *dp;
3405         dsl_dataset_t *ds;
3406         int error;
3407
3408         if ((error = spa_open(pname, &spa, FTAG)) != 0)
3409                 return (error);
3410         dp = spa_get_dsl(spa);
3411         rw_enter(&dp->dp_config_rwlock, RW_READER);
3412         if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3413                 dsl_dataset_name(ds, buf);
3414                 dsl_dataset_rele(ds, FTAG);
3415         }
3416         rw_exit(&dp->dp_config_rwlock);
3417         spa_close(spa, FTAG);
3418
3419         return (error);
3420 }
3421
/*
 * Check whether an allocation of 'asize' bytes against 'ds' would exceed
 * its refquota.  'inflight' is space already reserved but not yet synced.
 * On entry *used is the caller's charge for this allocation; it is
 * reduced by any unconsumed refreservation, and *ref_rsrv is set to the
 * portion of asize covered by that refreservation.  Returns 0, ERESTART
 * (try again; pending frees may help), or EDQUOT.
 */
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 ||
		    ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}
3469
3470 /* ARGSUSED */
3471 static int
3472 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3473 {
3474         dsl_dataset_t *ds = arg1;
3475         dsl_prop_setarg_t *psa = arg2;
3476         int err;
3477
3478         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3479                 return (ENOTSUP);
3480
3481         if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3482                 return (err);
3483
3484         if (psa->psa_effective_value == 0)
3485                 return (0);
3486
3487         if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3488             psa->psa_effective_value < ds->ds_reserved)
3489                 return (ENOSPC);
3490
3491         return (0);
3492 }
3493
3494 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3495
3496 void
3497 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3498 {
3499         dsl_dataset_t *ds = arg1;
3500         dsl_prop_setarg_t *psa = arg2;
3501         uint64_t effective_value = psa->psa_effective_value;
3502
3503         dsl_prop_set_sync(ds, psa, tx);
3504         DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3505
3506         if (ds->ds_quota != effective_value) {
3507                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3508                 ds->ds_quota = effective_value;
3509         }
3510 }
3511
3512 int
3513 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3514 {
3515         dsl_dataset_t *ds;
3516         dsl_prop_setarg_t psa;
3517         int err;
3518
3519         dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3520
3521         err = dsl_dataset_hold(dsname, FTAG, &ds);
3522         if (err)
3523                 return (err);
3524
3525         /*
3526          * If someone removes a file, then tries to set the quota, we
3527          * want to make sure the file freeing takes effect.
3528          */
3529         txg_wait_open(ds->ds_dir->dd_pool, 0);
3530
3531         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3532             dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3533             ds, &psa, 0);
3534
3535         dsl_dataset_rele(ds, FTAG);
3536         return (err);
3537 }
3538
/*
 * Sync-task check function for setting refreservation: validate the
 * pool version and dataset type, and (in syncing context only) verify
 * that any growth in the reservation fits in the available space and
 * under the refquota.
 */
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t unique;
	int err;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	/* snapshots cannot carry a refreservation */
	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	if (!DS_UNIQUE_IS_ACCURATE(ds))
		dsl_dataset_recalc_head_uniq(ds);
	unique = ds->ds_phys->ds_unique_bytes;
	mutex_exit(&ds->ds_lock);

	/* only the portion of the reservation beyond 'unique' uses space */
	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
		uint64_t delta = MAX(unique, effective_value) -
		    MAX(unique, ds->ds_reserved);

		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
			return (ENOSPC);
		if (ds->ds_quota > 0 &&
		    effective_value > ds->ds_quota)
			return (ENOSPC);
	}

	return (0);
}
3586
/*
 * Sync-task apply function for setting refreservation: persist the
 * property, update the in-core reservation, and charge the parent
 * dsl_dir for the change in unconsumed reservation.
 */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t unique;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/* dd_lock is taken before ds_lock; keep this ordering */
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
	unique = ds->ds_phys->ds_unique_bytes;
	/* change in the reservation's unconsumed (beyond 'unique') portion */
	delta = MAX(0, (int64_t)(effective_value - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = effective_value;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
3613
3614 int
3615 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3616     uint64_t reservation)
3617 {
3618         dsl_dataset_t *ds;
3619         dsl_prop_setarg_t psa;
3620         int err;
3621
3622         dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3623             &reservation);
3624
3625         err = dsl_dataset_hold(dsname, FTAG, &ds);
3626         if (err)
3627                 return (err);
3628
3629         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3630             dsl_dataset_set_reservation_check,
3631             dsl_dataset_set_reservation_sync, ds, &psa, 0);
3632
3633         dsl_dataset_rele(ds, FTAG);
3634         return (err);
3635 }
3636
/* State needed to release a temporary user hold when a process exits. */
typedef struct zfs_hold_cleanup_arg {
	dsl_pool_t *dp;         /* pool containing the held snapshot */
	uint64_t dsobj;         /* object number of the held snapshot */
	char htag[MAXNAMELEN];  /* the hold's tag */
} zfs_hold_cleanup_arg_t;
3642
3643 static void
3644 dsl_dataset_user_release_onexit(void *arg)
3645 {
3646         zfs_hold_cleanup_arg_t *ca = arg;
3647
3648         (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3649             B_TRUE);
3650         kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3651 }
3652
3653 void
3654 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3655     minor_t minor)
3656 {
3657         zfs_hold_cleanup_arg_t *ca;
3658
3659         ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3660         ca->dp = ds->ds_dir->dd_pool;
3661         ca->dsobj = ds->ds_object;
3662         (void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3663         VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3664             dsl_dataset_user_release_onexit, ca, NULL));
3665 }
3666
3667 /*
3668  * If you add new checks here, you may need to add
3669  * additional checks to the "temporary" case in
3670  * snapshot_check() in dmu_objset.c.
3671  */
3672 static int
3673 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3674 {
3675         dsl_dataset_t *ds = arg1;
3676         struct dsl_ds_holdarg *ha = arg2;
3677         char *htag = ha->htag;
3678         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3679         int error = 0;
3680
3681         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3682                 return (ENOTSUP);
3683
3684         if (!dsl_dataset_is_snapshot(ds))
3685                 return (EINVAL);
3686
3687         /* tags must be unique */
3688         mutex_enter(&ds->ds_lock);
3689         if (ds->ds_phys->ds_userrefs_obj) {
3690                 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3691                     8, 1, tx);
3692                 if (error == 0)
3693                         error = EEXIST;
3694                 else if (error == ENOENT)
3695                         error = 0;
3696         }
3697         mutex_exit(&ds->ds_lock);
3698
3699         if (error == 0 && ha->temphold &&
3700             strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3701                 error = E2BIG;
3702
3703         return (error);
3704 }
3705
/*
 * Sync-task apply function for dsl_dataset_user_hold(): record the hold
 * tag in the snapshot's userrefs zap object (creating it on first use),
 * bump the in-core refcount, register tempholds with the pool, and log
 * the operation.
 */
void
dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct dsl_ds_holdarg *ha = arg2;
	char *htag = ha->htag;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t now = gethrestime_sec();
	uint64_t zapobj;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset.  Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;
	mutex_exit(&ds->ds_lock);

	/* tag -> hold time; uniqueness was verified by the check function */
	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (ha->temphold) {
		/* tempholds are also tracked pool-wide for cleanup */
		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
		    htag, &now, tx));
	}

	spa_history_log_internal(LOG_DS_USER_HOLD,
	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
	    (int)ha->temphold, ds->ds_object);
}
3743
3744 static int
3745 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3746 {
3747         struct dsl_ds_holdarg *ha = arg;
3748         dsl_dataset_t *ds;
3749         int error;
3750         char *name;
3751
3752         /* alloc a buffer to hold dsname@snapname plus terminating NULL */
3753         name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3754         error = dsl_dataset_hold(name, ha->dstg, &ds);
3755         strfree(name);
3756         if (error == 0) {
3757                 ha->gotone = B_TRUE;
3758                 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3759                     dsl_dataset_user_hold_sync, ds, ha, 0);
3760         } else if (error == ENOENT && ha->recursive) {
3761                 error = 0;
3762         } else {
3763                 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3764         }
3765         return (error);
3766 }
3767
3768 int
3769 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3770     boolean_t temphold)
3771 {
3772         struct dsl_ds_holdarg *ha;
3773         int error;
3774
3775         ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3776         ha->htag = htag;
3777         ha->temphold = temphold;
3778         error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3779             dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3780             ds, ha, 0);
3781         kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3782
3783         return (error);
3784 }
3785
/*
 * Place a user hold 'htag' on snapshot "dsname@snapname", optionally
 * recursing over descendant filesystems.  If 'temphold' is set and
 * 'cleanup_fd' is not -1, the hold is released automatically when the
 * process holding cleanup_fd exits.  On failure the name of the dataset
 * that failed is copied back into 'dsname'.
 */
int
dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
    boolean_t recursive, boolean_t temphold, int cleanup_fd)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;
	minor_t minor = 0;

	if (cleanup_fd != -1) {
		/* Currently we only support cleanup-on-exit of tempholds. */
		if (!temphold)
			return (EINVAL);
		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (error)
			return (error);
	}

	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		if (cleanup_fd != -1)
			zfs_onexit_fd_rele(cleanup_fd);
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	ha->temphold = temphold;

	/* Queue one sync task per dataset, then run them as a group. */
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_hold_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;

		if (dst->dst_err) {
			/* record the failing dataset (sans @snapname) */
			dsl_dataset_name(ds, ha->failed);
			*strchr(ha->failed, '@') = '\0';
		} else if (error == 0 && minor != 0 && temphold) {
			/*
			 * If this hold is to be released upon process exit,
			 * register that action now.
			 */
			dsl_register_onexit_hold_cleanup(ds, htag, minor);
		}
		dsl_dataset_rele(ds, ha->dstg);
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	/*
	 * NOTE(review): the copy back into the caller's buffer is bounded
	 * by sizeof (ha->failed) (MAXNAMELEN); assumes dsname is at least
	 * that large — confirm at callers.
	 */
	if (error)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);

	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);
	if (cleanup_fd != -1)
		zfs_onexit_fd_rele(cleanup_fd);
	return (error);
}
3863
/* Per-snapshot argument for the user-release sync task. */
struct dsl_ds_releasearg {
	dsl_dataset_t *ds;      /* snapshot whose hold is being released */
	const char *htag;       /* the hold tag to remove */
	boolean_t own;          /* do we own or just hold ds? */
};
3869
3870 static int
3871 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3872     boolean_t *might_destroy)
3873 {
3874         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3875         uint64_t zapobj;
3876         uint64_t tmp;
3877         int error;
3878
3879         *might_destroy = B_FALSE;
3880
3881         mutex_enter(&ds->ds_lock);
3882         zapobj = ds->ds_phys->ds_userrefs_obj;
3883         if (zapobj == 0) {
3884                 /* The tag can't possibly exist */
3885                 mutex_exit(&ds->ds_lock);
3886                 return (ESRCH);
3887         }
3888
3889         /* Make sure the tag exists */
3890         error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3891         if (error) {
3892                 mutex_exit(&ds->ds_lock);
3893                 if (error == ENOENT)
3894                         error = ESRCH;
3895                 return (error);
3896         }
3897
3898         if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3899             DS_IS_DEFER_DESTROY(ds))
3900                 *might_destroy = B_TRUE;
3901
3902         mutex_exit(&ds->ds_lock);
3903         return (0);
3904 }
3905
/*
 * Sync-task check function for a user-hold release.  If the release
 * would destroy the snapshot, delegate the remaining validation to
 * dsl_dataset_destroy_check(); in syncing context that additionally
 * requires that we own the dataset.
 */
static int
dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	boolean_t might_destroy;
	int error;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
		return (ENOTSUP);

	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
	if (error)
		return (error);

	if (might_destroy) {
		struct dsl_ds_destroyarg dsda = {0};

		if (dmu_tx_is_syncing(tx)) {
			/*
			 * If we're not prepared to remove the snapshot,
			 * we can't allow the release to happen right now.
			 */
			if (!ra->own)
				return (EBUSY);
		}
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		return (dsl_dataset_destroy_check(&dsda, tag, tx));
	}

	return (0);
}
3939
/*
 * Sync-task apply function for a user-hold release: drop the refcount,
 * remove the tag from the userrefs zap (and the pool's temphold
 * tracking), log the release, and destroy the snapshot if this was the
 * last ref on a defer-destroyed, clone-less snapshot.
 */
static void
dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	struct dsl_ds_releasearg *ra = arg1;
	dsl_dataset_t *ds = ra->ds;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	uint64_t dsobj = ds->ds_object;
	uint64_t refs;
	int error;

	mutex_enter(&ds->ds_lock);
	ds->ds_userrefs--;
	refs = ds->ds_userrefs;	/* remaining refs, captured for the log */
	mutex_exit(&ds->ds_lock);
	/* the pool-wide temphold entry may legitimately not exist */
	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
	VERIFY(error == 0 || error == ENOENT);
	zapobj = ds->ds_phys->ds_userrefs_obj;
	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));

	spa_history_log_internal(LOG_DS_USER_RELEASE,
	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
	    ra->htag, (longlong_t)refs, dsobj);

	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
	    DS_IS_DEFER_DESTROY(ds)) {
		struct dsl_ds_destroyarg dsda = {0};

		ASSERT(ra->own);
		dsda.ds = ds;
		dsda.releasing = B_TRUE;
		/* We already did the destroy_check */
		dsl_dataset_destroy_sync(&dsda, tag, tx);
	}
}
3976
/*
 * dmu_objset_find() callback: queue a user-release sync task for the
 * snapshot "dsname@<ha->snapname>".  If releasing the hold might destroy
 * the snapshot, unmount it (kernel only) and take ownership so the sync
 * task is allowed to destroy it.
 */
static int
dsl_dataset_user_release_one(const char *dsname, void *arg)
{
	struct dsl_ds_holdarg *ha = arg;
	struct dsl_ds_releasearg *ra;
	dsl_dataset_t *ds;
	int error;
	void *dtag = ha->dstg;
	char *name;
	boolean_t own = B_FALSE;
	boolean_t might_destroy;

	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
	error = dsl_dataset_hold(name, dtag, &ds);
	strfree(name);
	/* during recursion, datasets without this snapshot are skipped */
	if (error == ENOENT && ha->recursive)
		return (0);
	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
	if (error)
		return (error);

	ha->gotone = B_TRUE;

	ASSERT(dsl_dataset_is_snapshot(ds));

	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
	if (error) {
		dsl_dataset_rele(ds, dtag);
		return (error);
	}

	if (might_destroy) {
#ifdef _KERNEL
		/* the snapshot must be unmounted before it can be destroyed */
		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
		error = zfs_unmount_snap(name, NULL);
		strfree(name);
		if (error) {
			dsl_dataset_rele(ds, dtag);
			return (error);
		}
#endif
		/* upgrade our hold to ownership for the potential destroy */
		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
			dsl_dataset_rele(ds, dtag);
			return (EBUSY);
		} else {
			own = B_TRUE;
			dsl_dataset_make_exclusive(ds, dtag);
		}
	}

	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
	ra->ds = ds;
	ra->htag = ha->htag;
	ra->own = own;
	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, ra, dtag, 0);

	return (0);
}
4037
/*
 * Release the user hold named htag from snapshot dsname@snapname, or,
 * if recursive, from that snapshot on every descendant filesystem.
 * All releases are submitted as one sync task group so they commit in
 * a single txg.  On failure (other than EBUSY), dsname is overwritten
 * with the name of the dataset that caused the failure.  EBUSY is
 * treated as transient and retried from scratch (see comment below).
 */
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	/*
	 * Clean up every queued task: record the first failing dataset's
	 * name, drop the hold (or ownership) taken in _release_one(),
	 * and free the per-task releasearg.
	 */
	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	/* A recursive release that matched no snapshots is an error. */
	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	/*
	 * NOTE(review): the copy-back is bounded by sizeof (ha->failed),
	 * the size of the *source* array — presumably the caller's dsname
	 * buffer is at least that large (MAXPATHLEN); verify at call sites.
	 */
	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup.  We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner.  Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}
4110
4111 /*
4112  * Called at spa_load time (with retry == B_FALSE) to release a stale
4113  * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4114  */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		/* Translate the object number to a name under the lock. */
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds)+1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		/* Split "fs@snap" in place at the '@'. */
		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT.  Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely.  But we might need to
		 * unmount the snapshot, and there's currently no way to lookup
		 * a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
4156
4157 int
4158 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4159 {
4160         dsl_dataset_t *ds;
4161         int err;
4162
4163         err = dsl_dataset_hold(dsname, FTAG, &ds);
4164         if (err)
4165                 return (err);
4166
4167         VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4168         if (ds->ds_phys->ds_userrefs_obj != 0) {
4169                 zap_attribute_t *za;
4170                 zap_cursor_t zc;
4171
4172                 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4173                 for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4174                     ds->ds_phys->ds_userrefs_obj);
4175                     zap_cursor_retrieve(&zc, za) == 0;
4176                     zap_cursor_advance(&zc)) {
4177                         VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4178                             za->za_first_integer));
4179                 }
4180                 zap_cursor_fini(&zc);
4181                 kmem_free(za, sizeof (zap_attribute_t));
4182         }
4183         dsl_dataset_rele(ds, FTAG);
4184         return (0);
4185 }
4186
4187 /*
4188  * Note, this function is used as the callback for dmu_objset_find().  We
4189  * always return 0 so that we will continue to find and process
4190  * inconsistent datasets, even if we encounter an error trying to
4191  * process one of them.
4192  */
4193 /* ARGSUSED */
4194 int
4195 dsl_destroy_inconsistent(const char *dsname, void *arg)
4196 {
4197         dsl_dataset_t *ds;
4198
4199         if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4200                 if (DS_IS_INCONSISTENT(ds))
4201                         (void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4202                 else
4203                         dsl_dataset_disown(ds, FTAG);
4204         }
4205         return (0);
4206 }
4207
4208 /*
4209  * Return (in *usedp) the amount of space written in new that is not
4210  * present in oldsnap.  New may be a snapshot or the head.  Old must be
4211  * a snapshot before new, in new's filesystem (or its origin).  If not then
4212  * fail and return EINVAL.
4213  *
4214  * The written space is calculated by considering two components:  First, we
4215  * ignore any freed space, and calculate the written as new's used space
4216  * minus old's used space.  Next, we add in the amount of space that was freed
4217  * between the two snapshots, thus reducing new's used space relative to old's.
4218  * Specifically, this is the space that was born before old->ds_creation_txg,
4219  * and freed before new (ie. on new's deadlist or a previous deadlist).
4220  *
4221  * space freed                         [---------------------]
4222  * snapshots                       ---O-------O--------O-------O------
4223  *                                         oldsnap            new
4224  */
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = new->ds_dir->dd_pool;

	/*
	 * First component: the difference in referenced space.  These
	 * subtractions may transiently "underflow"; the deadlist space
	 * added below brings the totals back up (see block comment above).
	 */
	*usedp = 0;
	*usedp += new->ds_phys->ds_referenced_bytes;
	*usedp -= oldsnap->ds_phys->ds_referenced_bytes;

	*compp = 0;
	*compp += new->ds_phys->ds_compressed_bytes;
	*compp -= oldsnap->ds_phys->ds_compressed_bytes;

	*uncompp = 0;
	*uncompp += new->ds_phys->ds_uncompressed_bytes;
	*uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;

	/*
	 * Second component: walk the snapshot chain backward from new to
	 * oldsnap, adding in the deadlist space born before oldsnap's
	 * creation txg (i.e. freed between the two snapshots).
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	snapobj = new->ds_object;
	while (snapobj != oldsnap->ds_object) {
		dsl_dataset_t *snap;
		uint64_t used, comp, uncomp;

		/* new is already held by the caller; don't re-hold it. */
		if (snapobj == new->ds_object) {
			snap = new;
		} else {
			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
			if (err != 0)
				break;
		}

		if (snap->ds_phys->ds_prev_snap_txg ==
		    oldsnap->ds_phys->ds_creation_txg) {
			/*
			 * The blocks in the deadlist can not be born after
			 * ds_prev_snap_txg, so get the whole deadlist space,
			 * which is more efficient (especially for old-format
			 * deadlists).  Unfortunately the deadlist code
			 * doesn't have enough information to make this
			 * optimization itself.
			 */
			dsl_deadlist_space(&snap->ds_deadlist,
			    &used, &comp, &uncomp);
		} else {
			dsl_deadlist_space_range(&snap->ds_deadlist,
			    0, oldsnap->ds_phys->ds_creation_txg,
			    &used, &comp, &uncomp);
		}
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * If we get to the beginning of the chain of snapshots
		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
		 * was not a snapshot of/before new.
		 */
		snapobj = snap->ds_phys->ds_prev_snap_obj;
		if (snap != new)
			dsl_dataset_rele(snap, FTAG);
		if (snapobj == 0) {
			err = EINVAL;
			break;
		}

	}
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}
4297
4298 /*
4299  * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4300  * lastsnap, and all snapshots in between are deleted.
4301  *
4302  * blocks that would be freed            [---------------------------]
4303  * snapshots                       ---O-------O--------O-------O--------O
4304  *                                        firstsnap        lastsnap
4305  *
4306  * This is the set of blocks that were born after the snap before firstsnap,
4307  * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4308  * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4309  * We calculate this by iterating over the relevant deadlists (from the snap
4310  * after lastsnap, backward to the snap after firstsnap), summing up the
4311  * space on the deadlist that was born after the snap before firstsnap.
4312  */
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
    dsl_dataset_t *lastsnap,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	int err = 0;
	uint64_t snapobj;
	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;

	ASSERT(dsl_dataset_is_snapshot(firstsnap));
	ASSERT(dsl_dataset_is_snapshot(lastsnap));

	/*
	 * Check that the snapshots are in the same dsl_dir, and firstsnap
	 * is before lastsnap.
	 */
	if (firstsnap->ds_dir != lastsnap->ds_dir ||
	    firstsnap->ds_phys->ds_creation_txg >
	    lastsnap->ds_phys->ds_creation_txg)
		return (EINVAL);

	*usedp = *compp = *uncompp = 0;

	/*
	 * Walk backward from the snapshot after lastsnap to the one after
	 * firstsnap, summing deadlist space born after the snap before
	 * firstsnap (see block comment above for the picture).
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	snapobj = lastsnap->ds_phys->ds_next_snap_obj;
	while (snapobj != firstsnap->ds_object) {
		dsl_dataset_t *ds;
		uint64_t used, comp, uncomp;

		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
		if (err != 0)
			break;

		dsl_deadlist_space_range(&ds->ds_deadlist,
		    firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;

		/*
		 * The chain can't end before reaching firstsnap, since we
		 * checked above that firstsnap precedes lastsnap in the
		 * same dsl_dir.
		 */
		snapobj = ds->ds_phys->ds_prev_snap_obj;
		ASSERT3U(snapobj, !=, 0);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}