/*
 * Source: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
 * (FreeBSD releng/8.1; copy of stable/8 in preparation for 8.1-RC1).
 */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25
26 #include <sys/dmu_objset.h>
27 #include <sys/dsl_dataset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_synctask.h>
31 #include <sys/dmu_traverse.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/arc.h>
34 #include <sys/zio.h>
35 #include <sys/zap.h>
36 #include <sys/unique.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/spa.h>
40 #include <sys/zfs_znode.h>
41 #include <sys/sunddi.h>
42
43 static char *dsl_reaper = "the grim reaper";
44
45 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
46 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
47 static dsl_checkfunc_t dsl_dataset_rollback_check;
48 static dsl_syncfunc_t dsl_dataset_rollback_sync;
49 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
50
51 #define DS_REF_MAX      (1ULL << 62)
52
53 #define DSL_DEADLIST_BLOCKSIZE  SPA_MAXBLOCKSIZE
54
55 #define DSL_DATASET_IS_DESTROYED(ds)    ((ds)->ds_owner == dsl_reaper)
56
57
58 /*
59  * Figure out how much of this delta should be propogated to the dsl_dir
60  * layer.  If there's a refreservation, that space has already been
61  * partially accounted for in our ancestors.
62  */
63 static int64_t
64 parent_delta(dsl_dataset_t *ds, int64_t delta)
65 {
66         uint64_t old_bytes, new_bytes;
67
68         if (ds->ds_reserved == 0)
69                 return (delta);
70
71         old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
72         new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
73
74         ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
75         return (new_bytes - old_bytes);
76 }
77
78 void
79 dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
80 {
81         int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
82         int compressed = BP_GET_PSIZE(bp);
83         int uncompressed = BP_GET_UCSIZE(bp);
84         int64_t delta;
85
86         dprintf_bp(bp, "born, ds=%p\n", ds);
87
88         ASSERT(dmu_tx_is_syncing(tx));
89         /* It could have been compressed away to nothing */
90         if (BP_IS_HOLE(bp))
91                 return;
92         ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
93         ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
94         if (ds == NULL) {
95                 /*
96                  * Account for the meta-objset space in its placeholder
97                  * dsl_dir.
98                  */
99                 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
100                 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
101                     used, compressed, uncompressed, tx);
102                 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
103                 return;
104         }
105         dmu_buf_will_dirty(ds->ds_dbuf, tx);
106         mutex_enter(&ds->ds_dir->dd_lock);
107         mutex_enter(&ds->ds_lock);
108         delta = parent_delta(ds, used);
109         ds->ds_phys->ds_used_bytes += used;
110         ds->ds_phys->ds_compressed_bytes += compressed;
111         ds->ds_phys->ds_uncompressed_bytes += uncompressed;
112         ds->ds_phys->ds_unique_bytes += used;
113         mutex_exit(&ds->ds_lock);
114         dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
115             compressed, uncompressed, tx);
116         dsl_dir_transfer_space(ds->ds_dir, used - delta,
117             DD_USED_REFRSRV, DD_USED_HEAD, tx);
118         mutex_exit(&ds->ds_dir->dd_lock);
119 }
120
121 int
122 dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
123     dmu_tx_t *tx)
124 {
125         int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
126         int compressed = BP_GET_PSIZE(bp);
127         int uncompressed = BP_GET_UCSIZE(bp);
128
129         ASSERT(pio != NULL);
130         ASSERT(dmu_tx_is_syncing(tx));
131         /* No block pointer => nothing to free */
132         if (BP_IS_HOLE(bp))
133                 return (0);
134
135         ASSERT(used > 0);
136         if (ds == NULL) {
137                 int err;
138                 /*
139                  * Account for the meta-objset space in its placeholder
140                  * dataset.
141                  */
142                 err = dsl_free(pio, tx->tx_pool,
143                     tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
144                 ASSERT(err == 0);
145
146                 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
147                     -used, -compressed, -uncompressed, tx);
148                 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
149                 return (used);
150         }
151         ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
152
153         ASSERT(!dsl_dataset_is_snapshot(ds));
154         dmu_buf_will_dirty(ds->ds_dbuf, tx);
155
156         if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
157                 int err;
158                 int64_t delta;
159
160                 dprintf_bp(bp, "freeing: %s", "");
161                 err = dsl_free(pio, tx->tx_pool,
162                     tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
163                 ASSERT(err == 0);
164
165                 mutex_enter(&ds->ds_dir->dd_lock);
166                 mutex_enter(&ds->ds_lock);
167                 ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
168                     !DS_UNIQUE_IS_ACCURATE(ds));
169                 delta = parent_delta(ds, -used);
170                 ds->ds_phys->ds_unique_bytes -= used;
171                 mutex_exit(&ds->ds_lock);
172                 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
173                     delta, -compressed, -uncompressed, tx);
174                 dsl_dir_transfer_space(ds->ds_dir, -used - delta,
175                     DD_USED_REFRSRV, DD_USED_HEAD, tx);
176                 mutex_exit(&ds->ds_dir->dd_lock);
177         } else {
178                 dprintf_bp(bp, "putting on dead list: %s", "");
179                 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
180                 ASSERT3U(ds->ds_prev->ds_object, ==,
181                     ds->ds_phys->ds_prev_snap_obj);
182                 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
183                 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
184                 if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
185                     ds->ds_object && bp->blk_birth >
186                     ds->ds_prev->ds_phys->ds_prev_snap_txg) {
187                         dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
188                         mutex_enter(&ds->ds_prev->ds_lock);
189                         ds->ds_prev->ds_phys->ds_unique_bytes += used;
190                         mutex_exit(&ds->ds_prev->ds_lock);
191                 }
192                 if (bp->blk_birth > ds->ds_origin_txg) {
193                         dsl_dir_transfer_space(ds->ds_dir, used,
194                             DD_USED_HEAD, DD_USED_SNAP, tx);
195                 }
196         }
197         mutex_enter(&ds->ds_lock);
198         ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
199         ds->ds_phys->ds_used_bytes -= used;
200         ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
201         ds->ds_phys->ds_compressed_bytes -= compressed;
202         ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
203         ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
204         mutex_exit(&ds->ds_lock);
205
206         return (used);
207 }
208
209 uint64_t
210 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
211 {
212         uint64_t trysnap = 0;
213
214         if (ds == NULL)
215                 return (0);
216         /*
217          * The snapshot creation could fail, but that would cause an
218          * incorrect FALSE return, which would only result in an
219          * overestimation of the amount of space that an operation would
220          * consume, which is OK.
221          *
222          * There's also a small window where we could miss a pending
223          * snapshot, because we could set the sync task in the quiescing
224          * phase.  So this should only be used as a guess.
225          */
226         if (ds->ds_trysnap_txg >
227             spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
228                 trysnap = ds->ds_trysnap_txg;
229         return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
230 }
231
232 int
233 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
234 {
235         return (blk_birth > dsl_dataset_prev_snap_txg(ds));
236 }
237
238 /* ARGSUSED */
239 static void
240 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
241 {
242         dsl_dataset_t *ds = dsv;
243
244         ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
245
246         dprintf_ds(ds, "evicting %s\n", "");
247
248         unique_remove(ds->ds_fsid_guid);
249
250         if (ds->ds_user_ptr != NULL)
251                 ds->ds_user_evict_func(ds, ds->ds_user_ptr);
252
253         if (ds->ds_prev) {
254                 dsl_dataset_drop_ref(ds->ds_prev, ds);
255                 ds->ds_prev = NULL;
256         }
257
258         bplist_close(&ds->ds_deadlist);
259         if (ds->ds_dir)
260                 dsl_dir_close(ds->ds_dir, ds);
261
262         ASSERT(!list_link_active(&ds->ds_synced_link));
263
264         if (mutex_owned(&ds->ds_lock))
265                 mutex_exit(&ds->ds_lock);
266         mutex_destroy(&ds->ds_lock);
267         if (mutex_owned(&ds->ds_opening_lock))
268                 mutex_exit(&ds->ds_opening_lock);
269         mutex_destroy(&ds->ds_opening_lock);
270         if (mutex_owned(&ds->ds_deadlist.bpl_lock))
271                 mutex_exit(&ds->ds_deadlist.bpl_lock);
272         mutex_destroy(&ds->ds_deadlist.bpl_lock);
273         rw_destroy(&ds->ds_rwlock);
274         cv_destroy(&ds->ds_exclusive_cv);
275
276         kmem_free(ds, sizeof (dsl_dataset_t));
277 }
278
279 static int
280 dsl_dataset_get_snapname(dsl_dataset_t *ds)
281 {
282         dsl_dataset_phys_t *headphys;
283         int err;
284         dmu_buf_t *headdbuf;
285         dsl_pool_t *dp = ds->ds_dir->dd_pool;
286         objset_t *mos = dp->dp_meta_objset;
287
288         if (ds->ds_snapname[0])
289                 return (0);
290         if (ds->ds_phys->ds_next_snap_obj == 0)
291                 return (0);
292
293         err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
294             FTAG, &headdbuf);
295         if (err)
296                 return (err);
297         headphys = headdbuf->db_data;
298         err = zap_value_search(dp->dp_meta_objset,
299             headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
300         dmu_buf_rele(headdbuf, FTAG);
301         return (err);
302 }
303
304 static int
305 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
306 {
307         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
308         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
309         matchtype_t mt;
310         int err;
311
312         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
313                 mt = MT_FIRST;
314         else
315                 mt = MT_EXACT;
316
317         err = zap_lookup_norm(mos, snapobj, name, 8, 1,
318             value, mt, NULL, 0, NULL);
319         if (err == ENOTSUP && mt == MT_FIRST)
320                 err = zap_lookup(mos, snapobj, name, 8, 1, value);
321         return (err);
322 }
323
324 static int
325 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
326 {
327         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
328         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
329         matchtype_t mt;
330         int err;
331
332         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
333                 mt = MT_FIRST;
334         else
335                 mt = MT_EXACT;
336
337         err = zap_remove_norm(mos, snapobj, name, mt, tx);
338         if (err == ENOTSUP && mt == MT_FIRST)
339                 err = zap_remove(mos, snapobj, name, tx);
340         return (err);
341 }
342
343 static int
344 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
345     dsl_dataset_t **dsp)
346 {
347         objset_t *mos = dp->dp_meta_objset;
348         dmu_buf_t *dbuf;
349         dsl_dataset_t *ds;
350         int err;
351
352         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
353             dsl_pool_sync_context(dp));
354
355         err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
356         if (err)
357                 return (err);
358         ds = dmu_buf_get_user(dbuf);
359         if (ds == NULL) {
360                 dsl_dataset_t *winner;
361
362                 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
363                 ds->ds_dbuf = dbuf;
364                 ds->ds_object = dsobj;
365                 ds->ds_phys = dbuf->db_data;
366
367                 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
368                 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
369                 mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
370                     NULL);
371                 rw_init(&ds->ds_rwlock, 0, 0, 0);
372                 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
373
374                 err = bplist_open(&ds->ds_deadlist,
375                     mos, ds->ds_phys->ds_deadlist_obj);
376                 if (err == 0) {
377                         err = dsl_dir_open_obj(dp,
378                             ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
379                 }
380                 if (err) {
381                         /*
382                          * we don't really need to close the blist if we
383                          * just opened it.
384                          */
385                         mutex_destroy(&ds->ds_lock);
386                         mutex_destroy(&ds->ds_opening_lock);
387                         mutex_destroy(&ds->ds_deadlist.bpl_lock);
388                         rw_destroy(&ds->ds_rwlock);
389                         cv_destroy(&ds->ds_exclusive_cv);
390                         kmem_free(ds, sizeof (dsl_dataset_t));
391                         dmu_buf_rele(dbuf, tag);
392                         return (err);
393                 }
394
395                 if (!dsl_dataset_is_snapshot(ds)) {
396                         ds->ds_snapname[0] = '\0';
397                         if (ds->ds_phys->ds_prev_snap_obj) {
398                                 err = dsl_dataset_get_ref(dp,
399                                     ds->ds_phys->ds_prev_snap_obj,
400                                     ds, &ds->ds_prev);
401                         }
402
403                         if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) {
404                                 dsl_dataset_t *origin;
405
406                                 err = dsl_dataset_hold_obj(dp,
407                                     ds->ds_dir->dd_phys->dd_origin_obj,
408                                     FTAG, &origin);
409                                 if (err == 0) {
410                                         ds->ds_origin_txg =
411                                             origin->ds_phys->ds_creation_txg;
412                                         dsl_dataset_rele(origin, FTAG);
413                                 }
414                         }
415                 } else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
416                         err = dsl_dataset_get_snapname(ds);
417                 }
418
419                 if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
420                         /*
421                          * In sync context, we're called with either no lock
422                          * or with the write lock.  If we're not syncing,
423                          * we're always called with the read lock held.
424                          */
425                         boolean_t need_lock =
426                             !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
427                             dsl_pool_sync_context(dp);
428
429                         if (need_lock)
430                                 rw_enter(&dp->dp_config_rwlock, RW_READER);
431
432                         err = dsl_prop_get_ds(ds,
433                             "refreservation", sizeof (uint64_t), 1,
434                             &ds->ds_reserved, NULL);
435                         if (err == 0) {
436                                 err = dsl_prop_get_ds(ds,
437                                     "refquota", sizeof (uint64_t), 1,
438                                     &ds->ds_quota, NULL);
439                         }
440
441                         if (need_lock)
442                                 rw_exit(&dp->dp_config_rwlock);
443                 } else {
444                         ds->ds_reserved = ds->ds_quota = 0;
445                 }
446
447                 if (err == 0) {
448                         winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
449                             dsl_dataset_evict);
450                 }
451                 if (err || winner) {
452                         bplist_close(&ds->ds_deadlist);
453                         if (ds->ds_prev)
454                                 dsl_dataset_drop_ref(ds->ds_prev, ds);
455                         dsl_dir_close(ds->ds_dir, ds);
456                         mutex_destroy(&ds->ds_lock);
457                         mutex_destroy(&ds->ds_opening_lock);
458                         mutex_destroy(&ds->ds_deadlist.bpl_lock);
459                         rw_destroy(&ds->ds_rwlock);
460                         cv_destroy(&ds->ds_exclusive_cv);
461                         kmem_free(ds, sizeof (dsl_dataset_t));
462                         if (err) {
463                                 dmu_buf_rele(dbuf, tag);
464                                 return (err);
465                         }
466                         ds = winner;
467                 } else {
468                         ds->ds_fsid_guid =
469                             unique_insert(ds->ds_phys->ds_fsid_guid);
470                 }
471         }
472         ASSERT3P(ds->ds_dbuf, ==, dbuf);
473         ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
474         ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
475             spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
476             dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
477         mutex_enter(&ds->ds_lock);
478         if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
479                 mutex_exit(&ds->ds_lock);
480                 dmu_buf_rele(ds->ds_dbuf, tag);
481                 return (ENOENT);
482         }
483         mutex_exit(&ds->ds_lock);
484         *dsp = ds;
485         return (0);
486 }
487
488 static int
489 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
490 {
491         dsl_pool_t *dp = ds->ds_dir->dd_pool;
492
493         /*
494          * In syncing context we don't want the rwlock lock: there
495          * may be an existing writer waiting for sync phase to
496          * finish.  We don't need to worry about such writers, since
497          * sync phase is single-threaded, so the writer can't be
498          * doing anything while we are active.
499          */
500         if (dsl_pool_sync_context(dp)) {
501                 ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
502                 return (0);
503         }
504
505         /*
506          * Normal users will hold the ds_rwlock as a READER until they
507          * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
508          * drop their READER lock after they set the ds_owner field.
509          *
510          * If the dataset is being destroyed, the destroy thread will
511          * obtain a WRITER lock for exclusive access after it's done its
512          * open-context work and then change the ds_owner to
513          * dsl_reaper once destruction is assured.  So threads
514          * may block here temporarily, until the "destructability" of
515          * the dataset is determined.
516          */
517         ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
518         mutex_enter(&ds->ds_lock);
519         while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
520                 rw_exit(&dp->dp_config_rwlock);
521                 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
522                 if (DSL_DATASET_IS_DESTROYED(ds)) {
523                         mutex_exit(&ds->ds_lock);
524                         dsl_dataset_drop_ref(ds, tag);
525                         rw_enter(&dp->dp_config_rwlock, RW_READER);
526                         return (ENOENT);
527                 }
528                 rw_enter(&dp->dp_config_rwlock, RW_READER);
529         }
530         mutex_exit(&ds->ds_lock);
531         return (0);
532 }
533
534 int
535 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
536     dsl_dataset_t **dsp)
537 {
538         int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
539
540         if (err)
541                 return (err);
542         return (dsl_dataset_hold_ref(*dsp, tag));
543 }
544
545 int
546 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
547     dsl_dataset_t **dsp)
548 {
549         int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);
550
551         ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);
552
553         if (err)
554                 return (err);
555         if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
556                 dsl_dataset_rele(*dsp, owner);
557                 *dsp = NULL;
558                 return (EBUSY);
559         }
560         return (0);
561 }
562
563 int
564 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
565 {
566         dsl_dir_t *dd;
567         dsl_pool_t *dp;
568         const char *snapname;
569         uint64_t obj;
570         int err = 0;
571
572         err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
573         if (err)
574                 return (err);
575
576         dp = dd->dd_pool;
577         obj = dd->dd_phys->dd_head_dataset_obj;
578         rw_enter(&dp->dp_config_rwlock, RW_READER);
579         if (obj)
580                 err = dsl_dataset_get_ref(dp, obj, tag, dsp);
581         else
582                 err = ENOENT;
583         if (err)
584                 goto out;
585
586         err = dsl_dataset_hold_ref(*dsp, tag);
587
588         /* we may be looking for a snapshot */
589         if (err == 0 && snapname != NULL) {
590                 dsl_dataset_t *ds = NULL;
591
592                 if (*snapname++ != '@') {
593                         dsl_dataset_rele(*dsp, tag);
594                         err = ENOENT;
595                         goto out;
596                 }
597
598                 dprintf("looking for snapshot '%s'\n", snapname);
599                 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
600                 if (err == 0)
601                         err = dsl_dataset_get_ref(dp, obj, tag, &ds);
602                 dsl_dataset_rele(*dsp, tag);
603
604                 ASSERT3U((err == 0), ==, (ds != NULL));
605
606                 if (ds) {
607                         mutex_enter(&ds->ds_lock);
608                         if (ds->ds_snapname[0] == 0)
609                                 (void) strlcpy(ds->ds_snapname, snapname,
610                                     sizeof (ds->ds_snapname));
611                         mutex_exit(&ds->ds_lock);
612                         err = dsl_dataset_hold_ref(ds, tag);
613                         *dsp = err ? NULL : ds;
614                 }
615         }
616 out:
617         rw_exit(&dp->dp_config_rwlock);
618         dsl_dir_close(dd, FTAG);
619         return (err);
620 }
621
622 int
623 dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
624 {
625         int err = dsl_dataset_hold(name, owner, dsp);
626         if (err)
627                 return (err);
628         if ((*dsp)->ds_phys->ds_num_children > 0 &&
629             !DS_MODE_IS_READONLY(flags)) {
630                 dsl_dataset_rele(*dsp, owner);
631                 return (EROFS);
632         }
633         if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
634                 dsl_dataset_rele(*dsp, owner);
635                 return (EBUSY);
636         }
637         return (0);
638 }
639
640 void
641 dsl_dataset_name(dsl_dataset_t *ds, char *name)
642 {
643         if (ds == NULL) {
644                 (void) strcpy(name, "mos");
645         } else {
646                 dsl_dir_name(ds->ds_dir, name);
647                 VERIFY(0 == dsl_dataset_get_snapname(ds));
648                 if (ds->ds_snapname[0]) {
649                         (void) strcat(name, "@");
650                         /*
651                          * We use a "recursive" mutex so that we
652                          * can call dprintf_ds() with ds_lock held.
653                          */
654                         if (!MUTEX_HELD(&ds->ds_lock)) {
655                                 mutex_enter(&ds->ds_lock);
656                                 (void) strcat(name, ds->ds_snapname);
657                                 mutex_exit(&ds->ds_lock);
658                         } else {
659                                 (void) strcat(name, ds->ds_snapname);
660                         }
661                 }
662         }
663 }
664
665 static int
666 dsl_dataset_namelen(dsl_dataset_t *ds)
667 {
668         int result;
669
670         if (ds == NULL) {
671                 result = 3;     /* "mos" */
672         } else {
673                 result = dsl_dir_namelen(ds->ds_dir);
674                 VERIFY(0 == dsl_dataset_get_snapname(ds));
675                 if (ds->ds_snapname[0]) {
676                         ++result;       /* adding one for the @-sign */
677                         if (!MUTEX_HELD(&ds->ds_lock)) {
678                                 mutex_enter(&ds->ds_lock);
679                                 result += strlen(ds->ds_snapname);
680                                 mutex_exit(&ds->ds_lock);
681                         } else {
682                                 result += strlen(ds->ds_snapname);
683                         }
684                 }
685         }
686
687         return (result);
688 }
689
690 void
691 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
692 {
693         dmu_buf_rele(ds->ds_dbuf, tag);
694 }
695
696 void
697 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
698 {
699         if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
700                 rw_exit(&ds->ds_rwlock);
701         }
702         dsl_dataset_drop_ref(ds, tag);
703 }
704
705 void
706 dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
707 {
708         ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
709             (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
710
711         mutex_enter(&ds->ds_lock);
712         ds->ds_owner = NULL;
713         if (RW_WRITE_HELD(&ds->ds_rwlock)) {
714                 rw_exit(&ds->ds_rwlock);
715                 cv_broadcast(&ds->ds_exclusive_cv);
716         }
717         mutex_exit(&ds->ds_lock);
718         if (ds->ds_dbuf)
719                 dsl_dataset_drop_ref(ds, owner);
720         else
721                 dsl_dataset_evict(ds->ds_dbuf, ds);
722 }
723
724 boolean_t
725 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
726 {
727         boolean_t gotit = FALSE;
728
729         mutex_enter(&ds->ds_lock);
730         if (ds->ds_owner == NULL &&
731             (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
732                 ds->ds_owner = owner;
733                 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
734                         rw_exit(&ds->ds_rwlock);
735                 gotit = TRUE;
736         }
737         mutex_exit(&ds->ds_lock);
738         return (gotit);
739 }
740
741 void
742 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
743 {
744         ASSERT3P(owner, ==, ds->ds_owner);
745         if (!RW_WRITE_HELD(&ds->ds_rwlock))
746                 rw_enter(&ds->ds_rwlock, RW_WRITER);
747 }
748
749 uint64_t
750 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
751     uint64_t flags, dmu_tx_t *tx)
752 {
753         dsl_pool_t *dp = dd->dd_pool;
754         dmu_buf_t *dbuf;
755         dsl_dataset_phys_t *dsphys;
756         uint64_t dsobj;
757         objset_t *mos = dp->dp_meta_objset;
758
759         if (origin == NULL)
760                 origin = dp->dp_origin_snap;
761
762         ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
763         ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
764         ASSERT(dmu_tx_is_syncing(tx));
765         ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
766
767         dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
768             DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
769         VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
770         dmu_buf_will_dirty(dbuf, tx);
771         dsphys = dbuf->db_data;
772         bzero(dsphys, sizeof (dsl_dataset_phys_t));
773         dsphys->ds_dir_obj = dd->dd_object;
774         dsphys->ds_flags = flags;
775         dsphys->ds_fsid_guid = unique_create();
776         (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
777             sizeof (dsphys->ds_guid));
778         dsphys->ds_snapnames_zapobj =
779             zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
780             DMU_OT_NONE, 0, tx);
781         dsphys->ds_creation_time = gethrestime_sec();
782         dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
783         dsphys->ds_deadlist_obj =
784             bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
785
786         if (origin) {
787                 dsphys->ds_prev_snap_obj = origin->ds_object;
788                 dsphys->ds_prev_snap_txg =
789                     origin->ds_phys->ds_creation_txg;
790                 dsphys->ds_used_bytes =
791                     origin->ds_phys->ds_used_bytes;
792                 dsphys->ds_compressed_bytes =
793                     origin->ds_phys->ds_compressed_bytes;
794                 dsphys->ds_uncompressed_bytes =
795                     origin->ds_phys->ds_uncompressed_bytes;
796                 dsphys->ds_bp = origin->ds_phys->ds_bp;
797                 dsphys->ds_flags |= origin->ds_phys->ds_flags;
798
799                 dmu_buf_will_dirty(origin->ds_dbuf, tx);
800                 origin->ds_phys->ds_num_children++;
801
802                 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
803                         if (origin->ds_phys->ds_next_clones_obj == 0) {
804                                 origin->ds_phys->ds_next_clones_obj =
805                                     zap_create(mos,
806                                     DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
807                         }
808                         VERIFY(0 == zap_add_int(mos,
809                             origin->ds_phys->ds_next_clones_obj,
810                             dsobj, tx));
811                 }
812
813                 dmu_buf_will_dirty(dd->dd_dbuf, tx);
814                 dd->dd_phys->dd_origin_obj = origin->ds_object;
815         }
816
817         if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
818                 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
819
820         dmu_buf_rele(dbuf, FTAG);
821
822         dmu_buf_will_dirty(dd->dd_dbuf, tx);
823         dd->dd_phys->dd_head_dataset_obj = dsobj;
824
825         return (dsobj);
826 }
827
/*
 * Create a new dsl_dir named 'lastname' under 'pdd' and a new head
 * dataset inside it (optionally cloned from 'origin').  Must be called
 * from syncing context.  Returns the object number of the new dataset.
 */
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	/* snapshot names ("fs@snap") are created elsewhere */
	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	/* grant 'create'-time delegated permissions to 'cr' on the new dir */
	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	return (dsobj);
}
849
/*
 * Callback state for dsl_snapshot_destroy_one(), shared across all
 * descendant filesystems visited by dmu_objset_find().
 */
struct destroyarg {
	dsl_sync_task_group_t *dstg;	/* group gathering per-snapshot tasks */
	char *snapname;			/* snapshot name (portion after '@') */
	char *failed;			/* out: fs name that caused a failure */
};
855
/*
 * dmu_objset_find() callback: take ownership of "<name>@<snapname>" and
 * queue a destroy sync task for it in da->dstg.  A missing snapshot
 * (ENOENT) is not an error; any other failure records 'name' in
 * da->failed for the caller to report.
 */
static int
dsl_snapshot_destroy_one(char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	char *cp;
	int err;

	/*
	 * Temporarily turn 'name' into the full snapshot name in place.
	 * NOTE(review): assumes the buffer supplied by dmu_objset_find()
	 * has room for "@<snapname>" — verify against that caller.
	 */
	(void) strcat(name, "@");
	(void) strcat(name, da->snapname);
	err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
	    da->dstg, &ds);
	/* restore 'name' to the plain filesystem name */
	cp = strchr(name, '@');
	*cp = '\0';
	if (err == 0) {
		dsl_dataset_make_exclusive(ds, da->dstg);
		/* evict any cached user data before the dataset goes away */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, ds, da->dstg, 0);
	} else if (err == ENOENT) {
		/* this descendant has no such snapshot; skip it */
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}
885
886 /*
887  * Destroy 'snapname' in all descendants of 'fsname'.
888  */
889 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
/*
 * Destroy snapshot 'snapname' in 'fsname' and every descendant
 * filesystem, atomically as one sync task group.  On error, 'fsname'
 * is overwritten with the name of the filesystem that failed.
 */
int
dsl_snapshots_destroy(char *fsname, char *snapname)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;

	/* gather one destroy task per descendant that has the snapshot */
	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	/*
	 * Whether or not the group ran, every queued dataset must be
	 * disowned here (success or failure).
	 */
	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		dsl_dataset_disown(ds, da.dstg);
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
928
929 /*
930  * ds must be opened as OWNER.  On return (whether successful or not),
931  * ds will be closed and caller can no longer dereference it.
932  */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		/* evict any cached user state before the dataset goes away */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    ds, tag, 0);
		goto out;
	}

	dd = ds->ds_dir;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}

	dmu_objset_close(os);
	/* ESRCH from dmu_object_next() means we walked all objects */
	if (err != ESRCH)
		goto out;

	/* take an extra hold on dd for the dsl_dir destroy task below */
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	if (ds->ds_user_ptr) {
		/*
		 * We need to sync out all in-flight IO before we try
		 * to evict (the dataset evict func is trying to clear
		 * the cached entries for this dataset in the ARC).
		 */
		txg_wait_synced(dd->dd_pool, 0);
	}

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	if (ds->ds_user_ptr) {
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}
	/* both destroys must commit in the same txg, hence one group */
	dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
	dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
	    dsl_dataset_destroy_sync, ds, tag, 0);
	dsl_sync_task_create(dstg, dsl_dir_destroy_check,
	    dsl_dir_destroy_sync, dd, FTAG, 0);
	err = dsl_sync_task_group_wait(dstg);
	dsl_sync_task_group_destroy(dstg);
	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
1026
/*
 * Roll 'ds' back to its most recent snapshot (or to emptiness for a ZPL
 * objset with no snapshots).  'ds' must already be owned by the caller.
 */
int
dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
{
	int err;

	ASSERT(ds->ds_owner);

	dsl_dataset_make_exclusive(ds, ds->ds_owner);
	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
	    ds, &ost, 0);
	/* drop exclusive access */
	mutex_enter(&ds->ds_lock);
	rw_exit(&ds->ds_rwlock);
	/* wake any threads blocked waiting for exclusive access to end */
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);
	return (err);
}
1045
1046 void *
1047 dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
1048     void *p, dsl_dataset_evict_func_t func)
1049 {
1050         void *old;
1051
1052         mutex_enter(&ds->ds_lock);
1053         old = ds->ds_user_ptr;
1054         if (old == NULL) {
1055                 ds->ds_user_ptr = p;
1056                 ds->ds_user_evict_func = func;
1057         }
1058         mutex_exit(&ds->ds_lock);
1059         return (old);
1060 }
1061
/*
 * Return the dataset's current user pointer (unlocked read; may be NULL).
 */
void *
dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
{
	return (ds->ds_user_ptr);
}
1067
1068
/*
 * Return a pointer to the dataset's root block pointer (in ds_phys).
 */
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}
1074
1075 void
1076 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1077 {
1078         ASSERT(dmu_tx_is_syncing(tx));
1079         /* If it's the meta-objset, set dp_meta_rootbp */
1080         if (ds == NULL) {
1081                 tx->tx_pool->dp_meta_rootbp = *bp;
1082         } else {
1083                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1084                 ds->ds_phys->ds_bp = *bp;
1085         }
1086 }
1087
/*
 * Return the spa that this dataset's pool belongs to.
 */
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
1093
/*
 * Mark 'ds' dirty in tx's txg so it will be written out; a NULL 'ds'
 * (the meta-objset) needs no such bookkeeping.  Snapshots must never
 * be dirtied.
 */
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_user_ptr != NULL);

	/* a non-zero next_snap_obj identifies a snapshot */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	/* txg_list_add returns 0 only on the first add for this txg */
	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
1114
1115 /*
1116  * The unique space in the head dataset can be calculated by subtracting
1117  * the space used in the most recent snapshot, that is still being used
1118  * in this file system, from the space currently in use.  To figure out
1119  * the space in the most recent snapshot still in use, we need to take
1120  * the total space used in the snapshot and subtract out the space that
1121  * has been freed up since the snapshot was taken.
1122  */
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;		/* most-recent-snapshot total usage */
	uint64_t dlused, dlcomp, dluncomp;

	/* only valid for the head dataset, never a snapshot */
	ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	/* dlused = space freed (deadlisted) since the last snapshot */
	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
	    &dluncomp));

	ASSERT3U(dlused, <=, mrs_used);
	/* unique = used - (snapshot space still shared with the head) */
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	/* the value is now exact; record that on new-enough pools */
	if (!DS_UNIQUE_IS_ACCURATE(ds) &&
	    spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
1148
/*
 * Return the dataset's unique byte count, recomputing it first for a
 * head dataset whose stored value is not known to be accurate.
 */
static uint64_t
dsl_dataset_unique(dsl_dataset_t *ds)
{
	if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
		dsl_dataset_recalc_head_uniq(ds);

	return (ds->ds_phys->ds_unique_bytes);
}
1157
/*
 * Context passed to kill_blkptr() while traversing a dataset to free
 * its blocks.
 */
struct killarg {
	dsl_dataset_t *ds;	/* dataset whose blocks are being freed */
	zio_t *zio;		/* root zio for the async frees */
	dmu_tx_t *tx;		/* open transaction for the destroy */
};
1163
/*
 * traverse_dataset() callback: free one block pointer.  Intent-log
 * blocks are freed directly (they carry no space accounting); all
 * other blocks go through dsl_dataset_block_kill() so the dataset's
 * accounting is updated.  Always returns 0 (traversal continues).
 */
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
    const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;

	if (bp == NULL)
		return (0);

	/*
	 * A ZIL block shows up either at level -1 with a nonzero blkid,
	 * or with an object but no dnode.
	 */
	if ((zb->zb_level == -1ULL && zb->zb_blkid != 0) ||
	    (zb->zb_object != 0 && dnp == NULL)) {
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		VERIFY3U(0, ==, dsl_free(ka->zio, ka->tx->tx_pool,
		    ka->tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT));
	} else {
		/* only blocks born after the previous snap belong to us */
		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, ka->zio, ka->tx);
	}

	return (0);
}
1189
/*
 * Sync-task check function for rollback: verify that 'ds' can be rolled
 * back in this txg.  arg2 is the objset type requested for an empty
 * rollback.
 */
/* ARGSUSED */
static int
dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;

	/*
	 * We can only roll back to emptyness if it is a ZPL objset.
	 */
	if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
		return (EINVAL);

	/*
	 * This must not be a snapshot.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	return (0);
}
1218
/*
 * Sync-task function for rollback: discard all changes made since the
 * most recent snapshot (or reset to an empty objset of type *ost when
 * there is no usable snapshot).  Runs in syncing context.
 */
/* ARGSUSED */
static void
dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (ds->ds_user_ptr != NULL) {
		/*
		 * We need to make sure that the objset_impl_t is reopened after
		 * we do the rollback, otherwise it will have the wrong
		 * objset_phys_t.  Normally this would happen when this
		 * dataset-open is closed, thus causing the
		 * dataset to be immediately evicted.  But when doing "zfs recv
		 * -F", we reopen the objset before that, so that there is no
		 * window where the dataset is closed and inconsistent.
		 */
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}

	/* Transfer space that was freed since last snap back to the head. */
	{
		uint64_t used;

		VERIFY(0 == bplist_space_birthrange(&ds->ds_deadlist,
		    ds->ds_origin_txg, UINT64_MAX, &used));
		dsl_dir_transfer_space(ds->ds_dir, used,
		    DD_USED_SNAP, DD_USED_HEAD, tx);
	}

	/* Zero out the deadlist. */
	bplist_close(&ds->ds_deadlist);
	bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
	ds->ds_phys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
	    ds->ds_phys->ds_deadlist_obj));

	{
		/*
		 * Free blkptrs that we gave birth to - this covers
		 * claimed but not played log blocks too.
		 */
		zio_t *zio;
		struct killarg ka;

		zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED);
		ka.ds = ds;
		ka.zio = zio;
		ka.tx = tx;
		(void) traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		(void) zio_wait(zio);
	}

	/* everything born since the last snap is now freed */
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);

	if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
		/* Change our contents to that of the prev snapshot */

		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT3U(ds->ds_phys->ds_used_bytes, <=,
		    ds->ds_prev->ds_phys->ds_used_bytes);

		ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
		ds->ds_phys->ds_used_bytes =
		    ds->ds_prev->ds_phys->ds_used_bytes;
		ds->ds_phys->ds_compressed_bytes =
		    ds->ds_prev->ds_phys->ds_compressed_bytes;
		ds->ds_phys->ds_uncompressed_bytes =
		    ds->ds_prev->ds_phys->ds_uncompressed_bytes;
		ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;

		/*
		 * If prev's next snapshot is us (i.e. we are not a
		 * clone branch), the head now shares all of prev's
		 * blocks, so prev has nothing unique left.
		 */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ds->ds_prev->ds_phys->ds_unique_bytes = 0;
		}
	} else {
		/* No usable snapshot: reset to a fresh, empty objset. */
		objset_impl_t *osi;

		ASSERT3U(ds->ds_phys->ds_used_bytes, ==, 0);
		ASSERT3U(ds->ds_phys->ds_compressed_bytes, ==, 0);
		ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, ==, 0);

		bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
		ds->ds_phys->ds_flags = 0;
		ds->ds_phys->ds_unique_bytes = 0;
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
		    SPA_VERSION_UNIQUE_ACCURATE)
			ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

		osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
		    &ds->ds_phys->ds_bp, *ost, tx);
#ifdef _KERNEL
		/* recreate the ZPL root so the fs is mountable */
		zfs_create_fs(&osi->os, kcred, NULL, tx);
#endif
	}

	spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", ds->ds_object);
}
1326
/*
 * Check function for the "destroy begin" sync task: verify the head
 * dataset may be destroyed (no snapshots of its own, no child dirs)
 * before we mark it inconsistent and start freeing objects.
 */
/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EINVAL);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
1358
/*
 * Sync function for the "destroy begin" task: flag the dataset as
 * inconsistent on disk so a crash during the open-context object frees
 * can be detected and cleaned up.
 */
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);
}
1373
/*
 * Check function for the final destroy sync task: verify 'ds' (head or
 * snapshot) may be destroyed in this txg.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;

	/* we have an owner hold, so noone else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/* Can't delete a branch point. */
	if (ds->ds_phys->ds_num_children > 1)
		return (EEXIST);

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	/* XXX we should do some i/o error checking... */
	return (0);
}
1406
/*
 * State used by dsl_dataset_drain_refs() to wait until the last dbuf
 * reference on a dataset is released.
 */
struct refsarg {
	kmutex_t lock;		/* protects 'gone' */
	boolean_t gone;		/* set by the dbuf-user evict callback */
	kcondvar_t cv;		/* signaled when 'gone' becomes TRUE */
};
1412
/*
 * dbuf-user evict callback: the last reference to the dataset's bonus
 * buffer is gone; wake the thread blocked in dsl_dataset_drain_refs().
 */
/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}
1424
/*
 * Drop our hold on the dataset's bonus dbuf and block until every other
 * holder has released it (signaled via dsl_dataset_refs_gone()).  On
 * return ds->ds_dbuf and ds->ds_phys are NULL.
 */
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	bzero(&arg, sizeof(arg));
	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;	/* redundant after bzero, kept for clarity */
	/* swap in our eviction callback so we hear about the last release */
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	/* cv_wait can wake spuriously; re-check the predicate in a loop */
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
1447
1448 void
1449 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
1450 {
1451         dsl_dataset_t *ds = arg1;
1452         zio_t *zio;
1453         int err;
1454         int after_branch_point = FALSE;
1455         dsl_pool_t *dp = ds->ds_dir->dd_pool;
1456         objset_t *mos = dp->dp_meta_objset;
1457         dsl_dataset_t *ds_prev = NULL;
1458         uint64_t obj;
1459
1460         ASSERT(ds->ds_owner);
1461         ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
1462         ASSERT(ds->ds_prev == NULL ||
1463             ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1464         ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1465
1466         /* signal any waiters that this dataset is going away */
1467         mutex_enter(&ds->ds_lock);
1468         ds->ds_owner = dsl_reaper;
1469         cv_broadcast(&ds->ds_exclusive_cv);
1470         mutex_exit(&ds->ds_lock);
1471
1472         /* Remove our reservation */
1473         if (ds->ds_reserved != 0) {
1474                 uint64_t val = 0;
1475                 dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
1476                 ASSERT3U(ds->ds_reserved, ==, 0);
1477         }
1478
1479         ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1480
1481         dsl_pool_ds_destroyed(ds, tx);
1482
1483         obj = ds->ds_object;
1484
1485         if (ds->ds_phys->ds_prev_snap_obj != 0) {
1486                 if (ds->ds_prev) {
1487                         ds_prev = ds->ds_prev;
1488                 } else {
1489                         VERIFY(0 == dsl_dataset_hold_obj(dp,
1490                             ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1491                 }
1492                 after_branch_point =
1493                     (ds_prev->ds_phys->ds_next_snap_obj != obj);
1494
1495                 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1496                 if (after_branch_point &&
1497                     ds_prev->ds_phys->ds_next_clones_obj != 0) {
1498                         VERIFY(0 == zap_remove_int(mos,
1499                             ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
1500                         if (ds->ds_phys->ds_next_snap_obj != 0) {
1501                                 VERIFY(0 == zap_add_int(mos,
1502                                     ds_prev->ds_phys->ds_next_clones_obj,
1503                                     ds->ds_phys->ds_next_snap_obj, tx));
1504                         }
1505                 }
1506                 if (after_branch_point &&
1507                     ds->ds_phys->ds_next_snap_obj == 0) {
1508                         /* This clone is toast. */
1509                         ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1510                         ds_prev->ds_phys->ds_num_children--;
1511                 } else if (!after_branch_point) {
1512                         ds_prev->ds_phys->ds_next_snap_obj =
1513                             ds->ds_phys->ds_next_snap_obj;
1514                 }
1515         }
1516
1517         zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1518
1519         if (ds->ds_phys->ds_next_snap_obj != 0) {
1520                 blkptr_t bp;
1521                 dsl_dataset_t *ds_next;
1522                 uint64_t itor = 0;
1523                 uint64_t old_unique;
1524                 int64_t used = 0, compressed = 0, uncompressed = 0;
1525
1526                 VERIFY(0 == dsl_dataset_hold_obj(dp,
1527                     ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1528                 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1529
1530                 old_unique = dsl_dataset_unique(ds_next);
1531
1532                 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1533                 ds_next->ds_phys->ds_prev_snap_obj =
1534                     ds->ds_phys->ds_prev_snap_obj;
1535                 ds_next->ds_phys->ds_prev_snap_txg =
1536                     ds->ds_phys->ds_prev_snap_txg;
1537                 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1538                     ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1539
1540                 /*
1541                  * Transfer to our deadlist (which will become next's
1542                  * new deadlist) any entries from next's current
1543                  * deadlist which were born before prev, and free the
1544                  * other entries.
1545                  *
1546                  * XXX we're doing this long task with the config lock held
1547                  */
1548                 while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1549                         if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1550                                 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1551                                     &bp, tx));
1552                                 if (ds_prev && !after_branch_point &&
1553                                     bp.blk_birth >
1554                                     ds_prev->ds_phys->ds_prev_snap_txg) {
1555                                         ds_prev->ds_phys->ds_unique_bytes +=
1556                                             bp_get_dasize(dp->dp_spa, &bp);
1557                                 }
1558                         } else {
1559                                 used += bp_get_dasize(dp->dp_spa, &bp);
1560                                 compressed += BP_GET_PSIZE(&bp);
1561                                 uncompressed += BP_GET_UCSIZE(&bp);
1562                                 /* XXX check return value? */
1563                                 (void) dsl_free(zio, dp, tx->tx_txg,
1564                                     &bp, NULL, NULL, ARC_NOWAIT);
1565                         }
1566                 }
1567
1568                 ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1569
1570                 /* change snapused */
1571                 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1572                     -used, -compressed, -uncompressed, tx);
1573
1574                 /* free next's deadlist */
1575                 bplist_close(&ds_next->ds_deadlist);
1576                 bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1577
1578                 /* set next's deadlist to our deadlist */
1579                 bplist_close(&ds->ds_deadlist);
1580                 ds_next->ds_phys->ds_deadlist_obj =
1581                     ds->ds_phys->ds_deadlist_obj;
1582                 VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1583                     ds_next->ds_phys->ds_deadlist_obj));
1584                 ds->ds_phys->ds_deadlist_obj = 0;
1585
1586                 if (ds_next->ds_phys->ds_next_snap_obj != 0) {
1587                         /*
1588                          * Update next's unique to include blocks which
1589                          * were previously shared by only this snapshot
1590                          * and it.  Those blocks will be born after the
1591                          * prev snap and before this snap, and will have
1592                          * died after the next snap and before the one
1593                          * after that (ie. be on the snap after next's
1594                          * deadlist).
1595                          *
1596                          * XXX we're doing this long task with the
1597                          * config lock held
1598                          */
1599                         dsl_dataset_t *ds_after_next;
1600                         uint64_t space;
1601
1602                         VERIFY(0 == dsl_dataset_hold_obj(dp,
1603                             ds_next->ds_phys->ds_next_snap_obj,
1604                             FTAG, &ds_after_next));
1605
1606                         VERIFY(0 ==
1607                             bplist_space_birthrange(&ds_after_next->ds_deadlist,
1608                             ds->ds_phys->ds_prev_snap_txg,
1609                             ds->ds_phys->ds_creation_txg, &space));
1610                         ds_next->ds_phys->ds_unique_bytes += space;
1611
1612                         dsl_dataset_rele(ds_after_next, FTAG);
1613                         ASSERT3P(ds_next->ds_prev, ==, NULL);
1614                 } else {
1615                         ASSERT3P(ds_next->ds_prev, ==, ds);
1616                         dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1617                         ds_next->ds_prev = NULL;
1618                         if (ds_prev) {
1619                                 VERIFY(0 == dsl_dataset_get_ref(dp,
1620                                     ds->ds_phys->ds_prev_snap_obj,
1621                                     ds_next, &ds_next->ds_prev));
1622                         }
1623
1624                         dsl_dataset_recalc_head_uniq(ds_next);
1625
1626                         /*
1627                          * Reduce the amount of our unconsmed refreservation
1628                          * being charged to our parent by the amount of
1629                          * new unique data we have gained.
1630                          */
1631                         if (old_unique < ds_next->ds_reserved) {
1632                                 int64_t mrsdelta;
1633                                 uint64_t new_unique =
1634                                     ds_next->ds_phys->ds_unique_bytes;
1635
1636                                 ASSERT(old_unique <= new_unique);
1637                                 mrsdelta = MIN(new_unique - old_unique,
1638                                     ds_next->ds_reserved - old_unique);
1639                                 dsl_dir_diduse_space(ds->ds_dir,
1640                                     DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1641                         }
1642                 }
1643                 dsl_dataset_rele(ds_next, FTAG);
1644         } else {
1645                 /*
1646                  * There's no next snapshot, so this is a head dataset.
1647                  * Destroy the deadlist.  Unless it's a clone, the
1648                  * deadlist should be empty.  (If it's a clone, it's
1649                  * safe to ignore the deadlist contents.)
1650                  */
1651                 struct killarg ka;
1652
1653                 ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1654                 bplist_close(&ds->ds_deadlist);
1655                 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1656                 ds->ds_phys->ds_deadlist_obj = 0;
1657
1658                 /*
1659                  * Free everything that we point to (that's born after
1660                  * the previous snapshot, if we are a clone)
1661                  *
1662                  * NB: this should be very quick, because we already
1663                  * freed all the objects in open context.
1664                  */
1665                 ka.ds = ds;
1666                 ka.zio = zio;
1667                 ka.tx = tx;
1668                 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1669                     TRAVERSE_POST, kill_blkptr, &ka);
1670                 ASSERT3U(err, ==, 0);
1671                 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1672                     ds->ds_phys->ds_unique_bytes == 0);
1673         }
1674
1675         err = zio_wait(zio);
1676         ASSERT3U(err, ==, 0);
1677
1678         if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1679                 /* Erase the link in the dir */
1680                 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1681                 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1682                 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1683                 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1684                 ASSERT(err == 0);
1685         } else {
1686                 /* remove from snapshot namespace */
1687                 dsl_dataset_t *ds_head;
1688                 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1689                 VERIFY(0 == dsl_dataset_hold_obj(dp,
1690                     ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1691                 VERIFY(0 == dsl_dataset_get_snapname(ds));
1692 #ifdef ZFS_DEBUG
1693                 {
1694                         uint64_t val;
1695
1696                         err = dsl_dataset_snap_lookup(ds_head,
1697                             ds->ds_snapname, &val);
1698                         ASSERT3U(err, ==, 0);
1699                         ASSERT3U(val, ==, obj);
1700                 }
1701 #endif
1702                 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1703                 ASSERT(err == 0);
1704                 dsl_dataset_rele(ds_head, FTAG);
1705         }
1706
1707         if (ds_prev && ds->ds_prev != ds_prev)
1708                 dsl_dataset_rele(ds_prev, FTAG);
1709
1710         spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1711         spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
1712             cr, "dataset = %llu", ds->ds_object);
1713
1714         if (ds->ds_phys->ds_next_clones_obj != 0) {
1715                 uint64_t count;
1716                 ASSERT(0 == zap_count(mos,
1717                     ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1718                 VERIFY(0 == dmu_object_free(mos,
1719                     ds->ds_phys->ds_next_clones_obj, tx));
1720         }
1721         if (ds->ds_phys->ds_props_obj != 0)
1722                 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1723         dsl_dir_close(ds->ds_dir, ds);
1724         ds->ds_dir = NULL;
1725         dsl_dataset_drain_refs(ds, tag);
1726         VERIFY(0 == dmu_object_free(mos, obj, tx));
1727 }
1728
/*
 * Verify (and charge) the space needed to take a snapshot of ds in
 * this txg.  Only does real work in syncing context.  Returns 0 on
 * success or ENOSPC if the dir cannot absorb the reservation shift.
 */
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	/* The preliminary (open-context) check is skipped; see caller. */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
1755
1756 /* ARGSUSED */
1757 int
1758 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1759 {
1760         dsl_dataset_t *ds = arg1;
1761         const char *snapname = arg2;
1762         int err;
1763         uint64_t value;
1764
1765         /*
1766          * We don't allow multiple snapshots of the same txg.  If there
1767          * is already one, try again.
1768          */
1769         if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1770                 return (EAGAIN);
1771
1772         /*
1773          * Check for conflicting name snapshot name.
1774          */
1775         err = dsl_dataset_snap_lookup(ds, snapname, &value);
1776         if (err == 0)
1777                 return (EEXIST);
1778         if (err != ENOENT)
1779                 return (err);
1780
1781         /*
1782          * Check that the dataset's name is not too long.  Name consists
1783          * of the dataset's length + 1 for the @-sign + snapshot name's length
1784          */
1785         if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1786                 return (ENAMETOOLONG);
1787
1788         err = dsl_dataset_snapshot_reserve_space(ds, tx);
1789         if (err)
1790                 return (err);
1791
1792         ds->ds_trysnap_txg = tx->tx_txg;
1793         return (0);
1794 }
1795
/*
 * Sync task that creates a snapshot of ds named snapname (arg2).
 * A new dsl_dataset_phys_t is allocated which takes over the head's
 * current state (bp, deadlist, space accounting); it is spliced in as
 * the head's new previous snapshot, and the head gets a fresh empty
 * deadlist.  Must run in syncing context with the pool config lock
 * held as writer.
 */
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	/* Allocate the snapshot's phys object in the MOS. */
	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	/* The snapshot inherits the head's deadlist and space totals. */
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	/* Splice the new snapshot in after the old previous snapshot. */
	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			/*
			 * The head is a clone: in the origin's next-clones
			 * ZAP, the new snapshot replaces the head.
			 */
			VERIFY3U(0, ==, zap_remove_int(mos,
			    next_clones_obj, dsphys->ds_next_snap_obj, tx));
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
		    add, 0, 0, tx);
	}

	/*
	 * Point the head at the new snapshot and give it a new, empty
	 * deadlist (the old one now belongs to the snapshot).
	 */
	bplist_close(&ds->ds_deadlist);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
	ds->ds_phys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
	    ds->ds_phys->ds_deadlist_obj));

	/* Enter the snapshot into the head's snapshot-name ZAP. */
	dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	/* Swap our cached ds_prev reference over to the new snapshot. */
	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_pool_ds_snapshotted(ds, tx);

	spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
	    "dataset = %llu", dsobj);
}
1901
/*
 * Write out this dataset's dirty objset state for the syncing txg.
 * Only head datasets are synced here (the assertion on
 * ds_next_snap_obj == 0 rules out snapshots).
 */
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_user_ptr != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	/* Dirty our dir so its accounting is written, then sync the objset. */
	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_user_ptr, zio, tx);
}
1919
1920 void
1921 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1922 {
1923         uint64_t refd, avail, uobjs, aobjs;
1924
1925         dsl_dir_stats(ds->ds_dir, nv);
1926
1927         dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1928         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1929         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1930
1931         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1932             ds->ds_phys->ds_creation_time);
1933         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
1934             ds->ds_phys->ds_creation_txg);
1935         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
1936             ds->ds_quota);
1937         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
1938             ds->ds_reserved);
1939         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
1940             ds->ds_phys->ds_guid);
1941
1942         if (ds->ds_phys->ds_next_snap_obj) {
1943                 /*
1944                  * This is a snapshot; override the dd's space used with
1945                  * our unique space and compression ratio.
1946                  */
1947                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
1948                     ds->ds_phys->ds_unique_bytes);
1949                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
1950                     ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
1951                     (ds->ds_phys->ds_uncompressed_bytes * 100 /
1952                     ds->ds_phys->ds_compressed_bytes));
1953         }
1954 }
1955
/*
 * Fill in the cheap-to-compute fields of *stat directly from the
 * dataset's phys.  Resolving the clone origin's name requires taking
 * the pool config lock as reader to follow dd_origin_obj safely.
 */
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	/* A nonzero next-snapshot pointer identifies a snapshot. */
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}
1979
/*
 * Return the in-core fsid guid for this dataset.  This may differ from
 * the on-disk ds_fsid_guid until the next sync (see dsl_dataset_sync()).
 */
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}
1985
1986 void
1987 dsl_dataset_space(dsl_dataset_t *ds,
1988     uint64_t *refdbytesp, uint64_t *availbytesp,
1989     uint64_t *usedobjsp, uint64_t *availobjsp)
1990 {
1991         *refdbytesp = ds->ds_phys->ds_used_bytes;
1992         *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
1993         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
1994                 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
1995         if (ds->ds_quota != 0) {
1996                 /*
1997                  * Adjust available bytes according to refquota
1998                  */
1999                 if (*refdbytesp < ds->ds_quota)
2000                         *availbytesp = MIN(*availbytesp,
2001                             ds->ds_quota - *refdbytesp);
2002                 else
2003                         *availbytesp = 0;
2004         }
2005         *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2006         *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2007 }
2008
2009 boolean_t
2010 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2011 {
2012         dsl_pool_t *dp = ds->ds_dir->dd_pool;
2013
2014         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2015             dsl_pool_sync_context(dp));
2016         if (ds->ds_prev == NULL)
2017                 return (B_FALSE);
2018         if (ds->ds_phys->ds_bp.blk_birth >
2019             ds->ds_prev->ds_phys->ds_creation_txg)
2020                 return (B_TRUE);
2021         return (B_FALSE);
2022 }
2023
2024 /* ARGSUSED */
2025 static int
2026 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2027 {
2028         dsl_dataset_t *ds = arg1;
2029         char *newsnapname = arg2;
2030         dsl_dir_t *dd = ds->ds_dir;
2031         dsl_dataset_t *hds;
2032         uint64_t val;
2033         int err;
2034
2035         err = dsl_dataset_hold_obj(dd->dd_pool,
2036             dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2037         if (err)
2038                 return (err);
2039
2040         /* new name better not be in use */
2041         err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2042         dsl_dataset_rele(hds, FTAG);
2043
2044         if (err == 0)
2045                 err = EEXIST;
2046         else if (err == ENOENT)
2047                 err = 0;
2048
2049         /* dataset name + 1 for the "@" + the new snapshot name must fit */
2050         if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2051                 err = ENAMETOOLONG;
2052
2053         return (err);
2054 }
2055
/*
 * Sync task for renaming a single snapshot: remove the old name from
 * the head dataset's snapshot-name ZAP, update the cached ds_snapname,
 * and insert the new name.  The check function has already verified
 * that the new name is unused and fits.
 */
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	/* Only snapshots have names in the snapnames ZAP. */
	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	/* Refresh the cached name, then drop the old ZAP entry. */
	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	/* ds_lock guards the cached snapshot name. */
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}
2086
/* State shared by the callbacks implementing recursive snapshot rename. */
struct renamesnaparg {
	dsl_sync_task_group_t *dstg;	/* one rename task per filesystem */
	char failed[MAXPATHLEN];	/* name that failed, reported to caller */
	char *oldsnap;			/* snapshot name to rename from */
	char *newsnap;			/* snapshot name to rename to */
};
2093
/*
 * dmu_objset_find() callback for recursive snapshot rename.  Given a
 * filesystem name, extend it in place to "<name>@<oldsnap>", check
 * rename permission, unmount the snapshot, and queue a rename sync
 * task on ra->dstg.  A missing snapshot (ENOENT) is silently skipped;
 * any other failure records the offending name in ra->failed.
 */
static int
dsl_snapshot_rename_one(char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *cp;
	int err;

	/*
	 * Build the full snapshot name in the caller's buffer; it is
	 * truncated back to the filesystem name (via *cp = '\0') once
	 * the dataset hold has been taken.
	 */
	cp = name + strlen(name);
	*cp = '@';
	(void) strcpy(cp + 1, ra->oldsnap);

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	err = zfs_secpolicy_rename_perms(name, name, CRED());
	if (err == ENOENT) {
		return (0);
	} else if (err) {
		/* Buffer still holds the full snapshot name here. */
		(void) strcpy(ra->failed, name);
		return (err);
	}

#ifdef _KERNEL
	/*
	 * For all filesystems undergoing rename, we'll need to unmount it.
	 */
	(void) zfs_unmount_snap(name, NULL);
#endif
	err = dsl_dataset_hold(name, ra->dstg, &ds);
	*cp = '\0';
	if (err == ENOENT) {
		return (0);
	} else if (err) {
		/* Buffer was already truncated back to the fs name. */
		(void) strcpy(ra->failed, name);
		return (err);
	}

	/* The hold (tagged with ra->dstg) is released by our caller's loop. */
	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}
2138
/*
 * Rename a snapshot in this filesystem and in all of its descendants,
 * queued as a single sync task group so the renames commit together.
 * On failure, oldname is overwritten with the name of the dataset
 * that caused the failure.
 */
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname);

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len + 1);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	/* Point at the snap components of the (full) old and new names. */
	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	/* Queue one rename task per filesystem at or below fsname. */
	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len + 1);

	/* Execute all queued renames in one txg. */
	if (err == 0) {
		err = dsl_sync_task_group_wait(ra->dstg);
	}

	/*
	 * Release the holds taken by dsl_snapshot_rename_one() and record
	 * the full snapshot name of any task that failed its check.
	 */
	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strcat(ra->failed, "@");
			(void) strcat(ra->failed, ra->newsnap);
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	/* Report the failing name back through the caller's buffer. */
	if (err)
		(void) strcpy(oldname, ra->failed);

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}
2192
2193 static int
2194 dsl_valid_rename(char *oldname, void *arg)
2195 {
2196         int delta = *(int *)arg;
2197
2198         if (strlen(oldname) + delta >= MAXNAMELEN)
2199                 return (ENAMETOOLONG);
2200
2201         return (0);
2202 }
2203
#pragma weak dmu_objset_rename = dsl_dataset_rename
/*
 * Rename a dataset (filesystem or snapshot).
 *
 * If oldname names a filesystem (no "@snap" component), the rename is
 * delegated to dsl_dir_rename() after checking that no descendant name
 * would grow past MAXNAMELEN.  If it names a snapshot, the new name
 * must be a snapshot of the same filesystem; the rename is performed
 * either recursively over all descendants or as a single sync task.
 */
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);
	/*
	 * If there are more than 2 references there may be holds
	 * hanging around that haven't been cleared out yet.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		txg_wait_synced(dd->dd_pool, 0);
	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (!err)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}
	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* new name must be snapshot in same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}
2267
/* One snapshot entry in the promote lists below. */
struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

/*
 * Arguments shared by dsl_dataset_promote_{check,sync}.  The three
 * lists are built by the promote setup code (not visible here); from
 * their usage below, the head of shared_snaps is the origin snapshot.
 * NOTE(review): exact list membership inferred from usage — confirm
 * against the snaplist construction in the promote entry point.
 * The uint64_t fields are space totals computed by the check function
 * and consumed by the sync function.
 */
struct promotearg {
	list_t shared_snaps, origin_snaps, clone_snaps;
	dsl_dataset_t *origin_origin, *origin_head;
	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
};

/* Sum of deadlist space born at or after mintxg across a snapshot list. */
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2280
/*
 * Sync-task check for promoting the clone hds: validate that promotion
 * is possible (it is a real clone, not marked NOPROMOTE, no snapshot
 * name conflicts) and precompute the space totals that the sync
 * function will transfer between the clone's and origin's dsl_dirs.
 */
/* ARGSUSED */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;
	int err;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* compute origin's new unique space */
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	err = bplist_space_birthrange(&snap->ds->ds_deadlist,
	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
	if (err)
		return (err);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Consider the incremental changes
	 * to used for each snapshot:
	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
	 * So each snapshot gave birth to:
	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
	 * So a sequence would look like:
	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
	 * Which simplifies to:
	 * uN + kN + kN-1 + ... + k1 + k0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + kN + kN-1 + ... + kM - uM-1
	 */
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0)
			return (EEXIST);
		if (err != ENOENT)
			return (err);

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj == 0)
			continue;

		/* Note: assignment inside the condition is intentional. */
		if (err = bplist_space(&ds->ds_deadlist,
		    &dlused, &dlcomp, &dluncomp))
			return (err);
		pa->used += dlused;
		pa->comp += dlcomp;
		pa->uncomp += dluncomp;
	}

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->origin_origin) {
		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
	}

	/* Check that there is enough space here */
	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
	    pa->used);
	if (err)
		return (err);

	/*
	 * Compute the amounts of space that will be used by snapshots
	 * after the promotion (for both origin and clone).  For each,
	 * it is the amount of space that will be on all of their
	 * deadlists (that was not born before their new origin).
	 */
	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		uint64_t space;

		/*
		 * Note, typically this will not be a clone of a clone,
		 * so snap->ds->ds_origin_txg will be < TXG_INITIAL, so
		 * these snaplist_space() -> bplist_space_birthrange()
		 * calls will be fast because they do not have to
		 * iterate over all bps.
		 */
		snap = list_head(&pa->origin_snaps);
		err = snaplist_space(&pa->shared_snaps,
		    snap->ds->ds_origin_txg, &pa->cloneusedsnap);
		if (err)
			return (err);

		err = snaplist_space(&pa->clone_snaps,
		    snap->ds->ds_origin_txg, &space);
		if (err)
			return (err);
		pa->cloneusedsnap += space;
	}
	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		err = snaplist_space(&pa->origin_snaps,
		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
		if (err)
			return (err);
	}

	return (0);
}
2406
/*
 * Sync task for dsl_dataset_promote(): make the clone (hds) the new
 * head of the origin's snapshot chain.  Moves the shared snapshots
 * (those at or before the clone's origin) from the origin head's
 * dsl_dir into the clone's, swaps the origin linkage between the two
 * dsl_dirs, and transfers the corresponding space accounting.  The
 * VERIFYs here cover operations that should already have been
 * validated in open context by the promote check function.
 */
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dataset_t *hds = arg1;
        struct promotearg *pa = arg2;
        struct promotenode *snap = list_head(&pa->shared_snaps);
        dsl_dataset_t *origin_ds = snap->ds;
        dsl_dataset_t *origin_head;
        dsl_dir_t *dd = hds->ds_dir;
        dsl_pool_t *dp = hds->ds_dir->dd_pool;
        dsl_dir_t *odd = NULL;
        uint64_t oldnext_obj;
        int64_t delta;

        ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

        snap = list_head(&pa->origin_snaps);
        origin_head = snap->ds;

        /*
         * We need to explicitly open odd, since origin_ds's dd will be
         * changing.
         */
        VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
            NULL, FTAG, &odd));

        /* change origin's next snap */
        dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
        oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
        snap = list_tail(&pa->clone_snaps);
        ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
        origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

        /* change the origin's next clone */
        if (origin_ds->ds_phys->ds_next_clones_obj) {
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    origin_ds->ds_phys->ds_next_clones_obj,
                    origin_ds->ds_phys->ds_next_snap_obj, tx));
                VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
                    origin_ds->ds_phys->ds_next_clones_obj,
                    oldnext_obj, tx));
        }

        /*
         * change origin: the clone's dir inherits the old origin's
         * origin, and the old origin head's dir now originates at
         * origin_ds (the roles of the two dirs are exchanged).
         */
        dmu_buf_will_dirty(dd->dd_dbuf, tx);
        ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
        dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
        hds->ds_origin_txg = origin_head->ds_origin_txg;
        dmu_buf_will_dirty(odd->dd_dbuf, tx);
        odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
        origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg;

        /* move snapshots to this dir */
        for (snap = list_head(&pa->shared_snaps); snap;
            snap = list_next(&pa->shared_snaps, snap)) {
                dsl_dataset_t *ds = snap->ds;

                /* unregister props as dsl_dir is changing */
                if (ds->ds_user_ptr) {
                        ds->ds_user_evict_func(ds, ds->ds_user_ptr);
                        ds->ds_user_ptr = NULL;
                }
                /* move snap name entry */
                VERIFY(0 == dsl_dataset_get_snapname(ds));
                VERIFY(0 == dsl_dataset_snap_remove(origin_head,
                    ds->ds_snapname, tx));
                VERIFY(0 == zap_add(dp->dp_meta_objset,
                    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
                    8, 1, &ds->ds_object, tx));
                /* change containing dsl_dir */
                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
                ds->ds_phys->ds_dir_obj = dd->dd_object;
                ASSERT3P(ds->ds_dir, ==, odd);
                dsl_dir_close(ds->ds_dir, ds);
                VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
                    NULL, ds, &ds->ds_dir));

                ASSERT3U(dsl_prop_numcb(ds), ==, 0);
        }

        /*
         * Change space accounting.
         * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
         * both be valid, or both be 0 (resulting in delta == 0).  This
         * is true for each of {clone,origin} independently.
         */

        delta = pa->cloneusedsnap -
            dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
        ASSERT3S(delta, >=, 0);
        ASSERT3U(pa->used, >=, delta);
        dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
        dsl_dir_diduse_space(dd, DD_USED_HEAD,
            pa->used - delta, pa->comp, pa->uncomp, tx);

        delta = pa->originusedsnap -
            odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
        ASSERT3S(delta, <=, 0);
        ASSERT3U(pa->used, >=, -delta);
        dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
        dsl_dir_diduse_space(odd, DD_USED_HEAD,
            -pa->used - delta, -pa->comp, -pa->uncomp, tx);

        /*
         * Origin's unique bytes were precomputed into pa->unique
         * (presumably in the open-context check -- confirm) since its
         * next-snap pointer changed above.
         */
        origin_ds->ds_phys->ds_unique_bytes = pa->unique;

        /* log history record */
        spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
            cr, "dataset = %llu", hds->ds_object);

        dsl_dir_close(odd, FTAG);
}
2519
2520 static char *snaplist_tag = "snaplist";
2521 /*
2522  * Make a list of dsl_dataset_t's for the snapshots between first_obj
2523  * (exclusive) and last_obj (inclusive).  The list will be in reverse
2524  * order (last_obj will be the list_head()).  If first_obj == 0, do all
2525  * snapshots back to this dataset's origin.
2526  */
2527 static int
2528 snaplist_make(dsl_pool_t *dp, boolean_t own,
2529     uint64_t first_obj, uint64_t last_obj, list_t *l)
2530 {
2531         uint64_t obj = last_obj;
2532
2533         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2534
2535         list_create(l, sizeof (struct promotenode),
2536             offsetof(struct promotenode, link));
2537
2538         while (obj != first_obj) {
2539                 dsl_dataset_t *ds;
2540                 struct promotenode *snap;
2541                 int err;
2542
2543                 if (own) {
2544                         err = dsl_dataset_own_obj(dp, obj,
2545                             0, snaplist_tag, &ds);
2546                         if (err == 0)
2547                                 dsl_dataset_make_exclusive(ds, snaplist_tag);
2548                 } else {
2549                         err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2550                 }
2551                 if (err == ENOENT) {
2552                         /* lost race with snapshot destroy */
2553                         struct promotenode *last = list_tail(l);
2554                         ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2555                         obj = last->ds->ds_phys->ds_prev_snap_obj;
2556                         continue;
2557                 } else if (err) {
2558                         return (err);
2559                 }
2560
2561                 if (first_obj == 0)
2562                         first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2563
2564                 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2565                 snap->ds = ds;
2566                 list_insert_tail(l, snap);
2567                 obj = ds->ds_phys->ds_prev_snap_obj;
2568         }
2569
2570         return (0);
2571 }
2572
2573 static int
2574 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2575 {
2576         struct promotenode *snap;
2577
2578         *spacep = 0;
2579         for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2580                 uint64_t used;
2581                 int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2582                     mintxg, UINT64_MAX, &used);
2583                 if (err)
2584                         return (err);
2585                 *spacep += used;
2586         }
2587         return (0);
2588 }
2589
2590 static void
2591 snaplist_destroy(list_t *l, boolean_t own)
2592 {
2593         struct promotenode *snap;
2594
2595         if (!l || !list_link_active(&l->list_head))
2596                 return;
2597
2598         while ((snap = list_tail(l)) != NULL) {
2599                 list_remove(l, snap);
2600                 if (own)
2601                         dsl_dataset_disown(snap->ds, snaplist_tag);
2602                 else
2603                         dsl_dataset_rele(snap->ds, snaplist_tag);
2604                 kmem_free(snap, sizeof (struct promotenode));
2605         }
2606         list_destroy(l);
2607 }
2608
2609 /*
2610  * Promote a clone.  Nomenclature note:
2611  * "clone" or "cds": the original clone which is being promoted
2612  * "origin" or "ods": the snapshot which is originally clone's origin
2613  * "origin head" or "ohds": the dataset which is the head
2614  * (filesystem/volume) for the origin
2615  * "origin origin": the origin of the origin's filesystem (typically
2616  * NULL, indicating that the clone is not a clone of a clone).
2617  */
2618 int
2619 dsl_dataset_promote(const char *name)
2620 {
2621         dsl_dataset_t *ds;
2622         dsl_dir_t *dd;
2623         dsl_pool_t *dp;
2624         dmu_object_info_t doi;
2625         struct promotearg pa = { 0 };
2626         struct promotenode *snap;
2627         int err;
2628
2629         err = dsl_dataset_hold(name, FTAG, &ds);
2630         if (err)
2631                 return (err);
2632         dd = ds->ds_dir;
2633         dp = dd->dd_pool;
2634
2635         err = dmu_object_info(dp->dp_meta_objset,
2636             ds->ds_phys->ds_snapnames_zapobj, &doi);
2637         if (err) {
2638                 dsl_dataset_rele(ds, FTAG);
2639                 return (err);
2640         }
2641
2642         if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2643                 dsl_dataset_rele(ds, FTAG);
2644                 return (EINVAL);
2645         }
2646
2647         /*
2648          * We are going to inherit all the snapshots taken before our
2649          * origin (i.e., our new origin will be our parent's origin).
2650          * Take ownership of them so that we can rename them into our
2651          * namespace.
2652          */
2653         rw_enter(&dp->dp_config_rwlock, RW_READER);
2654
2655         err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2656             &pa.shared_snaps);
2657         if (err != 0)
2658                 goto out;
2659
2660         err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2661         if (err != 0)
2662                 goto out;
2663
2664         snap = list_head(&pa.shared_snaps);
2665         ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2666         err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2667             snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2668         if (err != 0)
2669                 goto out;
2670
2671         if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2672                 err = dsl_dataset_own_obj(dp,
2673                     snap->ds->ds_dir->dd_phys->dd_origin_obj,
2674                     0, FTAG, &pa.origin_origin);
2675                 if (err != 0)
2676                         goto out;
2677         }
2678
2679 out:
2680         rw_exit(&dp->dp_config_rwlock);
2681
2682         /*
2683          * Add in 128x the snapnames zapobj size, since we will be moving
2684          * a bunch of snapnames to the promoted ds, and dirtying their
2685          * bonus buffers.
2686          */
2687         if (err == 0) {
2688                 err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2689                     dsl_dataset_promote_sync, ds, &pa,
2690                     2 + 2 * doi.doi_physical_blks);
2691         }
2692
2693         snaplist_destroy(&pa.shared_snaps, B_TRUE);
2694         snaplist_destroy(&pa.clone_snaps, B_FALSE);
2695         snaplist_destroy(&pa.origin_snaps, B_FALSE);
2696         if (pa.origin_origin)
2697                 dsl_dataset_disown(pa.origin_origin, FTAG);
2698         dsl_dataset_rele(ds, FTAG);
2699         return (err);
2700 }
2701
/*
 * Arguments for the clone-swap sync task (see dsl_dataset_clone_swap()).
 * unused_refres_delta is computed by the check function and consumed
 * by the sync function.
 */
struct cloneswaparg {
        dsl_dataset_t *cds; /* clone dataset */
        dsl_dataset_t *ohds; /* origin's head dataset */
        boolean_t force; /* swap even if ohds changed since its last snap */
        int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
2708
2709 /* ARGSUSED */
2710 static int
2711 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2712 {
2713         struct cloneswaparg *csa = arg1;
2714
2715         /* they should both be heads */
2716         if (dsl_dataset_is_snapshot(csa->cds) ||
2717             dsl_dataset_is_snapshot(csa->ohds))
2718                 return (EINVAL);
2719
2720         /* the branch point should be just before them */
2721         if (csa->cds->ds_prev != csa->ohds->ds_prev)
2722                 return (EINVAL);
2723
2724         /* cds should be the clone */
2725         if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
2726             csa->ohds->ds_object)
2727                 return (EINVAL);
2728
2729         /* the clone should be a child of the origin */
2730         if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2731                 return (EINVAL);
2732
2733         /* ohds shouldn't be modified unless 'force' */
2734         if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2735                 return (ETXTBSY);
2736
2737         /* adjust amount of any unconsumed refreservation */
2738         csa->unused_refres_delta =
2739             (int64_t)MIN(csa->ohds->ds_reserved,
2740             csa->ohds->ds_phys->ds_unique_bytes) -
2741             (int64_t)MIN(csa->ohds->ds_reserved,
2742             csa->cds->ds_phys->ds_unique_bytes);
2743
2744         if (csa->unused_refres_delta > 0 &&
2745             csa->unused_refres_delta >
2746             dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2747                 return (ENOSPC);
2748
2749         return (0);
2750 }
2751
/* ARGSUSED */
/*
 * Sync task for dsl_dataset_clone_swap(): exchange the contents of the
 * clone (csa->cds) and its origin head (csa->ohds).  Swaps the root
 * block pointers, the ds_*_bytes counters, and the deadlists, and
 * adjusts dsl_dir space accounting so each dir reflects the dataset
 * contents it now holds.  Statement order here matters: the origin's
 * unique bytes and the dd_* accounting are derived from the deadlists
 * before those are swapped.
 */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        struct cloneswaparg *csa = arg1;
        dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

        ASSERT(csa->cds->ds_reserved == 0);
        ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);

        dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
        dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
        dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);

        /* evict in-core user state before the contents change underneath */
        if (csa->cds->ds_user_ptr != NULL) {
                csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
                csa->cds->ds_user_ptr = NULL;
        }

        if (csa->ohds->ds_user_ptr != NULL) {
                csa->ohds->ds_user_evict_func(csa->ohds,
                    csa->ohds->ds_user_ptr);
                csa->ohds->ds_user_ptr = NULL;
        }

        /* reset origin's unique bytes */
        VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
            csa->cds->ds_prev->ds_phys->ds_prev_snap_txg, UINT64_MAX,
            &csa->cds->ds_prev->ds_phys->ds_unique_bytes));

        /* swap blkptrs */
        {
                blkptr_t tmp;
                tmp = csa->ohds->ds_phys->ds_bp;
                csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
                csa->cds->ds_phys->ds_bp = tmp;
        }

        /* set dd_*_bytes */
        {
                int64_t dused, dcomp, duncomp;
                uint64_t cdl_used, cdl_comp, cdl_uncomp;
                uint64_t odl_used, odl_comp, odl_uncomp;

                ASSERT3U(csa->cds->ds_dir->dd_phys->
                    dd_used_breakdown[DD_USED_SNAP], ==, 0);

                VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
                    &cdl_comp, &cdl_uncomp));
                VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
                    &odl_comp, &odl_uncomp));

                /*
                 * d{used,comp,uncomp}: how much more space the clone's
                 * contents (dataset plus its deadlist) occupy than the
                 * head's; transfer that delta between the two dirs.
                 */
                dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
                    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
                dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
                    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
                duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
                    cdl_uncomp -
                    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

                dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
                    dused, dcomp, duncomp, tx);
                dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
                    -dused, -dcomp, -duncomp, tx);

                /*
                 * The difference in the space used by snapshots is the
                 * difference in snapshot space due to the head's
                 * deadlist (since that's the only thing that's
                 * changing that affects the snapused).
                 */
                VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
                    csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used));
                VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
                    csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used));
                dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
                    DD_USED_HEAD, DD_USED_SNAP, tx);
        }

#define SWITCH64(x, y) \
        { \
                uint64_t __tmp = (x); \
                (x) = (y); \
                (y) = __tmp; \
        }

        /* swap ds_*_bytes */
        SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
            csa->cds->ds_phys->ds_used_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
            csa->cds->ds_phys->ds_compressed_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
            csa->cds->ds_phys->ds_uncompressed_bytes);
        SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
            csa->cds->ds_phys->ds_unique_bytes);

        /* apply any parent delta for change in unconsumed refreservation */
        dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
            csa->unused_refres_delta, 0, 0, tx);

        /*
         * swap deadlists: close both, exchange the on-disk object
         * numbers, then reopen each on the other's object.
         */
        bplist_close(&csa->cds->ds_deadlist);
        bplist_close(&csa->ohds->ds_deadlist);
        SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
            csa->cds->ds_phys->ds_deadlist_obj);
        VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
            csa->cds->ds_phys->ds_deadlist_obj));
        VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
            csa->ohds->ds_phys->ds_deadlist_obj));

        dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
2864
2865 /*
2866  * Swap 'clone' with its origin head file system.  Used at the end
2867  * of "online recv" to swizzle the file system to the new version.
2868  */
2869 int
2870 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2871     boolean_t force)
2872 {
2873         struct cloneswaparg csa;
2874         int error;
2875
2876         ASSERT(clone->ds_owner);
2877         ASSERT(origin_head->ds_owner);
2878 retry:
2879         /* Need exclusive access for the swap */
2880         rw_enter(&clone->ds_rwlock, RW_WRITER);
2881         if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2882                 rw_exit(&clone->ds_rwlock);
2883                 rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2884                 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2885                         rw_exit(&origin_head->ds_rwlock);
2886                         goto retry;
2887                 }
2888         }
2889         csa.cds = clone;
2890         csa.ohds = origin_head;
2891         csa.force = force;
2892         error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2893             dsl_dataset_clone_swap_check,
2894             dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2895         return (error);
2896 }
2897
2898 /*
2899  * Given a pool name and a dataset object number in that pool,
2900  * return the name of that dataset.
2901  */
2902 int
2903 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2904 {
2905         spa_t *spa;
2906         dsl_pool_t *dp;
2907         dsl_dataset_t *ds;
2908         int error;
2909
2910         if ((error = spa_open(pname, &spa, FTAG)) != 0)
2911                 return (error);
2912         dp = spa_get_dsl(spa);
2913         rw_enter(&dp->dp_config_rwlock, RW_READER);
2914         if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2915                 dsl_dataset_name(ds, buf);
2916                 dsl_dataset_rele(ds, FTAG);
2917         }
2918         rw_exit(&dp->dp_config_rwlock);
2919         spa_close(spa, FTAG);
2920
2921         return (error);
2922 }
2923
2924 int
2925 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
2926     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
2927 {
2928         int error = 0;
2929
2930         ASSERT3S(asize, >, 0);
2931
2932         /*
2933          * *ref_rsrv is the portion of asize that will come from any
2934          * unconsumed refreservation space.
2935          */
2936         *ref_rsrv = 0;
2937
2938         mutex_enter(&ds->ds_lock);
2939         /*
2940          * Make a space adjustment for reserved bytes.
2941          */
2942         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
2943                 ASSERT3U(*used, >=,
2944                     ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2945                 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2946                 *ref_rsrv =
2947                     asize - MIN(asize, parent_delta(ds, asize + inflight));
2948         }
2949
2950         if (!check_quota || ds->ds_quota == 0) {
2951                 mutex_exit(&ds->ds_lock);
2952                 return (0);
2953         }
2954         /*
2955          * If they are requesting more space, and our current estimate
2956          * is over quota, they get to try again unless the actual
2957          * on-disk is over quota and there are no pending changes (which
2958          * may free up space for us).
2959          */
2960         if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
2961                 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
2962                         error = ERESTART;
2963                 else
2964                         error = EDQUOT;
2965         }
2966         mutex_exit(&ds->ds_lock);
2967
2968         return (error);
2969 }
2970
2971 /* ARGSUSED */
2972 static int
2973 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
2974 {
2975         dsl_dataset_t *ds = arg1;
2976         uint64_t *quotap = arg2;
2977         uint64_t new_quota = *quotap;
2978
2979         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
2980                 return (ENOTSUP);
2981
2982         if (new_quota == 0)
2983                 return (0);
2984
2985         if (new_quota < ds->ds_phys->ds_used_bytes ||
2986             new_quota < ds->ds_reserved)
2987                 return (ENOSPC);
2988
2989         return (0);
2990 }
2991
2992 /* ARGSUSED */
2993 void
2994 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2995 {
2996         dsl_dataset_t *ds = arg1;
2997         uint64_t *quotap = arg2;
2998         uint64_t new_quota = *quotap;
2999
3000         dmu_buf_will_dirty(ds->ds_dbuf, tx);
3001
3002         ds->ds_quota = new_quota;
3003
3004         dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);
3005
3006         spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
3007             tx, cr, "%lld dataset = %llu ",
3008             (longlong_t)new_quota, ds->ds_object);
3009 }
3010
3011 int
3012 dsl_dataset_set_quota(const char *dsname, uint64_t quota)
3013 {
3014         dsl_dataset_t *ds;
3015         int err;
3016
3017         err = dsl_dataset_hold(dsname, FTAG, &ds);
3018         if (err)
3019                 return (err);
3020
3021         if (quota != ds->ds_quota) {
3022                 /*
3023                  * If someone removes a file, then tries to set the quota, we
3024                  * want to make sure the file freeing takes effect.
3025                  */
3026                 txg_wait_open(ds->ds_dir->dd_pool, 0);
3027
3028                 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3029                     dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3030                     ds, &quota, 0);
3031         }
3032         dsl_dataset_rele(ds, FTAG);
3033         return (err);
3034 }
3035
3036 static int
3037 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3038 {
3039         dsl_dataset_t *ds = arg1;
3040         uint64_t *reservationp = arg2;
3041         uint64_t new_reservation = *reservationp;
3042         int64_t delta;
3043         uint64_t unique;
3044
3045         if (new_reservation > INT64_MAX)
3046                 return (EOVERFLOW);
3047
3048         if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3049             SPA_VERSION_REFRESERVATION)
3050                 return (ENOTSUP);
3051
3052         if (dsl_dataset_is_snapshot(ds))
3053                 return (EINVAL);
3054
3055         /*
3056          * If we are doing the preliminary check in open context, the
3057          * space estimates may be inaccurate.
3058          */
3059         if (!dmu_tx_is_syncing(tx))
3060                 return (0);
3061
3062         mutex_enter(&ds->ds_lock);
3063         unique = dsl_dataset_unique(ds);
3064         delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved);
3065         mutex_exit(&ds->ds_lock);
3066
3067         if (delta > 0 &&
3068             delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3069                 return (ENOSPC);
3070         if (delta > 0 && ds->ds_quota > 0 &&
3071             new_reservation > ds->ds_quota)
3072                 return (ENOSPC);
3073
3074         return (0);
3075 }
3076
/* ARGSUSED */
/*
 * Sync task for dsl_dataset_set_reservation(): install the new
 * refreservation, adjust the dsl_dir's REFRSRV space accounting by the
 * change in the *unconsumed* portion of the reservation, persist the
 * "refreservation" property, and log a history record.
 */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
    dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        uint64_t *reservationp = arg2;
        uint64_t new_reservation = *reservationp;
        uint64_t unique;
        int64_t delta;

        dmu_buf_will_dirty(ds->ds_dbuf, tx);

        /* lock order here: dd_lock is taken before ds_lock */
        mutex_enter(&ds->ds_dir->dd_lock);
        mutex_enter(&ds->ds_lock);
        unique = dsl_dataset_unique(ds);
        /*
         * delta = change in the reservation not yet consumed by unique
         * bytes; only that part is charged to DD_USED_REFRSRV.
         */
        delta = MAX(0, (int64_t)(new_reservation - unique)) -
            MAX(0, (int64_t)(ds->ds_reserved - unique));
        ds->ds_reserved = new_reservation;
        mutex_exit(&ds->ds_lock);

        dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
        mutex_exit(&ds->ds_dir->dd_lock);
        dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
            new_reservation, cr, tx);

        spa_history_internal_log(LOG_DS_REFRESERV,
            ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
            (longlong_t)new_reservation, ds->ds_object);
}
3107
3108 int
3109 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
3110 {
3111         dsl_dataset_t *ds;
3112         int err;
3113
3114         err = dsl_dataset_hold(dsname, FTAG, &ds);
3115         if (err)
3116                 return (err);
3117
3118         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3119             dsl_dataset_set_reservation_check,
3120             dsl_dataset_set_reservation_sync, ds, &reservation, 0);
3121         dsl_dataset_rele(ds, FTAG);
3122         return (err);
3123 }