/*
 * FreeBSD stable/8: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
 * (MFC r208047)
 */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25
26 #include <sys/dmu_objset.h>
27 #include <sys/dsl_dataset.h>
28 #include <sys/dsl_dir.h>
29 #include <sys/dsl_prop.h>
30 #include <sys/dsl_synctask.h>
31 #include <sys/dmu_traverse.h>
32 #include <sys/dmu_tx.h>
33 #include <sys/arc.h>
34 #include <sys/zio.h>
35 #include <sys/zap.h>
36 #include <sys/unique.h>
37 #include <sys/zfs_context.h>
38 #include <sys/zfs_ioctl.h>
39 #include <sys/spa.h>
40 #include <sys/zfs_znode.h>
41 #include <sys/sunddi.h>
42
/*
 * Sentinel stored in ds_owner to mark a dataset whose destruction is
 * assured (see DSL_DATASET_IS_DESTROYED below).
 */
static char *dsl_reaper = "the grim reaper";

/* Forward declarations for sync-task check/sync callbacks defined later. */
static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_checkfunc_t dsl_dataset_rollback_check;
static dsl_syncfunc_t dsl_dataset_rollback_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

/* NOTE(review): presumably a sanity cap on dataset hold counts — not used in this chunk. */
#define	DS_REF_MAX	(1ULL << 62)

/* Block size for the deadlist bplist (see bplist_create() in create_sync_dd). */
#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

/* True once the destroy path has claimed the dataset (ds_owner == dsl_reaper). */
#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
57
58 /*
59  * Figure out how much of this delta should be propogated to the dsl_dir
60  * layer.  If there's a refreservation, that space has already been
61  * partially accounted for in our ancestors.
62  */
63 static int64_t
64 parent_delta(dsl_dataset_t *ds, int64_t delta)
65 {
66         uint64_t old_bytes, new_bytes;
67
68         if (ds->ds_reserved == 0)
69                 return (delta);
70
71         old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
72         new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
73
74         ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
75         return (new_bytes - old_bytes);
76 }
77
/*
 * Account for a newly-allocated block in the dataset's space usage.
 * Called in syncing context; updates the dataset's used/compressed/
 * uncompressed/unique byte counts and propagates the charge to the
 * dsl_dir layer.  A NULL ds means the block belongs to the meta-objset.
 */
void
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "born, ds=%p\n", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	/* Lock order: dd_lock is taken before ds_lock. */
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	/* parent_delta() accounts for any refreservation already charged. */
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
	    compressed, uncompressed, tx);
	/* Move the refreservation-covered portion between accounting classes. */
	dsl_dir_transfer_space(ds->ds_dir, used - delta,
	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
}
120
/*
 * Release a block from this dataset: either free it immediately (if it
 * was born after the most recent snapshot) or put it on the dataset's
 * deadlist (a snapshot still references it).  Returns the number of
 * bytes deducted from the dataset's space accounting.  A NULL ds means
 * the block belongs to the meta-objset.
 */
int
dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
    dmu_tx_t *tx)
{
	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(pio != NULL);
	ASSERT(dmu_tx_is_syncing(tx));
	/* No block pointer => nothing to free */
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(used > 0);
	if (ds == NULL) {
		int err;
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		err = dsl_free(pio, tx->tx_pool,
		    tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
		ASSERT(err == 0);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	ASSERT(!dsl_dataset_is_snapshot(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int err;
		int64_t delta;

		/* Born after the last snapshot: no one else references it. */
		dprintf_bp(bp, "freeing: %s", "");
		err = dsl_free(pio, tx->tx_pool,
		    tx->tx_txg, bp, NULL, NULL, ARC_NOWAIT);
		ASSERT(err == 0);

		/* Lock order: dd_lock before ds_lock. */
		mutex_enter(&ds->ds_dir->dd_lock);
		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    delta, -compressed, -uncompressed, tx);
		/* Shift the refreservation-covered portion back. */
		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
		mutex_exit(&ds->ds_dir->dd_lock);
	} else {
		/* Still visible in the previous snapshot: deadlist it. */
		dprintf_bp(bp, "putting on dead list: %s", "");
		VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
		if (bp->blk_birth > ds->ds_origin_txg) {
			/* Space moves from the head class to the snapshot class. */
			dsl_dir_transfer_space(ds->ds_dir, used,
			    DD_USED_HEAD, DD_USED_SNAP, tx);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
208
209 uint64_t
210 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
211 {
212         uint64_t trysnap = 0;
213
214         if (ds == NULL)
215                 return (0);
216         /*
217          * The snapshot creation could fail, but that would cause an
218          * incorrect FALSE return, which would only result in an
219          * overestimation of the amount of space that an operation would
220          * consume, which is OK.
221          *
222          * There's also a small window where we could miss a pending
223          * snapshot, because we could set the sync task in the quiescing
224          * phase.  So this should only be used as a guess.
225          */
226         if (ds->ds_trysnap_txg >
227             spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
228                 trysnap = ds->ds_trysnap_txg;
229         return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
230 }
231
232 int
233 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
234 {
235         return (blk_birth > dsl_dataset_prev_snap_txg(ds));
236 }
237
/*
 * dmu_buf user-eviction callback: tear down the in-core dsl_dataset_t
 * when its bonus buffer is evicted.  Only unowned (or already
 * destroyed) datasets may reach here.
 */
/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	dprintf_ds(ds, "evicting %s\n", "");

	unique_remove(ds->ds_fsid_guid);

	/* Give the attached user (if any) a chance to clean up its state. */
	if (ds->ds_user_ptr != NULL)
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);

	/* Drop our reference on the previous snapshot. */
	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_close(&ds->ds_deadlist);
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	/* Release any lock still held by the caller before destroying it. */
	if (mutex_owned(&ds->ds_lock))
		mutex_exit(&ds->ds_lock);
	mutex_destroy(&ds->ds_lock);
	if (mutex_owned(&ds->ds_opening_lock))
		mutex_exit(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_opening_lock);
	if (mutex_owned(&ds->ds_deadlist.bpl_lock))
		mutex_exit(&ds->ds_deadlist.bpl_lock);
	mutex_destroy(&ds->ds_deadlist.bpl_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}
278
279 static int
280 dsl_dataset_get_snapname(dsl_dataset_t *ds)
281 {
282         dsl_dataset_phys_t *headphys;
283         int err;
284         dmu_buf_t *headdbuf;
285         dsl_pool_t *dp = ds->ds_dir->dd_pool;
286         objset_t *mos = dp->dp_meta_objset;
287
288         if (ds->ds_snapname[0])
289                 return (0);
290         if (ds->ds_phys->ds_next_snap_obj == 0)
291                 return (0);
292
293         err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
294             FTAG, &headdbuf);
295         if (err)
296                 return (err);
297         headphys = headdbuf->db_data;
298         err = zap_value_search(dp->dp_meta_objset,
299             headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
300         dmu_buf_rele(headdbuf, FTAG);
301         return (err);
302 }
303
304 static int
305 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
306 {
307         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
308         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
309         matchtype_t mt;
310         int err;
311
312         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
313                 mt = MT_FIRST;
314         else
315                 mt = MT_EXACT;
316
317         err = zap_lookup_norm(mos, snapobj, name, 8, 1,
318             value, mt, NULL, 0, NULL);
319         if (err == ENOTSUP && mt == MT_FIRST)
320                 err = zap_lookup(mos, snapobj, name, 8, 1, value);
321         return (err);
322 }
323
324 static int
325 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
326 {
327         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
328         uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
329         matchtype_t mt;
330         int err;
331
332         if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
333                 mt = MT_FIRST;
334         else
335                 mt = MT_EXACT;
336
337         err = zap_remove_norm(mos, snapobj, name, mt, tx);
338         if (err == ENOTSUP && mt == MT_FIRST)
339                 err = zap_remove(mos, snapobj, name, tx);
340         return (err);
341 }
342
/*
 * Obtain a reference to the dataset with object number dsobj,
 * constructing the in-core dsl_dataset_t (and caching it as the bonus
 * buffer's user) on first open.  Returns the dataset in *dsp without
 * taking ds_rwlock; callers normally follow up with
 * dsl_dataset_hold_ref().
 */
static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);
	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner;

		/* First opener: build the in-core state. */
		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		rw_init(&ds->ds_rwlock, 0, 0, 0);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		err = bplist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);
		if (err == 0) {
			err = dsl_dir_open_obj(dp,
			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		}
		if (err) {
			/*
			 * we don't really need to close the blist if we
			 * just opened it.
			 */
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_deadlist.bpl_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				/* Keep a reference on our previous snapshot. */
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}

			if (err == 0 && dsl_dir_is_clone(ds->ds_dir)) {
				dsl_dataset_t *origin;

				/* Cache the origin snapshot's creation txg. */
				err = dsl_dataset_hold_obj(dp,
				    ds->ds_dir->dd_phys->dd_origin_obj,
				    FTAG, &origin);
				if (err == 0) {
					ds->ds_origin_txg =
					    origin->ds_phys->ds_creation_txg;
					dsl_dataset_rele(origin, FTAG);
				}
			}
		} else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
			err = dsl_dataset_get_snapname(ds);
		}

		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			/* Cache refreservation and refquota for fast access. */
			err = dsl_prop_get_ds(ds,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds(ds,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0) {
			/* Race to install our dsl_dataset_t as the buffer user. */
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);
		}
		if (err || winner) {
			/* Lost the race (or failed): undo everything we set up. */
			bplist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_deadlist.bpl_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	/* Outside sync context, refuse datasets already marked destroyed. */
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}
487
/*
 * Acquire the reader lock that accompanies a reference obtained from
 * dsl_dataset_get_ref().  May block while a destroyer holds ds_rwlock
 * exclusively; returns ENOENT (after dropping the reference) if the
 * dataset is destroyed while we wait.
 */
static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		/* Drop the config lock while we sleep to avoid deadlock. */
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		rw_enter(&dp->dp_config_rwlock, RW_READER);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}
533
534 int
535 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
536     dsl_dataset_t **dsp)
537 {
538         int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
539
540         if (err)
541                 return (err);
542         return (dsl_dataset_hold_ref(*dsp, tag));
543 }
544
545 int
546 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
547     dsl_dataset_t **dsp)
548 {
549         int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);
550
551         ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);
552
553         if (err)
554                 return (err);
555         if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
556                 dsl_dataset_rele(*dsp, owner);
557                 *dsp = NULL;
558                 return (EBUSY);
559         }
560         return (0);
561 }
562
/*
 * Hold a dataset by name ("pool/fs" or "pool/fs@snap").  On success
 * *dsp is a referenced, reader-locked dataset; release with
 * dsl_dataset_rele().
 */
int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	/* Resolve the directory part; snapname points at "@snap" if present. */
	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		/* Trade the hold on the head dataset for the snapshot's. */
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			/* Cache the snapshot name while we know it. */
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
621
622 int
623 dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
624 {
625         int err = dsl_dataset_hold(name, owner, dsp);
626         if (err)
627                 return (err);
628         if ((*dsp)->ds_phys->ds_num_children > 0 &&
629             !DS_MODE_IS_READONLY(flags)) {
630                 dsl_dataset_rele(*dsp, owner);
631                 return (EROFS);
632         }
633         if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
634                 dsl_dataset_rele(*dsp, owner);
635                 return (EBUSY);
636         }
637         return (0);
638 }
639
640 void
641 dsl_dataset_name(dsl_dataset_t *ds, char *name)
642 {
643         if (ds == NULL) {
644                 (void) strcpy(name, "mos");
645         } else {
646                 dsl_dir_name(ds->ds_dir, name);
647                 VERIFY(0 == dsl_dataset_get_snapname(ds));
648                 if (ds->ds_snapname[0]) {
649                         (void) strcat(name, "@");
650                         /*
651                          * We use a "recursive" mutex so that we
652                          * can call dprintf_ds() with ds_lock held.
653                          */
654                         if (!MUTEX_HELD(&ds->ds_lock)) {
655                                 mutex_enter(&ds->ds_lock);
656                                 (void) strcat(name, ds->ds_snapname);
657                                 mutex_exit(&ds->ds_lock);
658                         } else {
659                                 (void) strcat(name, ds->ds_snapname);
660                         }
661                 }
662         }
663 }
664
665 static int
666 dsl_dataset_namelen(dsl_dataset_t *ds)
667 {
668         int result;
669
670         if (ds == NULL) {
671                 result = 3;     /* "mos" */
672         } else {
673                 result = dsl_dir_namelen(ds->ds_dir);
674                 VERIFY(0 == dsl_dataset_get_snapname(ds));
675                 if (ds->ds_snapname[0]) {
676                         ++result;       /* adding one for the @-sign */
677                         if (!MUTEX_HELD(&ds->ds_lock)) {
678                                 mutex_enter(&ds->ds_lock);
679                                 result += strlen(ds->ds_snapname);
680                                 mutex_exit(&ds->ds_lock);
681                         } else {
682                                 result += strlen(ds->ds_snapname);
683                         }
684                 }
685         }
686
687         return (result);
688 }
689
/*
 * Release a reference obtained via dsl_dataset_get_ref() by dropping
 * the hold on the dataset's bonus buffer.
 */
void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}
695
696 void
697 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
698 {
699         if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
700                 rw_exit(&ds->ds_rwlock);
701         }
702         dsl_dataset_drop_ref(ds, tag);
703 }
704
/*
 * Relinquish ownership of a dataset.  Drops any exclusive (WRITER)
 * hold on ds_rwlock and wakes threads blocked in dsl_dataset_hold_ref().
 * If the bonus buffer is already gone (destroyed dataset), the in-core
 * state is torn down directly via dsl_dataset_evict().
 */
void
dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
{
	ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, owner);
	else
		dsl_dataset_evict(ds->ds_dbuf, ds);
}
723
724 boolean_t
725 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
726 {
727         boolean_t gotit = FALSE;
728
729         mutex_enter(&ds->ds_lock);
730         if (ds->ds_owner == NULL &&
731             (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
732                 ds->ds_owner = owner;
733                 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
734                         rw_exit(&ds->ds_rwlock);
735                 gotit = TRUE;
736         }
737         mutex_exit(&ds->ds_lock);
738         return (gotit);
739 }
740
/*
 * Upgrade the owner's hold to exclusive (WRITER) access to ds_rwlock,
 * blocking out new readers.  Caller must be the current owner; see the
 * destroy-path discussion in dsl_dataset_hold_ref().
 */
void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}
748
/*
 * Create a new dataset object in directory dd (syncing context) and
 * return its object number.  If origin is non-NULL — or the pool has an
 * origin snapshot — the new dataset is a clone: it inherits the
 * origin's block pointer and space accounting, and is added to the
 * origin's next-clones list.
 */
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	/* Default to cloning from the pool-wide origin snapshot, if any. */
	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	/* Normalized (toupper) ZAP, enabling case-insensitive snap lookups. */
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
	dsphys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);

	if (origin) {
		/* Clone: start from the origin's contents and accounting. */
		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		/* Track this clone in the origin's next-clones ZAP. */
		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	/* Install the new dataset as the directory's head dataset. */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}
827
828 uint64_t
829 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
830     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
831 {
832         dsl_pool_t *dp = pdd->dd_pool;
833         uint64_t dsobj, ddobj;
834         dsl_dir_t *dd;
835
836         ASSERT(lastname[0] != '@');
837
838         ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
839         VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
840
841         dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
842
843         dsl_deleg_set_create_perms(dd, tx, cr);
844
845         dsl_dir_close(dd, FTAG);
846
847         return (dsobj);
848 }
849
/*
 * Argument bundle passed to dsl_snapshot_destroy_one() by
 * dmu_objset_find() in dsl_snapshots_destroy().
 */
struct destroyarg {
	dsl_sync_task_group_t *dstg;	/* collects one destroy task per fs */
	char *snapname;			/* snapshot name (portion after '@') */
	char *failed;			/* out: fs name that caused an error */
};
855
/*
 * dmu_objset_find() callback: queue a destroy sync task for the snapshot
 * "name@da->snapname", if it exists.  'name' is temporarily extended with
 * the snapshot suffix in place and restored before returning.
 */
static int
dsl_snapshot_destroy_one(char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	char *cp;
	int err;

	/*
	 * Build the full snapshot name in place.
	 * NOTE(review): assumes 'name' has room for the suffix — presumably
	 * a MAXPATHLEN-sized buffer from dmu_objset_find(); confirm there.
	 */
	(void) strcat(name, "@");
	(void) strcat(name, da->snapname);
	err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
	    da->dstg, &ds);
	/* Restore the original filesystem name. */
	cp = strchr(name, '@');
	*cp = '\0';
	if (err == 0) {
		dsl_dataset_make_exclusive(ds, da->dstg);
		/* Evict any cached user (objset) state before destruction. */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, ds, da->dstg, 0);
	} else if (err == ENOENT) {
		/* This filesystem has no snapshot by that name; skip it. */
		err = 0;
	} else {
		/* Record which filesystem failed for the caller. */
		(void) strcpy(da->failed, name);
	}
	return (err);
}
885
/*
 * Destroy 'snapname' in all descendants of 'fsname'.  The individual
 * destroys are gathered into a single sync task group so they commit
 * (or fail) together.  On error, 'fsname' is overwritten with the name
 * of the filesystem that triggered the failure.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;	/* reused as the error-name out buffer */

	/* Queue one destroy task per descendant that has the snapshot. */
	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	/* Release every dataset queued above, whether or not we succeeded. */
	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		dsl_dataset_disown(ds, da.dstg);
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}
928
929 /*
930  * ds must be opened as OWNER.  On return (whether successful or not),
931  * ds will be closed and caller can no longer dereference it.
932  */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		/* Evict any cached user (objset) state first. */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    ds, tag, 0);
		goto out;
	}

	dd = ds->ds_dir;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}

	dmu_objset_close(os);
	/* ESRCH from dmu_object_next() means we visited every object. */
	if (err != ESRCH)
		goto out;

	/* Re-open the dsl_dir under FTAG so we hold it across the destroy. */
	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	if (ds->ds_user_ptr) {
		/*
		 * We need to sync out all in-flight IO before we try
		 * to evict (the dataset evict func is trying to clear
		 * the cached entries for this dataset in the ARC).
		 */
		txg_wait_synced(dd->dd_pool, 0);
	}

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	if (ds->ds_user_ptr) {
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}
	/* One task group so dataset and dir are destroyed in the same txg. */
	dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
	dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
	    dsl_dataset_destroy_sync, ds, tag, 0);
	dsl_sync_task_create(dstg, dsl_dir_destroy_check,
	    dsl_dir_destroy_sync, dd, FTAG, 0);
	err = dsl_sync_task_group_wait(dstg);
	dsl_sync_task_group_destroy(dstg);
	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
1026
/*
 * Roll the dataset back to its most recent snapshot (or, for a ZPL
 * objset with no snapshots, back to emptiness — see
 * dsl_dataset_rollback_check()).  The caller must own the dataset;
 * exclusive access is taken for the sync task and dropped on return.
 */
int
dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
{
	int err;

	ASSERT(ds->ds_owner);

	dsl_dataset_make_exclusive(ds, ds->ds_owner);
	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
	    ds, &ost, 0);
	/* drop exclusive access */
	mutex_enter(&ds->ds_lock);
	rw_exit(&ds->ds_rwlock);
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);
	return (err);
}
1045
1046 void *
1047 dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
1048     void *p, dsl_dataset_evict_func_t func)
1049 {
1050         void *old;
1051
1052         mutex_enter(&ds->ds_lock);
1053         old = ds->ds_user_ptr;
1054         if (old == NULL) {
1055                 ds->ds_user_ptr = p;
1056                 ds->ds_user_evict_func = func;
1057         }
1058         mutex_exit(&ds->ds_lock);
1059         return (old);
1060 }
1061
/*
 * Return the user pointer installed by dsl_dataset_set_user_ptr(),
 * or NULL if none.  Note: read without taking ds_lock.
 */
void *
dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
{
	return (ds->ds_user_ptr);
}
1067
1068
/*
 * Return a pointer to the dataset's root block pointer (in ds_phys).
 */
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}
1074
1075 void
1076 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1077 {
1078         ASSERT(dmu_tx_is_syncing(tx));
1079         /* If it's the meta-objset, set dp_meta_rootbp */
1080         if (ds == NULL) {
1081                 tx->tx_pool->dp_meta_rootbp = *bp;
1082         } else {
1083                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1084                 ds->ds_phys->ds_bp = *bp;
1085         }
1086 }
1087
/*
 * Return the spa of the pool this dataset belongs to.
 */
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}
1093
/*
 * Mark the dataset dirty in this transaction's txg so that it will be
 * written out by the sync thread.  NULL (the meta-objset) is ignored;
 * snapshots must never be dirtied.
 */
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_user_ptr != NULL);

	/* A nonzero ds_next_snap_obj means this dataset is a snapshot. */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}
1114
/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;	/* space used by the most recent snapshot */
	uint64_t dlused, dlcomp, dluncomp;

	/* Only valid on a head dataset, never a snapshot. */
	ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	/* dlused: space on our deadlist, i.e. freed since that snapshot. */
	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
	    &dluncomp));

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	/* The value is now exact; record that if the pool version allows. */
	if (!DS_UNIQUE_IS_ACCURATE(ds) &&
	    spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
1148
1149 static uint64_t
1150 dsl_dataset_unique(dsl_dataset_t *ds)
1151 {
1152         if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
1153                 dsl_dataset_recalc_head_uniq(ds);
1154
1155         return (ds->ds_phys->ds_unique_bytes);
1156 }
1157
/* Argument bundle for kill_blkptr(), the traverse_dataset() callback. */
struct killarg {
	dsl_dataset_t *ds;	/* dataset whose blocks are being killed */
	zio_t *zio;		/* zio passed to dsl_dataset_block_kill() */
	dmu_tx_t *tx;		/* transaction the frees are charged to */
};
1163
1164 /* ARGSUSED */
1165 static int
1166 kill_blkptr(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb,
1167     const dnode_phys_t *dnp, void *arg)
1168 {
1169         struct killarg *ka = arg;
1170
1171         if (bp == NULL)
1172                 return (0);
1173
1174         ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1175         (void) dsl_dataset_block_kill(ka->ds, bp, ka->zio, ka->tx);
1176
1177         return (0);
1178 }
1179
/*
 * Sync-task "check" half of rollback.  arg2 points to the objset type
 * requested by the caller (dmu_objset_type_t).
 */
/* ARGSUSED */
static int
dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;

	/*
	 * We can only roll back to emptyness if it is a ZPL objset.
	 */
	if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
		return (EINVAL);

	/*
	 * This must not be a snapshot.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	return (0);
}
1208
/*
 * Sync-task "sync" half of rollback: discard everything written since
 * the most recent snapshot and restore the dataset's contents from it
 * (or reset to an empty objset of type *ost if there is no snapshot).
 */
/* ARGSUSED */
static void
dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/*
	 * Before the roll back destroy the zil.
	 */
	if (ds->ds_user_ptr != NULL) {
		zil_rollback_destroy(
		    ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx);

		/*
		 * We need to make sure that the objset_impl_t is reopened after
		 * we do the rollback, otherwise it will have the wrong
		 * objset_phys_t.  Normally this would happen when this
		 * dataset-open is closed, thus causing the
		 * dataset to be immediately evicted.  But when doing "zfs recv
		 * -F", we reopen the objset before that, so that there is no
		 * window where the dataset is closed and inconsistent.
		 */
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}

	/* Transfer space that was freed since last snap back to the head. */
	{
		uint64_t used;

		VERIFY(0 == bplist_space_birthrange(&ds->ds_deadlist,
		    ds->ds_origin_txg, UINT64_MAX, &used));
		dsl_dir_transfer_space(ds->ds_dir, used,
		    DD_USED_SNAP, DD_USED_HEAD, tx);
	}

	/* Zero out the deadlist by replacing it with a fresh, empty one. */
	bplist_close(&ds->ds_deadlist);
	bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
	ds->ds_phys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
	    ds->ds_phys->ds_deadlist_obj));

	{
		/* Free blkptrs that we gave birth to */
		zio_t *zio;
		struct killarg ka;

		zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED);
		ka.ds = ds;
		ka.zio = zio;
		ka.tx = tx;
		(void) traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    TRAVERSE_POST, kill_blkptr, &ka);
		(void) zio_wait(zio);
	}

	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);

	if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
		/* Change our contents to that of the prev snapshot */

		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT3U(ds->ds_phys->ds_used_bytes, <=,
		    ds->ds_prev->ds_phys->ds_used_bytes);

		ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
		ds->ds_phys->ds_used_bytes =
		    ds->ds_prev->ds_phys->ds_used_bytes;
		ds->ds_phys->ds_compressed_bytes =
		    ds->ds_prev->ds_phys->ds_compressed_bytes;
		ds->ds_phys->ds_uncompressed_bytes =
		    ds->ds_prev->ds_phys->ds_uncompressed_bytes;
		ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;

		/* Our contents now match prev's, so prev has no uniques. */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ds->ds_prev->ds_phys->ds_unique_bytes = 0;
		}
	} else {
		objset_impl_t *osi;

		/* No snapshot to restore from: reset to an empty objset. */
		ASSERT3U(ds->ds_phys->ds_used_bytes, ==, 0);
		ASSERT3U(ds->ds_phys->ds_compressed_bytes, ==, 0);
		ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, ==, 0);

		bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
		ds->ds_phys->ds_flags = 0;
		ds->ds_phys->ds_unique_bytes = 0;
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
		    SPA_VERSION_UNIQUE_ACCURATE)
			ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

		osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
		    &ds->ds_phys->ds_bp, *ost, tx);
#ifdef _KERNEL
		zfs_create_fs(&osi->os, kcred, NULL, tx);
#endif
	}

	spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", ds->ds_object);
}
1319
1320 /* ARGSUSED */
1321 static int
1322 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1323 {
1324         dsl_dataset_t *ds = arg1;
1325         objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1326         uint64_t count;
1327         int err;
1328
1329         /*
1330          * Can't delete a head dataset if there are snapshots of it.
1331          * (Except if the only snapshots are from the branch we cloned
1332          * from.)
1333          */
1334         if (ds->ds_prev != NULL &&
1335             ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1336                 return (EINVAL);
1337
1338         /*
1339          * This is really a dsl_dir thing, but check it here so that
1340          * we'll be less likely to leave this dataset inconsistent &
1341          * nearly destroyed.
1342          */
1343         err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1344         if (err)
1345                 return (err);
1346         if (count != 0)
1347                 return (EEXIST);
1348
1349         return (0);
1350 }
1351
/*
 * Sync half of the first destroy phase: flag the dataset inconsistent
 * on disk before we start freeing its objects in open context, so a
 * crash mid-destroy is detectable.
 */
/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);
}
1366
/*
 * Sync-task check for the final destroy: verify ds can be destroyed
 * in this txg.  The caller holds ds as owner.
 */
/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;

	/* we have an owner hold, so noone else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/* Can't delete a branch point. */
	if (ds->ds_phys->ds_num_children > 1)
		return (EEXIST);

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	/* XXX we should do some i/o error checking... */
	return (0);
}
1399
/*
 * Synchronization state for dsl_dataset_drain_refs(): 'gone' is set by
 * the dbuf eviction callback dsl_dataset_refs_gone() under 'lock', and
 * 'cv' wakes the waiting thread.
 */
struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};
1405
1406 /* ARGSUSED */
1407 static void
1408 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1409 {
1410         struct refsarg *arg = argv;
1411
1412         mutex_enter(&arg->lock);
1413         arg->gone = TRUE;
1414         cv_signal(&arg->cv);
1415         mutex_exit(&arg->lock);
1416 }
1417
/*
 * Release our hold on the dataset's dbuf (held under 'tag') and block
 * until every other hold has been released.  dsl_dataset_refs_gone()
 * is installed as the dbuf's user-eviction callback and signals arg.cv
 * when the final reference disappears.  On return, ds_dbuf and ds_phys
 * are cleared and must no longer be dereferenced.
 */
static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	bzero(&arg, sizeof(arg));
	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	/* Swap the dbuf's user data so refs_gone() fires on last release. */
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	/* Wait for the callback to report that the last hold is gone. */
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}
1440
1441 void
1442 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
1443 {
1444         dsl_dataset_t *ds = arg1;
1445         zio_t *zio;
1446         int err;
1447         int after_branch_point = FALSE;
1448         dsl_pool_t *dp = ds->ds_dir->dd_pool;
1449         objset_t *mos = dp->dp_meta_objset;
1450         dsl_dataset_t *ds_prev = NULL;
1451         uint64_t obj;
1452
1453         ASSERT(ds->ds_owner);
1454         ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
1455         ASSERT(ds->ds_prev == NULL ||
1456             ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1457         ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1458
1459         /* signal any waiters that this dataset is going away */
1460         mutex_enter(&ds->ds_lock);
1461         ds->ds_owner = dsl_reaper;
1462         cv_broadcast(&ds->ds_exclusive_cv);
1463         mutex_exit(&ds->ds_lock);
1464
1465         /* Remove our reservation */
1466         if (ds->ds_reserved != 0) {
1467                 uint64_t val = 0;
1468                 dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
1469                 ASSERT3U(ds->ds_reserved, ==, 0);
1470         }
1471
1472         ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1473
1474         dsl_pool_ds_destroyed(ds, tx);
1475
1476         obj = ds->ds_object;
1477
1478         if (ds->ds_phys->ds_prev_snap_obj != 0) {
1479                 if (ds->ds_prev) {
1480                         ds_prev = ds->ds_prev;
1481                 } else {
1482                         VERIFY(0 == dsl_dataset_hold_obj(dp,
1483                             ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1484                 }
1485                 after_branch_point =
1486                     (ds_prev->ds_phys->ds_next_snap_obj != obj);
1487
1488                 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1489                 if (after_branch_point &&
1490                     ds_prev->ds_phys->ds_next_clones_obj != 0) {
1491                         VERIFY(0 == zap_remove_int(mos,
1492                             ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
1493                         if (ds->ds_phys->ds_next_snap_obj != 0) {
1494                                 VERIFY(0 == zap_add_int(mos,
1495                                     ds_prev->ds_phys->ds_next_clones_obj,
1496                                     ds->ds_phys->ds_next_snap_obj, tx));
1497                         }
1498                 }
1499                 if (after_branch_point &&
1500                     ds->ds_phys->ds_next_snap_obj == 0) {
1501                         /* This clone is toast. */
1502                         ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1503                         ds_prev->ds_phys->ds_num_children--;
1504                 } else if (!after_branch_point) {
1505                         ds_prev->ds_phys->ds_next_snap_obj =
1506                             ds->ds_phys->ds_next_snap_obj;
1507                 }
1508         }
1509
1510         zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1511
1512         if (ds->ds_phys->ds_next_snap_obj != 0) {
1513                 blkptr_t bp;
1514                 dsl_dataset_t *ds_next;
1515                 uint64_t itor = 0;
1516                 uint64_t old_unique;
1517                 int64_t used = 0, compressed = 0, uncompressed = 0;
1518
1519                 VERIFY(0 == dsl_dataset_hold_obj(dp,
1520                     ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1521                 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1522
1523                 old_unique = dsl_dataset_unique(ds_next);
1524
1525                 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1526                 ds_next->ds_phys->ds_prev_snap_obj =
1527                     ds->ds_phys->ds_prev_snap_obj;
1528                 ds_next->ds_phys->ds_prev_snap_txg =
1529                     ds->ds_phys->ds_prev_snap_txg;
1530                 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1531                     ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1532
1533                 /*
1534                  * Transfer to our deadlist (which will become next's
1535                  * new deadlist) any entries from next's current
1536                  * deadlist which were born before prev, and free the
1537                  * other entries.
1538                  *
1539                  * XXX we're doing this long task with the config lock held
1540                  */
1541                 while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1542                         if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1543                                 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1544                                     &bp, tx));
1545                                 if (ds_prev && !after_branch_point &&
1546                                     bp.blk_birth >
1547                                     ds_prev->ds_phys->ds_prev_snap_txg) {
1548                                         ds_prev->ds_phys->ds_unique_bytes +=
1549                                             bp_get_dasize(dp->dp_spa, &bp);
1550                                 }
1551                         } else {
1552                                 used += bp_get_dasize(dp->dp_spa, &bp);
1553                                 compressed += BP_GET_PSIZE(&bp);
1554                                 uncompressed += BP_GET_UCSIZE(&bp);
1555                                 /* XXX check return value? */
1556                                 (void) dsl_free(zio, dp, tx->tx_txg,
1557                                     &bp, NULL, NULL, ARC_NOWAIT);
1558                         }
1559                 }
1560
1561                 ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1562
1563                 /* change snapused */
1564                 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1565                     -used, -compressed, -uncompressed, tx);
1566
1567                 /* free next's deadlist */
1568                 bplist_close(&ds_next->ds_deadlist);
1569                 bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1570
1571                 /* set next's deadlist to our deadlist */
1572                 bplist_close(&ds->ds_deadlist);
1573                 ds_next->ds_phys->ds_deadlist_obj =
1574                     ds->ds_phys->ds_deadlist_obj;
1575                 VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1576                     ds_next->ds_phys->ds_deadlist_obj));
1577                 ds->ds_phys->ds_deadlist_obj = 0;
1578
1579                 if (ds_next->ds_phys->ds_next_snap_obj != 0) {
1580                         /*
1581                          * Update next's unique to include blocks which
1582                          * were previously shared by only this snapshot
1583                          * and it.  Those blocks will be born after the
1584                          * prev snap and before this snap, and will have
1585                          * died after the next snap and before the one
1586                          * after that (ie. be on the snap after next's
1587                          * deadlist).
1588                          *
1589                          * XXX we're doing this long task with the
1590                          * config lock held
1591                          */
1592                         dsl_dataset_t *ds_after_next;
1593                         uint64_t space;
1594
1595                         VERIFY(0 == dsl_dataset_hold_obj(dp,
1596                             ds_next->ds_phys->ds_next_snap_obj,
1597                             FTAG, &ds_after_next));
1598
1599                         VERIFY(0 ==
1600                             bplist_space_birthrange(&ds_after_next->ds_deadlist,
1601                             ds->ds_phys->ds_prev_snap_txg,
1602                             ds->ds_phys->ds_creation_txg, &space));
1603                         ds_next->ds_phys->ds_unique_bytes += space;
1604
1605                         dsl_dataset_rele(ds_after_next, FTAG);
1606                         ASSERT3P(ds_next->ds_prev, ==, NULL);
1607                 } else {
1608                         ASSERT3P(ds_next->ds_prev, ==, ds);
1609                         dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1610                         ds_next->ds_prev = NULL;
1611                         if (ds_prev) {
1612                                 VERIFY(0 == dsl_dataset_get_ref(dp,
1613                                     ds->ds_phys->ds_prev_snap_obj,
1614                                     ds_next, &ds_next->ds_prev));
1615                         }
1616
1617                         dsl_dataset_recalc_head_uniq(ds_next);
1618
1619                         /*
1620                          * Reduce the amount of our unconsmed refreservation
1621                          * being charged to our parent by the amount of
1622                          * new unique data we have gained.
1623                          */
1624                         if (old_unique < ds_next->ds_reserved) {
1625                                 int64_t mrsdelta;
1626                                 uint64_t new_unique =
1627                                     ds_next->ds_phys->ds_unique_bytes;
1628
1629                                 ASSERT(old_unique <= new_unique);
1630                                 mrsdelta = MIN(new_unique - old_unique,
1631                                     ds_next->ds_reserved - old_unique);
1632                                 dsl_dir_diduse_space(ds->ds_dir,
1633                                     DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1634                         }
1635                 }
1636                 dsl_dataset_rele(ds_next, FTAG);
1637         } else {
1638                 /*
1639                  * There's no next snapshot, so this is a head dataset.
1640                  * Destroy the deadlist.  Unless it's a clone, the
1641                  * deadlist should be empty.  (If it's a clone, it's
1642                  * safe to ignore the deadlist contents.)
1643                  */
1644                 struct killarg ka;
1645
1646                 ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1647                 bplist_close(&ds->ds_deadlist);
1648                 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1649                 ds->ds_phys->ds_deadlist_obj = 0;
1650
1651                 /*
1652                  * Free everything that we point to (that's born after
1653                  * the previous snapshot, if we are a clone)
1654                  *
1655                  * NB: this should be very quick, because we already
1656                  * freed all the objects in open context.
1657                  */
1658                 ka.ds = ds;
1659                 ka.zio = zio;
1660                 ka.tx = tx;
1661                 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1662                     TRAVERSE_POST, kill_blkptr, &ka);
1663                 ASSERT3U(err, ==, 0);
1664                 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1665                     ds->ds_phys->ds_unique_bytes == 0);
1666         }
1667
1668         err = zio_wait(zio);
1669         ASSERT3U(err, ==, 0);
1670
1671         if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1672                 /* Erase the link in the dir */
1673                 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1674                 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1675                 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1676                 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1677                 ASSERT(err == 0);
1678         } else {
1679                 /* remove from snapshot namespace */
1680                 dsl_dataset_t *ds_head;
1681                 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1682                 VERIFY(0 == dsl_dataset_hold_obj(dp,
1683                     ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1684                 VERIFY(0 == dsl_dataset_get_snapname(ds));
1685 #ifdef ZFS_DEBUG
1686                 {
1687                         uint64_t val;
1688
1689                         err = dsl_dataset_snap_lookup(ds_head,
1690                             ds->ds_snapname, &val);
1691                         ASSERT3U(err, ==, 0);
1692                         ASSERT3U(val, ==, obj);
1693                 }
1694 #endif
1695                 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1696                 ASSERT(err == 0);
1697                 dsl_dataset_rele(ds_head, FTAG);
1698         }
1699
1700         if (ds_prev && ds->ds_prev != ds_prev)
1701                 dsl_dataset_rele(ds_prev, FTAG);
1702
1703         spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1704         spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
1705             cr, "dataset = %llu", ds->ds_object);
1706
1707         if (ds->ds_phys->ds_next_clones_obj != 0) {
1708                 uint64_t count;
1709                 ASSERT(0 == zap_count(mos,
1710                     ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1711                 VERIFY(0 == dmu_object_free(mos,
1712                     ds->ds_phys->ds_next_clones_obj, tx));
1713         }
1714         if (ds->ds_phys->ds_props_obj != 0)
1715                 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1716         dsl_dir_close(ds->ds_dir, ds);
1717         ds->ds_dir = NULL;
1718         dsl_dataset_drain_refs(ds, tag);
1719         VERIFY(0 == dmu_object_free(mos, obj, tx));
1720 }
1721
1722 static int
1723 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1724 {
1725         uint64_t asize;
1726
1727         if (!dmu_tx_is_syncing(tx))
1728                 return (0);
1729
1730         /*
1731          * If there's an fs-only reservation, any blocks that might become
1732          * owned by the snapshot dataset must be accommodated by space
1733          * outside of the reservation.
1734          */
1735         asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
1736         if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1737                 return (ENOSPC);
1738
1739         /*
1740          * Propogate any reserved space for this snapshot to other
1741          * snapshot checks in this sync group.
1742          */
1743         if (asize > 0)
1744                 dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1745
1746         return (0);
1747 }
1748
1749 /* ARGSUSED */
1750 int
1751 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1752 {
1753         dsl_dataset_t *ds = arg1;
1754         const char *snapname = arg2;
1755         int err;
1756         uint64_t value;
1757
1758         /*
1759          * We don't allow multiple snapshots of the same txg.  If there
1760          * is already one, try again.
1761          */
1762         if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1763                 return (EAGAIN);
1764
1765         /*
1766          * Check for conflicting name snapshot name.
1767          */
1768         err = dsl_dataset_snap_lookup(ds, snapname, &value);
1769         if (err == 0)
1770                 return (EEXIST);
1771         if (err != ENOENT)
1772                 return (err);
1773
1774         /*
1775          * Check that the dataset's name is not too long.  Name consists
1776          * of the dataset's length + 1 for the @-sign + snapshot name's length
1777          */
1778         if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1779                 return (ENAMETOOLONG);
1780
1781         err = dsl_dataset_snapshot_reserve_space(ds, tx);
1782         if (err)
1783                 return (err);
1784
1785         ds->ds_trysnap_txg = tx->tx_txg;
1786         return (0);
1787 }
1788
/*
 * Sync task to create a snapshot of 'ds' (arg1) named 'snapname' (arg2).
 * Allocates a new dsl_dataset_phys_t capturing the head's current state
 * (bp, deadlist, space accounting), splices it in as the head's new
 * previous snapshot, and gives the head a fresh, empty deadlist.
 * Called with the pool config lock held for writer.
 */
void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        const char *snapname = arg2;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        dmu_buf_t *dbuf;
        dsl_dataset_phys_t *dsphys;
        uint64_t dsobj, crtxg;
        objset_t *mos = dp->dp_meta_objset;
        int err;

        ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

        /*
         * The origin's ds_creation_txg has to be < TXG_INITIAL
         */
        if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
                crtxg = 1;
        else
                crtxg = tx->tx_txg;

        /* Allocate and initialize the snapshot's phys object. */
        dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
            DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
        VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
        dmu_buf_will_dirty(dbuf, tx);
        dsphys = dbuf->db_data;
        bzero(dsphys, sizeof (dsl_dataset_phys_t));
        dsphys->ds_dir_obj = ds->ds_dir->dd_object;
        dsphys->ds_fsid_guid = unique_create();
        (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
            sizeof (dsphys->ds_guid));
        /* The snapshot inherits the head's previous snapshot ... */
        dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
        dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
        /* ... and points forward at the head itself. */
        dsphys->ds_next_snap_obj = ds->ds_object;
        dsphys->ds_num_children = 1;
        dsphys->ds_creation_time = gethrestime_sec();
        dsphys->ds_creation_txg = crtxg;
        /* The snapshot takes ownership of the head's current deadlist. */
        dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
        dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
        dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
        dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
        dsphys->ds_flags = ds->ds_phys->ds_flags;
        dsphys->ds_bp = ds->ds_phys->ds_bp;
        dmu_buf_rele(dbuf, FTAG);

        /*
         * Splice the new snapshot in after the old previous snapshot:
         * either update the prev's next pointer (normal case), or fix
         * up the prev's next-clones map (we are a clone of it).
         */
        ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
        if (ds->ds_prev) {
                uint64_t next_clones_obj =
                    ds->ds_prev->ds_phys->ds_next_clones_obj;
                ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
                    ds->ds_object ||
                    ds->ds_prev->ds_phys->ds_num_children > 1);
                if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
                        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                        ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
                            ds->ds_prev->ds_phys->ds_creation_txg);
                        ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
                } else if (next_clones_obj != 0) {
                        VERIFY3U(0, ==, zap_remove_int(mos,
                            next_clones_obj, dsphys->ds_next_snap_obj, tx));
                        VERIFY3U(0, ==, zap_add_int(mos,
                            next_clones_obj, dsobj, tx));
                }
        }

        /*
         * If we have a reference-reservation on this dataset, we will
         * need to increase the amount of refreservation being charged
         * since our unique space is going to zero.
         */
        if (ds->ds_reserved) {
                int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
                dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
                    add, 0, 0, tx);
        }

        /*
         * Point the head at the new snapshot and give it a fresh,
         * empty deadlist (its old one now belongs to the snapshot).
         */
        bplist_close(&ds->ds_deadlist);
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
        ds->ds_phys->ds_prev_snap_obj = dsobj;
        ds->ds_phys->ds_prev_snap_txg = crtxg;
        ds->ds_phys->ds_unique_bytes = 0;
        if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
                ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
        ds->ds_phys->ds_deadlist_obj =
            bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
        VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
            ds->ds_phys->ds_deadlist_obj));

        /* Enter the snapshot into the head's snapshot-name directory. */
        dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
        err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
            snapname, 8, 1, &dsobj, tx);
        ASSERT(err == 0);

        /* Swap ds_prev over to reference the newly created snapshot. */
        if (ds->ds_prev)
                dsl_dataset_drop_ref(ds->ds_prev, ds);
        VERIFY(0 == dsl_dataset_get_ref(dp,
            ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

        dsl_pool_ds_snapshotted(ds, tx);

        spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
            "dataset = %llu", dsobj);
}
1894
/*
 * Sync out this (head) dataset's dirty state for the given txg.
 * Called from the pool sync path: 'ds' must have an open objset
 * (ds_user_ptr) and must not be a snapshot (no next snapshot).
 */
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
        ASSERT(dmu_tx_is_syncing(tx));
        ASSERT(ds->ds_user_ptr != NULL);
        ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

        /*
         * in case we had to change ds_fsid_guid when we opened it,
         * sync it out now.
         */
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

        /* Dirty the containing dir and write out the objset itself. */
        dsl_dir_dirty(ds->ds_dir, tx);
        dmu_objset_sync(ds->ds_user_ptr, zio, tx);
}
1912
1913 void
1914 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1915 {
1916         uint64_t refd, avail, uobjs, aobjs;
1917
1918         dsl_dir_stats(ds->ds_dir, nv);
1919
1920         dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1921         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1922         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1923
1924         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1925             ds->ds_phys->ds_creation_time);
1926         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
1927             ds->ds_phys->ds_creation_txg);
1928         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
1929             ds->ds_quota);
1930         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
1931             ds->ds_reserved);
1932         dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
1933             ds->ds_phys->ds_guid);
1934
1935         if (ds->ds_phys->ds_next_snap_obj) {
1936                 /*
1937                  * This is a snapshot; override the dd's space used with
1938                  * our unique space and compression ratio.
1939                  */
1940                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
1941                     ds->ds_phys->ds_unique_bytes);
1942                 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
1943                     ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
1944                     (ds->ds_phys->ds_uncompressed_bytes * 100 /
1945                     ds->ds_phys->ds_compressed_bytes));
1946         }
1947 }
1948
1949 void
1950 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
1951 {
1952         stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
1953         stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
1954         stat->dds_guid = ds->ds_phys->ds_guid;
1955         if (ds->ds_phys->ds_next_snap_obj) {
1956                 stat->dds_is_snapshot = B_TRUE;
1957                 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
1958         }
1959
1960         /* clone origin is really a dsl_dir thing... */
1961         rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
1962         if (dsl_dir_is_clone(ds->ds_dir)) {
1963                 dsl_dataset_t *ods;
1964
1965                 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
1966                     ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
1967                 dsl_dataset_name(ods, stat->dds_origin);
1968                 dsl_dataset_drop_ref(ods, FTAG);
1969         }
1970         rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
1971 }
1972
/*
 * Return the in-core fsid guid for this dataset.  Note this may differ
 * from the on-disk ds_fsid_guid until the next sync (see
 * dsl_dataset_sync(), which writes ds_fsid_guid back out).
 */
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
        return (ds->ds_fsid_guid);
}
1978
1979 void
1980 dsl_dataset_space(dsl_dataset_t *ds,
1981     uint64_t *refdbytesp, uint64_t *availbytesp,
1982     uint64_t *usedobjsp, uint64_t *availobjsp)
1983 {
1984         *refdbytesp = ds->ds_phys->ds_used_bytes;
1985         *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
1986         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
1987                 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
1988         if (ds->ds_quota != 0) {
1989                 /*
1990                  * Adjust available bytes according to refquota
1991                  */
1992                 if (*refdbytesp < ds->ds_quota)
1993                         *availbytesp = MIN(*availbytesp,
1994                             ds->ds_quota - *refdbytesp);
1995                 else
1996                         *availbytesp = 0;
1997         }
1998         *usedobjsp = ds->ds_phys->ds_bp.blk_fill;
1999         *availobjsp = DN_MAX_OBJECT - *usedobjsp;
2000 }
2001
2002 boolean_t
2003 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2004 {
2005         dsl_pool_t *dp = ds->ds_dir->dd_pool;
2006
2007         ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2008             dsl_pool_sync_context(dp));
2009         if (ds->ds_prev == NULL)
2010                 return (B_FALSE);
2011         if (ds->ds_phys->ds_bp.blk_birth >
2012             ds->ds_prev->ds_phys->ds_creation_txg)
2013                 return (B_TRUE);
2014         return (B_FALSE);
2015 }
2016
2017 /* ARGSUSED */
2018 static int
2019 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2020 {
2021         dsl_dataset_t *ds = arg1;
2022         char *newsnapname = arg2;
2023         dsl_dir_t *dd = ds->ds_dir;
2024         dsl_dataset_t *hds;
2025         uint64_t val;
2026         int err;
2027
2028         err = dsl_dataset_hold_obj(dd->dd_pool,
2029             dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2030         if (err)
2031                 return (err);
2032
2033         /* new name better not be in use */
2034         err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2035         dsl_dataset_rele(hds, FTAG);
2036
2037         if (err == 0)
2038                 err = EEXIST;
2039         else if (err == ENOENT)
2040                 err = 0;
2041
2042         /* dataset name + 1 for the "@" + the new snapshot name must fit */
2043         if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2044                 err = ENAMETOOLONG;
2045
2046         return (err);
2047 }
2048
/*
 * Sync task to rename snapshot 'ds' (arg1) to 'newsnapname' (arg2):
 * remove the old name from the head dataset's snapnames zap, update the
 * cached name, and add an entry under the new name.
 */
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx)
{
        dsl_dataset_t *ds = arg1;
        const char *newsnapname = arg2;
        dsl_dir_t *dd = ds->ds_dir;
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        dsl_dataset_t *hds;
        int err;

        ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

        VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
            dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

        /* Drop the old name from the head's snapshot directory. */
        VERIFY(0 == dsl_dataset_get_snapname(ds));
        err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
        ASSERT3U(err, ==, 0);
        /* Update the cached snapname under ds_lock, then re-add it. */
        mutex_enter(&ds->ds_lock);
        (void) strcpy(ds->ds_snapname, newsnapname);
        mutex_exit(&ds->ds_lock);
        err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
            ds->ds_snapname, 8, 1, &ds->ds_object, tx);
        ASSERT3U(err, ==, 0);

        spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
            cr, "dataset = %llu", ds->ds_object);
        dsl_dataset_rele(hds, FTAG);
}
2079
/* State shared among the sync tasks of a recursive snapshot rename. */
struct renamesnaparg {
        dsl_sync_task_group_t *dstg;    /* one rename task per dataset */
        char failed[MAXPATHLEN];        /* name of dataset that failed */
        char *oldsnap;                  /* snapshot name being renamed */
        char *newsnap;                  /* new snapshot name */
};
2086
/*
 * dmu_objset_find() callback for recursive snapshot rename: check
 * rename permission on <name>@<oldsnap>, unmount it, hold it via the
 * task group, and queue a rename sync task for it.  A dataset without
 * the snapshot (ENOENT) is simply skipped; any other error records the
 * failing name in ra->failed and aborts the walk.
 *
 * NOTE(review): 'name' is extended in place with "@<oldsnap>" via
 * strcpy with no explicit bounds check here -- presumably the buffer
 * from dmu_objset_find() is MAXPATHLEN and the length check in
 * dsl_dataset_snapshot_rename_check() keeps names in range; confirm.
 */
static int
dsl_snapshot_rename_one(char *name, void *arg)
{
        struct renamesnaparg *ra = arg;
        dsl_dataset_t *ds = NULL;
        char *cp;
        int err;

        /* Append "@<oldsnap>" to the filesystem name in place. */
        cp = name + strlen(name);
        *cp = '@';
        (void) strcpy(cp + 1, ra->oldsnap);

        /*
         * For recursive snapshot renames the parent won't be changing
         * so we just pass name for both the to/from argument.
         */
        err = zfs_secpolicy_rename_perms(name, name, CRED());
        if (err == ENOENT) {
                return (0);
        } else if (err) {
                (void) strcpy(ra->failed, name);
                return (err);
        }

#ifdef _KERNEL
        /*
         * For all filesystems undergoing rename, we'll need to unmount it.
         */
        (void) zfs_unmount_snap(name, NULL);
#endif
        err = dsl_dataset_hold(name, ra->dstg, &ds);
        *cp = '\0';     /* restore the filesystem-only name */
        if (err == ENOENT) {
                return (0);
        } else if (err) {
                (void) strcpy(ra->failed, name);
                return (err);
        }

        dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
            dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

        return (0);
}
2131
/*
 * Rename snapshot 'oldname' to 'newname' on its filesystem and on all
 * descendant filesystems, as one sync task group so that the renames
 * commit together.  On failure, 'oldname' is overwritten with the full
 * snapshot name of the dataset that failed.
 */
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
        int err;
        struct renamesnaparg *ra;
        dsl_sync_task_t *dst;
        spa_t *spa;
        char *cp, *fsname = spa_strdup(oldname);
        int len = strlen(oldname);

        /* truncate the snapshot name to get the fsname */
        cp = strchr(fsname, '@');
        *cp = '\0';

        err = spa_open(fsname, &spa, FTAG);
        if (err) {
                kmem_free(fsname, len + 1);
                return (err);
        }
        ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
        ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

        /* Both names share the filesystem prefix; keep only the snaps. */
        ra->oldsnap = strchr(oldname, '@') + 1;
        ra->newsnap = strchr(newname, '@') + 1;
        *ra->failed = '\0';

        /* Queue one rename task per filesystem that has the snapshot. */
        err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
            DS_FIND_CHILDREN);
        kmem_free(fsname, len + 1);

        if (err == 0) {
                err = dsl_sync_task_group_wait(ra->dstg);
        }

        /*
         * Release the holds taken by dsl_snapshot_rename_one() and
         * record the name of any dataset whose task failed.
         */
        for (dst = list_head(&ra->dstg->dstg_tasks); dst;
            dst = list_next(&ra->dstg->dstg_tasks, dst)) {
                dsl_dataset_t *ds = dst->dst_arg1;
                if (dst->dst_err) {
                        dsl_dir_name(ds->ds_dir, ra->failed);
                        (void) strcat(ra->failed, "@");
                        (void) strcat(ra->failed, ra->newsnap);
                }
                dsl_dataset_rele(ds, ra->dstg);
        }

        /* Report the failed dataset's name back through 'oldname'. */
        if (err)
                (void) strcpy(oldname, ra->failed);

        dsl_sync_task_group_destroy(ra->dstg);
        kmem_free(ra, sizeof (struct renamesnaparg));
        spa_close(spa, FTAG);
        return (err);
}
2185
2186 static int
2187 dsl_valid_rename(char *oldname, void *arg)
2188 {
2189         int delta = *(int *)arg;
2190
2191         if (strlen(oldname) + delta >= MAXNAMELEN)
2192                 return (ENAMETOOLONG);
2193
2194         return (0);
2195 }
2196
#pragma weak dmu_objset_rename = dsl_dataset_rename
/*
 * Rename a dataset.  If 'oldname' has no snapshot component, the whole
 * dsl_dir is renamed (after validating that no child name would grow
 * past MAXNAMELEN).  Otherwise a snapshot is renamed; 'newname' must
 * name a snapshot of the same filesystem, and with 'recursive' set the
 * snapshot is renamed on all descendant filesystems as well.
 */
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
        dsl_dir_t *dd;
        dsl_dataset_t *ds;
        const char *tail;
        int err;

        err = dsl_dir_open(oldname, FTAG, &dd, &tail);
        if (err)
                return (err);
        /*
         * If there are more than 2 references there may be holds
         * hanging around that haven't been cleared out yet.
         */
        if (dmu_buf_refcount(dd->dd_dbuf) > 2)
                txg_wait_synced(dd->dd_pool, 0);
        if (tail == NULL) {
                int delta = strlen(newname) - strlen(oldname);

                /* if we're growing, validate child name lengths */
                if (delta > 0)
                        err = dmu_objset_find(oldname, dsl_valid_rename,
                            &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

                if (!err)
                        err = dsl_dir_rename(dd, newname);
                dsl_dir_close(dd, FTAG);
                return (err);
        }
        if (tail[0] != '@') {
                /* the name ended in a nonexistent component */
                dsl_dir_close(dd, FTAG);
                return (ENOENT);
        }

        dsl_dir_close(dd, FTAG);

        /* new name must be snapshot in same filesystem */
        tail = strchr(newname, '@');
        if (tail == NULL)
                return (EINVAL);
        tail++;
        if (strncmp(oldname, newname, tail - newname) != 0)
                return (EXDEV);

        if (recursive) {
                err = dsl_recursive_rename(oldname, newname);
        } else {
                err = dsl_dataset_hold(oldname, FTAG, &ds);
                if (err)
                        return (err);

                /* Rename just this one snapshot as a single sync task. */
                err = dsl_sync_task_do(ds->ds_dir->dd_pool,
                    dsl_dataset_snapshot_rename_check,
                    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

                dsl_dataset_rele(ds, FTAG);
        }

        return (err);
}
2260
/* A single snapshot in one of the promote snaplists. */
struct promotenode {
        list_node_t link;       /* linkage in the snaplist */
        dsl_dataset_t *ds;      /* the snapshot itself */
};
2265
/*
 * Arguments for the promote check/sync functions.  The snaplists hold
 * the snapshots being moved to the clone (shared_snaps), the origin
 * head's snapshots (origin_snaps), and the clone's own snapshots
 * (clone_snaps); the uint64_t fields accumulate the space accounting
 * computed by dsl_dataset_promote_check().
 */
struct promotearg {
        list_t shared_snaps, origin_snaps, clone_snaps;
        dsl_dataset_t *origin_origin, *origin_head;
        uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
};
2271
2272 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2273
/*
 * Check function for clone promotion: arg1 is the clone head, arg2 the
 * promotearg.  Verifies the promotion is legal (real clone, no name
 * conflicts, NOPROMOTE not set, enough space) and precomputes the
 * space accounting (pa->used/comp/uncomp, pa->unique,
 * pa->cloneusedsnap, pa->originusedsnap) for the sync function.
 */
/* ARGSUSED */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
        dsl_dataset_t *hds = arg1;
        struct promotearg *pa = arg2;
        struct promotenode *snap = list_head(&pa->shared_snaps);
        dsl_dataset_t *origin_ds = snap->ds;
        int err;

        /* Check that it is a real clone */
        if (!dsl_dir_is_clone(hds->ds_dir))
                return (EINVAL);

        /* Since this is so expensive, don't do the preliminary check */
        if (!dmu_tx_is_syncing(tx))
                return (0);

        if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
                return (EXDEV);

        /* compute origin's new unique space */
        snap = list_tail(&pa->clone_snaps);
        ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
        err = bplist_space_birthrange(&snap->ds->ds_deadlist,
            origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
        if (err)
                return (err);

        /*
         * Walk the snapshots that we are moving
         *
         * Compute space to transfer.  Consider the incremental changes
         * to used for each snapshot:
         * (my used) = (prev's used) + (blocks born) - (blocks killed)
         * So each snapshot gave birth to:
         * (blocks born) = (my used) - (prev's used) + (blocks killed)
         * So a sequence would look like:
         * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
         * Which simplifies to:
         * uN + kN + kN-1 + ... + k1 + k0
         * Note however, if we stop before we reach the ORIGIN we get:
         * uN + kN + kN-1 + ... + kM - uM-1
         */
        pa->used = origin_ds->ds_phys->ds_used_bytes;
        pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
        pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
        for (snap = list_head(&pa->shared_snaps); snap;
            snap = list_next(&pa->shared_snaps, snap)) {
                uint64_t val, dlused, dlcomp, dluncomp;
                dsl_dataset_t *ds = snap->ds;

                /* Check that the snapshot name does not conflict */
                VERIFY(0 == dsl_dataset_get_snapname(ds));
                err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
                if (err == 0)
                        return (EEXIST);
                if (err != ENOENT)
                        return (err);

                /* The very first snapshot does not have a deadlist */
                if (ds->ds_phys->ds_prev_snap_obj == 0)
                        continue;

                /* Note: assignment in the condition is intentional. */
                if (err = bplist_space(&ds->ds_deadlist,
                    &dlused, &dlcomp, &dluncomp))
                        return (err);
                pa->used += dlused;
                pa->comp += dlcomp;
                pa->uncomp += dluncomp;
        }

        /*
         * If we are a clone of a clone then we never reached ORIGIN,
         * so we need to subtract out the clone origin's used space.
         */
        if (pa->origin_origin) {
                pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
                pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
                pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
        }

        /* Check that there is enough space here */
        err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
            pa->used);
        if (err)
                return (err);

        /*
         * Compute the amounts of space that will be used by snapshots
         * after the promotion (for both origin and clone).  For each,
         * it is the amount of space that will be on all of their
         * deadlists (that was not born before their new origin).
         */
        if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                uint64_t space;

                /*
                 * Note, typically this will not be a clone of a clone,
                 * so snap->ds->ds_origin_txg will be < TXG_INITIAL, so
                 * these snaplist_space() -> bplist_space_birthrange()
                 * calls will be fast because they do not have to
                 * iterate over all bps.
                 */
                snap = list_head(&pa->origin_snaps);
                err = snaplist_space(&pa->shared_snaps,
                    snap->ds->ds_origin_txg, &pa->cloneusedsnap);
                if (err)
                        return (err);

                err = snaplist_space(&pa->clone_snaps,
                    snap->ds->ds_origin_txg, &space);
                if (err)
                        return (err);
                pa->cloneusedsnap += space;
        }
        if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
                err = snaplist_space(&pa->origin_snaps,
                    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
                if (err)
                        return (err);
        }

        return (0);
}
2399
/*
 * Sync task for dsl_dataset_promote(): make hds (the promoted clone's
 * head) the new owner of the snapshots it shares with its origin head.
 * Rewrites the origin linkage between the two dsl_dirs, moves the shared
 * snapshots' name entries and dir pointers into hds's dir, and applies
 * the space deltas precomputed in *pa by the check function.
 */
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->shared_snaps);
	dsl_dataset_t *origin_ds = snap->ds;	/* oldest shared snapshot */
	dsl_dataset_t *origin_head;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;			/* origin's dsl_dir */
	uint64_t oldnext_obj;
	int64_t delta;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	snap = list_head(&pa->origin_snaps);
	origin_head = snap->ds;

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	snap = list_tail(&pa->clone_snaps);
	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;

	/* change the origin's next clone: swap new next-snap for the old */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    origin_ds->ds_phys->ds_next_snap_obj, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin: the promoted dir inherits the origin's origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	hds->ds_origin_txg = origin_head->ds_origin_txg;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
	origin_head->ds_origin_txg = origin_ds->ds_phys->ds_creation_txg;

	/* move snapshots to this dir */
	for (snap = list_head(&pa->shared_snaps); snap;
	    snap = list_next(&pa->shared_snaps, snap)) {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		/* move snap name entry from the old head's zap to hds's */
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));
		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	}

	/*
	 * Change space accounting.
	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
	 * both be valid, or both be 0 (resulting in delta == 0).  This
	 * is true for each of {clone,origin} independently.
	 */

	/* clone gains snapshot space; remainder is charged to its head */
	delta = pa->cloneusedsnap -
	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, >=, 0);
	ASSERT3U(pa->used, >=, delta);
	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(dd, DD_USED_HEAD,
	    pa->used - delta, pa->comp, pa->uncomp, tx);

	/* origin dir loses the corresponding space */
	delta = pa->originusedsnap -
	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
	ASSERT3S(delta, <=, 0);
	ASSERT3U(pa->used, >=, -delta);
	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
	dsl_dir_diduse_space(odd, DD_USED_HEAD,
	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);

	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    cr, "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
}
2512
/* hold/own tag applied to every dataset placed on a snaplist */
static char *snaplist_tag = "snaplist";
/*
 * Make a list of dsl_dataset_t's for the snapshots between first_obj
 * (exclusive) and last_obj (inclusive).  The list will be in reverse
 * order (last_obj will be the list_head()).  If first_obj == 0, do all
 * snapshots back to this dataset's origin.
 *
 * If 'own' is set, each snapshot is owned (and made exclusive) rather
 * than merely held; release with snaplist_destroy(l, own).
 */
static int
snaplist_make(dsl_pool_t *dp, boolean_t own,
    uint64_t first_obj, uint64_t last_obj, list_t *l)
{
	uint64_t obj = last_obj;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));

	list_create(l, sizeof (struct promotenode),
	    offsetof(struct promotenode, link));

	/* walk backwards along the ds_prev_snap_obj chain */
	while (obj != first_obj) {
		dsl_dataset_t *ds;
		struct promotenode *snap;
		int err;

		if (own) {
			err = dsl_dataset_own_obj(dp, obj,
			    0, snaplist_tag, &ds);
			if (err == 0)
				dsl_dataset_make_exclusive(ds, snaplist_tag);
		} else {
			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
		}
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(l);
			/*
			 * NOTE(review): assumes at least one snapshot is
			 * already on the list (last != NULL) when the race
			 * is lost -- confirm ENOENT cannot occur on the
			 * first iteration.
			 */
			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
			obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			return (err);
		}

		/* first_obj == 0 means "walk back to the origin" */
		if (first_obj == 0)
			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = ds;
		list_insert_tail(l, snap);
		obj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
2565
2566 static int
2567 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2568 {
2569         struct promotenode *snap;
2570
2571         *spacep = 0;
2572         for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2573                 uint64_t used;
2574                 int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2575                     mintxg, UINT64_MAX, &used);
2576                 if (err)
2577                         return (err);
2578                 *spacep += used;
2579         }
2580         return (0);
2581 }
2582
2583 static void
2584 snaplist_destroy(list_t *l, boolean_t own)
2585 {
2586         struct promotenode *snap;
2587
2588         if (!l || !list_link_active(&l->list_head))
2589                 return;
2590
2591         while ((snap = list_tail(l)) != NULL) {
2592                 list_remove(l, snap);
2593                 if (own)
2594                         dsl_dataset_disown(snap->ds, snaplist_tag);
2595                 else
2596                         dsl_dataset_rele(snap->ds, snaplist_tag);
2597                 kmem_free(snap, sizeof (struct promotenode));
2598         }
2599         list_destroy(l);
2600 }
2601
/*
 * Promote a clone.  Nomenclature note:
 * "clone" or "cds": the original clone which is being promoted
 * "origin" or "ods": the snapshot which is originally clone's origin
 * "origin head" or "ohds": the dataset which is the head
 * (filesystem/volume) for the origin
 * "origin origin": the origin of the origin's filesystem (typically
 * NULL, indicating that the clone is not a clone of a clone).
 *
 * Builds the three snapshot lists consumed by the promote check/sync
 * functions, then runs the promotion as a sync task.  Returns 0 or errno.
 */
int
dsl_dataset_promote(const char *name)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa = { 0 };
	struct promotenode *snap;
	int err;

	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	/* size of the snapnames zap; used below to pad the tx estimate */
	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	/* only a non-snapshot with an origin (i.e. a clone) can be promoted */
	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
		dsl_dataset_rele(ds, FTAG);
		return (EINVAL);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);

	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
	    &pa.shared_snaps);
	if (err != 0)
		goto out;

	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
	if (err != 0)
		goto out;

	snap = list_head(&pa.shared_snaps);
	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
	if (err != 0)
		goto out;

	/* if the origin is itself a clone, own the origin's origin too */
	if (dsl_dir_is_clone(snap->ds->ds_dir)) {
		err = dsl_dataset_own_obj(dp,
		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
		    0, FTAG, &pa.origin_origin);
		if (err != 0)
			goto out;
	}

out:	/* reached on success (err == 0) as well as on error */
	rw_exit(&dp->dp_config_rwlock);

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 * NOTE(review): comment says 128x but the code passes
	 * 2 + 2 * doi_physical_blks -- confirm which is stale.
	 */
	if (err == 0) {
		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
		    dsl_dataset_promote_sync, ds, &pa,
		    2 + 2 * doi.doi_physical_blks);
	}

	/* snaplist_destroy() tolerates lists never created (early error) */
	snaplist_destroy(&pa.shared_snaps, B_TRUE);
	snaplist_destroy(&pa.clone_snaps, B_FALSE);
	snaplist_destroy(&pa.origin_snaps, B_FALSE);
	if (pa.origin_origin)
		dsl_dataset_disown(pa.origin_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
2694
/* Arguments for the clone-swap sync task (see dsl_dataset_clone_swap()). */
struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force; /* swap even if ohds changed since its last snap */
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};
2701
2702 /* ARGSUSED */
2703 static int
2704 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2705 {
2706         struct cloneswaparg *csa = arg1;
2707
2708         /* they should both be heads */
2709         if (dsl_dataset_is_snapshot(csa->cds) ||
2710             dsl_dataset_is_snapshot(csa->ohds))
2711                 return (EINVAL);
2712
2713         /* the branch point should be just before them */
2714         if (csa->cds->ds_prev != csa->ohds->ds_prev)
2715                 return (EINVAL);
2716
2717         /* cds should be the clone */
2718         if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
2719             csa->ohds->ds_object)
2720                 return (EINVAL);
2721
2722         /* the clone should be a child of the origin */
2723         if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2724                 return (EINVAL);
2725
2726         /* ohds shouldn't be modified unless 'force' */
2727         if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2728                 return (ETXTBSY);
2729
2730         /* adjust amount of any unconsumed refreservation */
2731         csa->unused_refres_delta =
2732             (int64_t)MIN(csa->ohds->ds_reserved,
2733             csa->ohds->ds_phys->ds_unique_bytes) -
2734             (int64_t)MIN(csa->ohds->ds_reserved,
2735             csa->cds->ds_phys->ds_unique_bytes);
2736
2737         if (csa->unused_refres_delta > 0 &&
2738             csa->unused_refres_delta >
2739             dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2740                 return (ENOSPC);
2741
2742         return (0);
2743 }
2744
/*
 * Sync task for dsl_dataset_clone_swap(): exchange the on-disk contents
 * of the clone (cds) and the origin head (ohds) -- block pointers,
 * deadlists, and ds_*_bytes -- and fix up dsl_dir space accounting plus
 * the unconsumed-refreservation delta computed by the check function.
 */
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);

	/* evict cached user state on both datasets before swapping */
	if (csa->cds->ds_user_ptr != NULL) {
		csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
		csa->cds->ds_user_ptr = NULL;
	}

	if (csa->ohds->ds_user_ptr != NULL) {
		csa->ohds->ds_user_evict_func(csa->ohds,
		    csa->ohds->ds_user_ptr);
		csa->ohds->ds_user_ptr = NULL;
	}

	/* reset origin's unique bytes */
	VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
	    csa->cds->ds_prev->ds_phys->ds_prev_snap_txg, UINT64_MAX,
	    &csa->cds->ds_prev->ds_phys->ds_unique_bytes));

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		ASSERT3U(csa->cds->ds_dir->dd_phys->
		    dd_used_breakdown[DD_USED_SNAP], ==, 0);

		VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
		    &cdl_comp, &cdl_uncomp));
		VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
		    &odl_comp, &odl_uncomp));

		/* delta = (clone head + its deadlist) - (origin head + its) */
		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
		    -dused, -dcomp, -duncomp, tx);

		/*
		 * The difference in the space used by snapshots is the
		 * difference in snapshot space due to the head's
		 * deadlist (since that's the only thing that's
		 * changing that affects the snapused).
		 */
		VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
		    csa->ohds->ds_origin_txg, UINT64_MAX, &cdl_used));
		VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
		    csa->ohds->ds_origin_txg, UINT64_MAX, &odl_used));
		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
		    DD_USED_HEAD, DD_USED_SNAP, tx);
	}

#define SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
	    csa->unused_refres_delta, 0, 0, tx);

	/* swap deadlists: close, swap the objects, reopen both */
	bplist_close(&csa->cds->ds_deadlist);
	bplist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj));
	VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj));

	dsl_pool_ds_clone_swapped(csa->ohds, csa->cds, tx);
}
2857
/*
 * Swap 'clone' with its origin head file system.  Used at the end
 * of "online recv" to swizzle the file system to the new version.
 * Both datasets must already be owned by the caller.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/* Need exclusive access for the swap */
	rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		/*
		 * Deadlock avoidance: drop clone's lock, take
		 * origin_head's first, then re-try clone's; start over
		 * if that fails too.
		 */
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	/*
	 * NOTE(review): the ds_rwlock writers acquired above are not
	 * released here -- presumably dropped when the owners disown
	 * the datasets; confirm against dsl_dataset_disown().
	 */
	return (error);
}
2890
2891 /*
2892  * Given a pool name and a dataset object number in that pool,
2893  * return the name of that dataset.
2894  */
2895 int
2896 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2897 {
2898         spa_t *spa;
2899         dsl_pool_t *dp;
2900         dsl_dataset_t *ds;
2901         int error;
2902
2903         if ((error = spa_open(pname, &spa, FTAG)) != 0)
2904                 return (error);
2905         dp = spa_get_dsl(spa);
2906         rw_enter(&dp->dp_config_rwlock, RW_READER);
2907         if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2908                 dsl_dataset_name(ds, buf);
2909                 dsl_dataset_rele(ds, FTAG);
2910         }
2911         rw_exit(&dp->dp_config_rwlock);
2912         spa_close(spa, FTAG);
2913
2914         return (error);
2915 }
2916
2917 int
2918 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
2919     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
2920 {
2921         int error = 0;
2922
2923         ASSERT3S(asize, >, 0);
2924
2925         /*
2926          * *ref_rsrv is the portion of asize that will come from any
2927          * unconsumed refreservation space.
2928          */
2929         *ref_rsrv = 0;
2930
2931         mutex_enter(&ds->ds_lock);
2932         /*
2933          * Make a space adjustment for reserved bytes.
2934          */
2935         if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
2936                 ASSERT3U(*used, >=,
2937                     ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2938                 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
2939                 *ref_rsrv =
2940                     asize - MIN(asize, parent_delta(ds, asize + inflight));
2941         }
2942
2943         if (!check_quota || ds->ds_quota == 0) {
2944                 mutex_exit(&ds->ds_lock);
2945                 return (0);
2946         }
2947         /*
2948          * If they are requesting more space, and our current estimate
2949          * is over quota, they get to try again unless the actual
2950          * on-disk is over quota and there are no pending changes (which
2951          * may free up space for us).
2952          */
2953         if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
2954                 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
2955                         error = ERESTART;
2956                 else
2957                         error = EDQUOT;
2958         }
2959         mutex_exit(&ds->ds_lock);
2960
2961         return (error);
2962 }
2963
2964 /* ARGSUSED */
2965 static int
2966 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
2967 {
2968         dsl_dataset_t *ds = arg1;
2969         uint64_t *quotap = arg2;
2970         uint64_t new_quota = *quotap;
2971
2972         if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
2973                 return (ENOTSUP);
2974
2975         if (new_quota == 0)
2976                 return (0);
2977
2978         if (new_quota < ds->ds_phys->ds_used_bytes ||
2979             new_quota < ds->ds_reserved)
2980                 return (ENOSPC);
2981
2982         return (0);
2983 }
2984
2985 /* ARGSUSED */
2986 void
2987 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
2988 {
2989         dsl_dataset_t *ds = arg1;
2990         uint64_t *quotap = arg2;
2991         uint64_t new_quota = *quotap;
2992
2993         dmu_buf_will_dirty(ds->ds_dbuf, tx);
2994
2995         ds->ds_quota = new_quota;
2996
2997         dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);
2998
2999         spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
3000             tx, cr, "%lld dataset = %llu ",
3001             (longlong_t)new_quota, ds->ds_object);
3002 }
3003
3004 int
3005 dsl_dataset_set_quota(const char *dsname, uint64_t quota)
3006 {
3007         dsl_dataset_t *ds;
3008         int err;
3009
3010         err = dsl_dataset_hold(dsname, FTAG, &ds);
3011         if (err)
3012                 return (err);
3013
3014         if (quota != ds->ds_quota) {
3015                 /*
3016                  * If someone removes a file, then tries to set the quota, we
3017                  * want to make sure the file freeing takes effect.
3018                  */
3019                 txg_wait_open(ds->ds_dir->dd_pool, 0);
3020
3021                 err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3022                     dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3023                     ds, &quota, 0);
3024         }
3025         dsl_dataset_rele(ds, FTAG);
3026         return (err);
3027 }
3028
/*
 * Check function for the set-refreservation sync task: validate that the
 * new reservation is representable, supported by the pool version,
 * applies to a non-snapshot, and fits in available space and under any
 * refquota.
 */
static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	int64_t delta;
	uint64_t unique;

	/* the delta below is computed as a signed 64-bit value */
	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	unique = dsl_dataset_unique(ds);
	/* extra space the new reservation would consume beyond the old one */
	delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved);
	mutex_exit(&ds->ds_lock);

	if (delta > 0 &&
	    delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);
	if (delta > 0 && ds->ds_quota > 0 &&
	    new_reservation > ds->ds_quota)
		return (ENOSPC);

	return (0);
}
3069
/*
 * Sync task: install the new refreservation, charge/credit the change in
 * unconsumed reservation to the dsl_dir, persist the "refreservation"
 * property, and log a history record.
 */
/* ARGSUSED */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t unique;
	int64_t delta;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/* lock order here: dd_lock before ds_lock */
	mutex_enter(&ds->ds_dir->dd_lock);
	mutex_enter(&ds->ds_lock);
	unique = dsl_dataset_unique(ds);
	/* change in the part of the reservation not backed by unique data */
	delta = MAX(0, (int64_t)(new_reservation - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = new_reservation;
	mutex_exit(&ds->ds_lock);

	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
	mutex_exit(&ds->ds_dir->dd_lock);
	dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
	    new_reservation, cr, tx);

	spa_history_internal_log(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation, ds->ds_object);
}
3100
3101 int
3102 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
3103 {
3104         dsl_dataset_t *ds;
3105         int err;
3106
3107         err = dsl_dataset_hold(dsname, FTAG, &ds);
3108         if (err)
3109                 return (err);
3110
3111         err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3112             dsl_dataset_set_reservation_check,
3113             dsl_dataset_set_reservation_sync, ds, &reservation, 0);
3114         dsl_dataset_rele(ds, FTAG);
3115         return (err);
3116 }