/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	struct uio auio;
	struct iovec aiov;
	ASSERT3U(len % 8, ==, 0);

	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
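
/*
 * Editorial note (a sketch, not part of the original source): every record
 * header and payload in the stream passes through dump_bytes(), so
 * dsp->dsa_zc accumulates a fletcher-4 checksum of everything written so
 * far.  The DRR_END record emitted at the end of dmu_send() carries this
 * running value, which lets the receiver verify the stream with the same
 * incremental computation (see restore_read() below):
 *
 *	fletcher_4_incremental_native(buf1, len1, &zc);
 *	fletcher_4_incremental_native(buf2, len2, &zc);
 *	// zc now holds the checksum of the concatenation buf1 || buf2
 */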

static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (EINTR);
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
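
/*
 * Worked example of the aggregation above (illustrative only): freeing
 * object 5 at offset 0 for 4096 bytes leaves a pending DRR_FREE record
 * {obj=5, off=0, len=4096}.  A following dump_free(dsp, 5, 4096, 4096)
 * satisfies drr_offset + drr_length == offset, so the pending record
 * simply grows to {obj=5, off=0, len=8192} and only one record reaches
 * the stream.  A free of a different object, or at a non-contiguous
 * offset, pushes the pending record out first.
 */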

static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp,
    void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
	drrw->drr_key.ddk_cksum = bp->blk_cksum;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (EINTR);
	if (dump_bytes(dsp, data, blksz) != 0)
		return (EINTR);
	return (0);
}
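
/*
 * Editorial note: the ddk_* fields copied above form the block's dedup
 * key.  When a stream is generated with dedup enabled (e.g. zfs send -D),
 * a later block whose key matches an earlier one can be emitted as a
 * DRR_WRITE_BYREF record naming the earlier copy (drr_refguid,
 * drr_refobject, drr_refoffset) instead of repeating the payload; see
 * restore_write_byref() below for the receive side.
 */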

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	if (dump_bytes(dsp, data, blksz))
		return (EINTR);
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (EINTR);
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (EINTR);

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (EINTR);

	/* free anything past the end of the file */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
		return (EINTR);
	if (dsp->dsa_err)
		return (EINTR);
	return (0);
}
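
/*
 * Example of the "free past end of file" call above (illustrative):
 * with dn_maxblkid == 7 and a 128K data block size (dn_datablkszsec ==
 * 256 sectors), the first byte past the object's data is at
 * (7 + 1) * (256 << SPA_MINBLOCKSHIFT) == 1 MB, so dump_free() emits a
 * DRR_FREE record covering everything from 1 MB onward via the special
 * length -1ULL.
 */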

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
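
/*
 * BP_SPAN(dnp, level) is the number of bytes of object data covered by
 * one block pointer at the given indirection level.  Worked example
 * (illustrative): with a 128K data block (dn_datablkszsec == 256) and
 * dn_indblkshift == 14 (16K indirect blocks holding 16K / 128 == 128
 * block pointers each), level 0 spans 128K and level 1 spans
 * 128K * 128 == 16M.
 */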

/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;

		if (dsl_read(NULL, spa, bp, pbuf,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
			return (EIO);

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read_nolock(NULL, spa, bp,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
			return (EIO);

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else { /* it's a level-0 block of a regular object */
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (dsl_read(NULL, spa, bp, pbuf,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				uint64_t *ptr;
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10c;
			} else {
				return (EIO);
			}
		}

		err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
		    blksz, bp, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
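
/*
 * Editorial note: backup_cb() is the traverse_dataset() callback, so it
 * sees every bookmark in the dataset in pre-order.  Holes (bp == NULL)
 * become DRR_FREE/DRR_FREEOBJECTS records, dnode blocks are decomposed
 * into per-object DRR_OBJECT records, and level-0 data blocks become
 * DRR_WRITE records.  Returning nonzero (EINTR here) aborts the
 * traversal, which is how a signalled send terminates early.
 */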

int
dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    int outfd, struct file *fp, offset_t *off)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		dsl_pool_t *dp = ds->ds_dir->dd_pool;

		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			fromorigin = B_FALSE;
		}
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			return (EINVAL);
		}
		if (version == ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}
#endif

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
	if (fromorigin)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds)
		fromtxg = fromds->ds_phys->ds_creation_txg;
	if (fromorigin)
		dsl_dataset_rele(fromds, FTAG);

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = tosnap;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = EINTR;

	if (err) {
		if (err == EINTR && dsp->dsa_err)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	return (err);
}
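
/*
 * Editorial summary of the stream layout produced above: a single
 * DRR_BEGIN record, followed by DRR_OBJECT/DRR_FREEOBJECTS/DRR_WRITE/
 * DRR_SPILL/DRR_FREE records in traversal order, terminated by a
 * DRR_END record carrying the fletcher-4 checksum of everything that
 * preceded it.
 */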

int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    uint64_t *sizep)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			fromorigin = B_FALSE;
		}
	}

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (fromorigin)
			dsl_dataset_rele(fromds, FTAG);
		if (err)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_prop_get_ds(ds, "recordsize",
	    sizeof (recordsize), 1, &recordsize, NULL);
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
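
/*
 * Worked example of the estimate (illustrative, assuming this era's
 * struct sizes: sizeof (blkptr_t) == 128 and
 * sizeof (dmu_replay_record_t) == 312): for 1 GB of changed data at
 * recordsize 128K there are 8192 blocks, so we subtract 8192 * 128 ==
 * 1 MB of indirect-block overhead and add back 8192 * 312 (roughly
 * 2.4 MB) for the per-block record headers in the stream.
 */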

struct recvbeginsyncarg {
	const char *tofs;
	const char *tosnap;
	dsl_dataset_t *origin;
	uint64_t fromguid;
	dmu_objset_type_t type;
	void *tag;
	boolean_t force;
	uint64_t dsflags;
	char clonelastname[MAXNAMELEN];
	dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
	cred_t *cr;
};

/* ARGSUSED */
static int
recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val;
	int err;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (rbsa->origin) {
		/* make sure it's a snap in the same pool */
		if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);
		if (!dsl_dataset_is_snapshot(rbsa->origin))
			return (EINVAL);
		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
			return (ENODEV);
	}

	return (0);
}

static void
recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
	uint64_t dsobj;

	/* Create and open new dataset. */
	dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
	    rbsa->origin, flags, rbsa->cr, tx);
	VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
	    B_TRUE, dmu_recv_tag, &rbsa->ds));

	if (rbsa->origin == NULL) {
		(void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
		    rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
	}

	spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
	    dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
}

/* ARGSUSED */
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;
	uint64_t val;

	/* must not have any changes since most recent snapshot */
	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
		return (ETXTBSY);

	/* new snapshot name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (rbsa->fromguid) {
		/* if incremental, most recent snapshot must match fromguid */
		if (ds->ds_prev == NULL)
			return (ENODEV);

		/*
		 * most recent snapshot must match fromguid, or there are no
		 * changes since the fromguid one
		 */
		if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
			uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
			uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
			while (obj != 0) {
				dsl_dataset_t *snap;
				err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
				    obj, FTAG, &snap);
				if (err)
					return (ENODEV);
				if (snap->ds_phys->ds_creation_txg < birth) {
					dsl_dataset_rele(snap, FTAG);
					return (ENODEV);
				}
				if (snap->ds_phys->ds_guid == rbsa->fromguid) {
					dsl_dataset_rele(snap, FTAG);
					break; /* it's ok */
				}
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
			}
			if (obj == 0)
				return (ENODEV);
		}
	} else {
		/* if full, most recent snapshot must be $ORIGIN */
		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
			return (ENODEV);
	}

	/* temporary clone name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj,
	    rbsa->clonelastname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	return (0);
}

/* ARGSUSED */
static void
recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ohds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	dsl_pool_t *dp = ohds->ds_dir->dd_pool;
	dsl_dataset_t *cds;
	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
	uint64_t dsobj;

	/* create and open the temporary clone */
	dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
	    ohds->ds_prev, flags, rbsa->cr, tx);
	VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
	}

	rbsa->ds = cds;

	spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
	    dp->dp_spa, tx, "dataset = %lld", dsobj);
}

static boolean_t
dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
{
	int featureflags;

	featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);

	/*
	 * Verify pool version supports SA if SA_SPILL feature set;
	 * returns B_TRUE if the stream cannot be received.
	 */
	return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
    boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
{
	int err = 0;
	boolean_t byteswap;
	struct recvbeginsyncarg rbsa = { 0 };
	uint64_t versioninfo;
	int flags;
	dsl_dataset_t *ds;

	if (drrb->drr_magic == DMU_BACKUP_MAGIC)
		byteswap = FALSE;
	else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		byteswap = TRUE;
	else
		return (EINVAL);

	rbsa.tofs = tofs;
	rbsa.tosnap = tosnap;
	rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
	rbsa.fromguid = drrb->drr_fromguid;
	rbsa.type = drrb->drr_type;
	rbsa.tag = FTAG;
	rbsa.dsflags = 0;
	rbsa.cr = CRED();
	versioninfo = drrb->drr_versioninfo;
	flags = drrb->drr_flags;

	if (byteswap) {
		rbsa.type = BSWAP_32(rbsa.type);
		rbsa.fromguid = BSWAP_64(rbsa.fromguid);
		versioninfo = BSWAP_64(versioninfo);
		flags = BSWAP_32(flags);
	}

	if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
	    rbsa.type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && origin == NULL))
		return (EINVAL);

	if (flags & DRR_FLAG_CI_DATA)
		rbsa.dsflags = DS_FLAG_CI_DATASET;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_top_ds = top_ds;
	drc->drc_force = force;

	/*
	 * Process the begin in syncing context.
	 */

	/* open the dataset we are logically receiving into */
	err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
	if (err == 0) {
		if (dmu_recv_verify_features(ds, drrb)) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (ENOTSUP);
		}
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (EINVAL);
		}

		/* must not have an incremental recv already in progress */
		if (!mutex_tryenter(&ds->ds_recvlock)) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (EBUSY);
		}

		/* tmp clone name is: tofs/%tosnap" */
		(void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
		    "%%%s", tosnap);
		rbsa.force = force;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
		if (err) {
			mutex_exit(&ds->ds_recvlock);
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (err);
		}
		drc->drc_logical_ds = ds;
		drc->drc_real_ds = rbsa.ds;
	} else if (err == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char *cp;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
			return (ENOENT);

		/* Open the parent of tofs */
		cp = strrchr(tofs, '/');
		*cp = '\0';
		err = dsl_dataset_hold(tofs, FTAG, &ds);
		*cp = '/';
		if (err)
			return (err);

		if (dmu_recv_verify_features(ds, drrb)) {
			dsl_dataset_rele(ds, FTAG);
			return (ENOTSUP);
		}

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
		drc->drc_newfs = B_TRUE;
	}

	return (err);
}

struct restorearg {
	int err;
	int byteswap;
	kthread_t *td;
	struct file *fp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	else
		return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_rele(gmep->gme_ds, ca);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
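
/*
 * Usage sketch for the guid map (illustrative only): lookups key a
 * stack-allocated guid_map_entry_t by guid alone, e.g.
 *
 *	guid_map_entry_t gmesrch, *gmep;
 *	gmesrch.guid = some_guid;
 *	gmep = avl_find(guid_to_ds_map, &gmesrch, &where);
 *
 * which is exactly how restore_write_byref() below resolves drr_refguid
 * to the dataset holding the referenced copy of the data.
 */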

static int
restore_bytes(struct restorearg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT3U(len % 8, ==, 0);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, (caddr_t)ra->buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done)
			ra->err = EINVAL;
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	}
#undef DO64
#undef DO32
}
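
/*
 * Editorial note: a receiver detects an opposite-endian stream when the
 * BEGIN record's magic reads as BSWAP_64(DMU_BACKUP_MAGIC); it then runs
 * every subsequent record header through backup_byteswap() (and byteswaps
 * payloads per-type in restore_object()/restore_write()).
 */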

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err = 0;
	dmu_tx_t *tx;
	void *data = NULL;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (EINVAL);
	}

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (EINVAL);

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (ra->err)
			return (ra->err);
	}

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
		dmu_tx_commit(tx);
	} else {
		/* currently allocated, want to be allocated */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen);
	}
	if (err) {
		return (EINVAL);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, drro->drr_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (EINVAL);

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_object(os, obj);
		if (err)
			return (err);
	}
	return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (EINVAL);

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
	}
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (EINVAL);

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (EINVAL);
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (EINVAL);
	} else {
		ref_os = os;
	}

	if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
		return (err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
	dmu_tx_t *tx;
	void *data;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > SPA_MAXBLOCKSIZE)
		return (EINVAL);

	data = restore_read(ra, drrs->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
		return (EINVAL);

	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (EINVAL);

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (EINVAL);

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}

/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		ra.byteswap = TRUE;

	{
		/* compute checksum of drr_begin record */
		dmu_replay_record_t *drr;
		drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);

		drr->drr_type = DRR_BEGIN;
		drr->drr_u.drr_begin = *drc->drc_drrb;
		if (ra.byteswap) {
			fletcher_4_incremental_byteswap(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		} else {
			fletcher_4_incremental_native(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		}
		kmem_free(drr, sizeof (dmu_replay_record_t));
	}

	if (ra.byteswap) {
		struct drr_begin *drrb = drc->drc_drrb;
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	ra.td = curthread;
	ra.fp = fp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
	    DMU_SUBSTREAM);
	ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);

	ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = EBADF;
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = EINTR;
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = ECKSUM;
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = EINVAL;
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);

		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		if (drc->drc_real_ds != drc->drc_logical_ds) {
			mutex_exit(&drc->drc_logical_ds->ds_recvlock);
			dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
		}
	}

	kmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}

struct recvendsyncarg {
	char *tosnap;
	uint64_t creation_time;
	uint64_t toguid;
};

static int
recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
}

static void
recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);

	/* set snapshot's creation time and guid */
	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
}

static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
	}
	rw_exit(&dp->dp_config_rwlock);

	return (err);
}

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err, myerr;

	if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
		err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
		    drc->drc_force);
		if (err)
			goto out;
	} else {
		mutex_exit(&ds->ds_recvlock);
		dsl_dataset_rele(ds, dmu_recv_tag);
		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		return (EBUSY);
	}

	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* swap back */
		(void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
	}

out:
	mutex_exit(&ds->ds_recvlock);
	if (err == 0 && drc->drc_guid_to_ds_map != NULL)
		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
	dsl_dataset_disown(ds, dmu_recv_tag);
	myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
	ASSERT3U(myerr, ==, 0);
	return (err);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err;

	/*
	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
	 * cleared it.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* clean up the fs we just recv'd into */
		(void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
		return (err);
	}

	if (drc->drc_guid_to_ds_map != NULL)
		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
	/* release the hold from dmu_recv_begin */
	dsl_dataset_disown(ds, dmu_recv_tag);
	return (err);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
	if (drc->drc_logical_ds != drc->drc_real_ds)
		return (dmu_recv_existing_end(drc));
	else
		return (dmu_recv_new_end(drc));
}