4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright 2014 HybridCluster. All rights reserved.
31 #include <sys/dmu_impl.h>
32 #include <sys/dmu_tx.h>
34 #include <sys/dnode.h>
35 #include <sys/zfs_context.h>
36 #include <sys/dmu_objset.h>
37 #include <sys/dmu_traverse.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/dsl_prop.h>
41 #include <sys/dsl_pool.h>
42 #include <sys/dsl_synctask.h>
43 #include <sys/zfs_ioctl.h>
45 #include <sys/zio_checksum.h>
46 #include <sys/zfs_znode.h>
47 #include <zfs_fletcher.h>
50 #include <sys/zfs_onexit.h>
51 #include <sys/dmu_send.h>
52 #include <sys/dsl_destroy.h>
53 #include <sys/blkptr.h>
54 #include <sys/dsl_bookmark.h>
55 #include <sys/zfeature.h>
59 #define dump_write dmu_dump_write
62 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
63 int zfs_send_corrupt_data = B_FALSE;
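/*
 * Descriptive note (added): with the tunable above set, blocks that fail
 * to read are replaced in the stream by blocks of repeating
 * 0x2f5baddb10c (hexspeak for "zfs badd bloc") rather than failing the
 * whole send; see the arc_read() error path in backup_cb().
 */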
65 static char *dmu_recv_tag = "dmu_recv_tag";
66 static const char *recv_clone_name = "%recv";
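/*
 * Descriptive note (added): when receiving into an existing filesystem,
 * the stream is first received into a temporary clone named by
 * recv_clone_name ("%recv"), created in dmu_recv_begin_sync(), and then
 * swapped into place by dmu_recv_end_sync().
 */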
69 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
71 dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
81 auio.uio_segflg = UIO_SYSSPACE;
82 auio.uio_rw = UIO_WRITE;
83 auio.uio_offset = (off_t)-1;
84 auio.uio_td = dsp->dsa_td;
86 if (dsp->dsa_fp->f_type == DTYPE_VNODE)
88 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
91 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
92 dsp->dsa_err = EOPNOTSUPP;
94 mutex_enter(&ds->ds_sendstream_lock);
96 mutex_exit(&ds->ds_sendstream_lock);
98 return (dsp->dsa_err);
102 * For all record types except BEGIN, fill in the checksum (overlaid in
103 * drr_u.drr_checksum.drr_checksum). The checksum covers everything
104 * up to the start of the checksum itself.
107 dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
109 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
110 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
111 fletcher_4_incremental_native(dsp->dsa_drr,
112 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
114 if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
115 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
116 drr_checksum.drr_checksum));
117 dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
119 fletcher_4_incremental_native(&dsp->dsa_drr->
120 drr_u.drr_checksum.drr_checksum,
121 sizeof (zio_cksum_t), &dsp->dsa_zc);
122 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
123 return (SET_ERROR(EINTR));
124 if (payload_len != 0) {
125 fletcher_4_incremental_native(payload, payload_len,
127 if (dump_bytes(dsp, payload, payload_len) != 0)
128 return (SET_ERROR(EINTR));
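/*
 * Illustrative sketch (added; not part of the original source): how a
 * consumer of the stream can check one record against the running
 * fletcher-4 checksum. The helper name is hypothetical; the real
 * receive-side logic lives in restore_read_payload_and_next_header()
 * below. This simplified rendition assumes "running" already covers all
 * prior records and payloads.
 */
static boolean_t
example_record_cksum_ok(dmu_replay_record_t *drr, zio_cksum_t *running)
{
	/* The stored checksum covers everything before the checksum field. */
	fletcher_4_incremental_native(drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    running);
	if (drr->drr_type != DRR_BEGIN &&
	    !ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.drr_checksum.drr_checksum) &&
	    !ZIO_CHECKSUM_EQUAL(*running,
	    drr->drr_u.drr_checksum.drr_checksum))
		return (B_FALSE);
	/* Fold the checksum field itself back into the running checksum. */
	fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), running);
	return (B_TRUE);
}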
134 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
137 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
140 * When we receive a free record, dbuf_free_range() assumes
141 * that the receiving system doesn't have any dbufs in the range
142 * being freed. This is always true because there is a one-record
143 * constraint: we only send one WRITE record for any given
144 * object+offset. We know that the one-record constraint is
145 * true because we always send data in increasing order by object, offset.
148 * If the increasing-order constraint ever changes, we should find
149 * another way to assert that the one-record constraint is still
152 ASSERT(object > dsp->dsa_last_data_object ||
153 (object == dsp->dsa_last_data_object &&
154 offset > dsp->dsa_last_data_offset));
157 * If we are doing a non-incremental send, then there can't
158 * be any data in the dataset we're receiving into. Therefore
159 * a free record would simply be a no-op. Save space by not
160 * sending it to begin with.
162 if (!dsp->dsa_incremental)
165 if (length != -1ULL && offset + length < offset)
169 * If there is a pending op, but it's not PENDING_FREE, push it out,
170 * since free block aggregation can only be done for blocks of the
171 * same type (i.e., DRR_FREE records can only be aggregated with
172 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
173 * aggregated with other DRR_FREEOBJECTS records).
175 if (dsp->dsa_pending_op != PENDING_NONE &&
176 dsp->dsa_pending_op != PENDING_FREE) {
177 if (dump_record(dsp, NULL, 0) != 0)
178 return (SET_ERROR(EINTR));
179 dsp->dsa_pending_op = PENDING_NONE;
182 if (dsp->dsa_pending_op == PENDING_FREE) {
184 * There should never be a PENDING_FREE if length is -1
185 * (because dump_dnode is the only place where this
186 * function is called with a -1, and only after flushing
187 * any pending record).
189 ASSERT(length != -1ULL);
191 * Check to see whether this free block can be aggregated with the pending one.
194 if (drrf->drr_object == object && drrf->drr_offset +
195 drrf->drr_length == offset) {
196 drrf->drr_length += length;
199 /* not a continuation. Push out pending record */
200 if (dump_record(dsp, NULL, 0) != 0)
201 return (SET_ERROR(EINTR));
202 dsp->dsa_pending_op = PENDING_NONE;
205 /* create a FREE record and make it pending */
206 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
207 dsp->dsa_drr->drr_type = DRR_FREE;
208 drrf->drr_object = object;
209 drrf->drr_offset = offset;
210 drrf->drr_length = length;
211 drrf->drr_toguid = dsp->dsa_toguid;
212 if (length == -1ULL) {
213 if (dump_record(dsp, NULL, 0) != 0)
214 return (SET_ERROR(EINTR));
216 dsp->dsa_pending_op = PENDING_FREE;
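/*
 * Illustrative example (added): because of the aggregation above, two
 * adjacent frees such as
 *
 *	dump_free(dsp, 5, 0 * 131072, 131072);
 *	dump_free(dsp, 5, 1 * 131072, 131072);
 *
 * leave a single pending DRR_FREE record covering object 5, offset 0,
 * length 256K; it is only pushed to the stream when a record of another
 * type, or a non-adjacent free, follows.
 */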
223 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
224 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
226 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
229 * We send data in increasing object, offset order.
230 * See comment in dump_free() for details.
232 ASSERT(object > dsp->dsa_last_data_object ||
233 (object == dsp->dsa_last_data_object &&
234 offset > dsp->dsa_last_data_offset));
235 dsp->dsa_last_data_object = object;
236 dsp->dsa_last_data_offset = offset + blksz - 1;
239 * If there is any kind of pending aggregation (currently either
240 * a grouping of free objects or free blocks), push it out to
241 * the stream, since aggregation can't be done across operations
242 * of different types.
244 if (dsp->dsa_pending_op != PENDING_NONE) {
245 if (dump_record(dsp, NULL, 0) != 0)
246 return (SET_ERROR(EINTR));
247 dsp->dsa_pending_op = PENDING_NONE;
249 /* write a WRITE record */
250 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
251 dsp->dsa_drr->drr_type = DRR_WRITE;
252 drrw->drr_object = object;
253 drrw->drr_type = type;
254 drrw->drr_offset = offset;
255 drrw->drr_length = blksz;
256 drrw->drr_toguid = dsp->dsa_toguid;
257 if (bp == NULL || BP_IS_EMBEDDED(bp)) {
259 * There's no pre-computed checksum for partial-block
260 * writes or embedded BP's, so (like
261 * fletcher4-checksummed blocks) userland will have to
262 * compute a dedup-capable checksum itself.
264 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
266 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
267 if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
268 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
269 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
270 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
271 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
272 drrw->drr_key.ddk_cksum = bp->blk_cksum;
275 if (dump_record(dsp, data, blksz) != 0)
276 return (SET_ERROR(EINTR));
281 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
282 int blksz, const blkptr_t *bp)
284 char buf[BPE_PAYLOAD_SIZE];
285 struct drr_write_embedded *drrw =
286 &(dsp->dsa_drr->drr_u.drr_write_embedded);
288 if (dsp->dsa_pending_op != PENDING_NONE) {
289 if (dump_record(dsp, NULL, 0) != 0)
291 dsp->dsa_pending_op = PENDING_NONE;
294 ASSERT(BP_IS_EMBEDDED(bp));
296 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
297 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
298 drrw->drr_object = object;
299 drrw->drr_offset = offset;
300 drrw->drr_length = blksz;
301 drrw->drr_toguid = dsp->dsa_toguid;
302 drrw->drr_compression = BP_GET_COMPRESS(bp);
303 drrw->drr_etype = BPE_GET_ETYPE(bp);
304 drrw->drr_lsize = BPE_GET_LSIZE(bp);
305 drrw->drr_psize = BPE_GET_PSIZE(bp);
307 decode_embedded_bp_compressed(bp, buf);
309 if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
315 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
317 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
319 if (dsp->dsa_pending_op != PENDING_NONE) {
320 if (dump_record(dsp, NULL, 0) != 0)
321 return (SET_ERROR(EINTR));
322 dsp->dsa_pending_op = PENDING_NONE;
325 /* write a SPILL record */
326 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
327 dsp->dsa_drr->drr_type = DRR_SPILL;
328 drrs->drr_object = object;
329 drrs->drr_length = blksz;
330 drrs->drr_toguid = dsp->dsa_toguid;
332 if (dump_record(dsp, data, blksz) != 0)
333 return (SET_ERROR(EINTR));
338 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
340 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
342 /* See comment in dump_free(). */
343 if (!dsp->dsa_incremental)
347 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
348 * push it out, since free block aggregation can only be done for
349 * blocks of the same type (i.e., DRR_FREE records can only be
350 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
351 * can only be aggregated with other DRR_FREEOBJECTS records).
353 if (dsp->dsa_pending_op != PENDING_NONE &&
354 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
355 if (dump_record(dsp, NULL, 0) != 0)
356 return (SET_ERROR(EINTR));
357 dsp->dsa_pending_op = PENDING_NONE;
359 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
361 * See whether this free object array can be aggregated with the pending one.
364 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
365 drrfo->drr_numobjs += numobjs;
368 /* can't be aggregated. Push out pending record */
369 if (dump_record(dsp, NULL, 0) != 0)
370 return (SET_ERROR(EINTR));
371 dsp->dsa_pending_op = PENDING_NONE;
375 /* write a FREEOBJECTS record */
376 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
377 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
378 drrfo->drr_firstobj = firstobj;
379 drrfo->drr_numobjs = numobjs;
380 drrfo->drr_toguid = dsp->dsa_toguid;
382 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
388 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
390 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
392 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
393 return (dump_freeobjects(dsp, object, 1));
395 if (dsp->dsa_pending_op != PENDING_NONE) {
396 if (dump_record(dsp, NULL, 0) != 0)
397 return (SET_ERROR(EINTR));
398 dsp->dsa_pending_op = PENDING_NONE;
401 /* write an OBJECT record */
402 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
403 dsp->dsa_drr->drr_type = DRR_OBJECT;
404 drro->drr_object = object;
405 drro->drr_type = dnp->dn_type;
406 drro->drr_bonustype = dnp->dn_bonustype;
407 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
408 drro->drr_bonuslen = dnp->dn_bonuslen;
409 drro->drr_checksumtype = dnp->dn_checksum;
410 drro->drr_compress = dnp->dn_compress;
411 drro->drr_toguid = dsp->dsa_toguid;
413 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
414 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
415 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
417 if (dump_record(dsp, DN_BONUS(dnp),
418 P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
419 return (SET_ERROR(EINTR));
422 /* Free anything past the end of the file. */
423 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
424 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
425 return (SET_ERROR(EINTR));
426 if (dsp->dsa_err != 0)
427 return (SET_ERROR(EINTR));
432 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
434 if (!BP_IS_EMBEDDED(bp))
438 * Compression function must be legacy, or explicitly enabled.
440 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
441 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
445 * Embed type must be explicitly enabled.
447 switch (BPE_GET_ETYPE(bp)) {
448 case BP_EMBEDDED_TYPE_DATA:
449 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
458 #define BP_SPAN(dnp, level) \
459 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
460 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
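/*
 * Worked example (added, assuming typical values): with 128K data blocks
 * (dnp->dn_datablkszsec == 256, and 256 << SPA_MINBLOCKSHIFT == 128K) and
 * 128K indirect blocks (dnp->dn_indblkshift == 17), each indirect block
 * holds 2^(17 - SPA_BLKPTRSHIFT) == 1024 block pointers, so
 * BP_SPAN(dnp, 0) == 128K and BP_SPAN(dnp, 1) == 128M of file offset.
 */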
464 backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
465 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
467 dmu_sendarg_t *dsp = arg;
468 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
471 if (issig(JUSTLOOKING) && issig(FORREAL))
472 return (SET_ERROR(EINTR));
474 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
475 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
477 } else if (zb->zb_level == ZB_ZIL_LEVEL) {
479 * If we are sending a non-snapshot (which is allowed on
480 * read-only pools), it may have a ZIL, which must be ignored.
483 } else if (BP_IS_HOLE(bp) &&
484 zb->zb_object == DMU_META_DNODE_OBJECT) {
485 uint64_t span = BP_SPAN(dnp, zb->zb_level);
486 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
487 err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
488 } else if (BP_IS_HOLE(bp)) {
489 uint64_t span = BP_SPAN(dnp, zb->zb_level);
490 err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
491 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
493 } else if (type == DMU_OT_DNODE) {
496 int blksz = BP_GET_LSIZE(bp);
497 arc_flags_t aflags = ARC_FLAG_WAIT;
500 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
501 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
503 return (SET_ERROR(EIO));
506 for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
507 uint64_t dnobj = (zb->zb_blkid <<
508 (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
509 err = dump_dnode(dsp, dnobj, blk+i);
513 (void) arc_buf_remove_ref(abuf, &abuf);
514 } else if (type == DMU_OT_SA) {
515 arc_flags_t aflags = ARC_FLAG_WAIT;
517 int blksz = BP_GET_LSIZE(bp);
519 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
520 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
522 return (SET_ERROR(EIO));
524 err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
525 (void) arc_buf_remove_ref(abuf, &abuf);
526 } else if (backup_do_embed(dsp, bp)) {
527 /* it's an embedded level-0 block of a regular object */
528 int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
529 err = dump_write_embedded(dsp, zb->zb_object,
530 zb->zb_blkid * blksz, blksz, bp);
531 } else { /* it's a level-0 block of a regular object */
532 arc_flags_t aflags = ARC_FLAG_WAIT;
534 int blksz = BP_GET_LSIZE(bp);
537 ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
538 ASSERT0(zb->zb_level);
539 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
540 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
542 if (zfs_send_corrupt_data) {
543 /* Send a block filled with 0x"zfs badd bloc" */
544 abuf = arc_buf_alloc(spa, blksz, &abuf,
547 for (ptr = abuf->b_data;
548 (char *)ptr < (char *)abuf->b_data + blksz;
550 *ptr = 0x2f5baddb10cULL;
552 return (SET_ERROR(EIO));
556 offset = zb->zb_blkid * blksz;
558 if (!(dsp->dsa_featureflags &
559 DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
560 blksz > SPA_OLD_MAXBLOCKSIZE) {
561 char *buf = abuf->b_data;
562 while (blksz > 0 && err == 0) {
563 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
564 err = dump_write(dsp, type, zb->zb_object,
565 offset, n, NULL, buf);
571 err = dump_write(dsp, type, zb->zb_object,
572 offset, blksz, bp, abuf->b_data);
574 (void) arc_buf_remove_ref(abuf, &abuf);
577 ASSERT(err == 0 || err == EINTR);
582 * Releases dp using the specified tag.
585 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
586 zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
587 #ifdef illumos
588 boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
589 #else
590 boolean_t large_block_ok, int outfd, struct file *fp, offset_t *off)
591 #endif
594 dmu_replay_record_t *drr;
597 uint64_t fromtxg = 0;
598 uint64_t featureflags = 0;
600 err = dmu_objset_from_ds(ds, &os);
602 dsl_pool_rele(dp, tag);
606 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
607 drr->drr_type = DRR_BEGIN;
608 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
609 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
613 if (dmu_objset_type(os) == DMU_OST_ZFS) {
615 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
616 kmem_free(drr, sizeof (dmu_replay_record_t));
617 dsl_pool_rele(dp, tag);
618 return (SET_ERROR(EINVAL));
620 if (version >= ZPL_VERSION_SA) {
621 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
626 if (large_block_ok && ds->ds_large_blocks)
627 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
629 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
630 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
631 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
632 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
637 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
640 drr->drr_u.drr_begin.drr_creation_time =
641 dsl_dataset_phys(ds)->ds_creation_time;
642 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
644 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
645 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(ds)->ds_guid;
646 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
647 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
649 if (fromzb != NULL) {
650 drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
651 fromtxg = fromzb->zbm_creation_txg;
653 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
654 if (!ds->ds_is_snapshot) {
655 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
656 sizeof (drr->drr_u.drr_begin.drr_toname));
659 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
662 dsp->dsa_outfd = outfd;
663 dsp->dsa_proc = curproc;
664 dsp->dsa_td = curthread;
668 dsp->dsa_toguid = dsl_dataset_phys(ds)->ds_guid;
669 dsp->dsa_pending_op = PENDING_NONE;
670 dsp->dsa_incremental = (fromzb != NULL);
671 dsp->dsa_featureflags = featureflags;
673 mutex_enter(&ds->ds_sendstream_lock);
674 list_insert_head(&ds->ds_sendstreams, dsp);
675 mutex_exit(&ds->ds_sendstream_lock);
677 dsl_dataset_long_hold(ds, FTAG);
678 dsl_pool_rele(dp, tag);
680 if (dump_record(dsp, NULL, 0) != 0) {
685 err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
688 if (dsp->dsa_pending_op != PENDING_NONE)
689 if (dump_record(dsp, NULL, 0) != 0)
690 err = SET_ERROR(EINTR);
693 if (err == EINTR && dsp->dsa_err != 0)
698 bzero(drr, sizeof (dmu_replay_record_t));
699 drr->drr_type = DRR_END;
700 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
701 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
703 if (dump_record(dsp, NULL, 0) != 0) {
709 mutex_enter(&ds->ds_sendstream_lock);
710 list_remove(&ds->ds_sendstreams, dsp);
711 mutex_exit(&ds->ds_sendstream_lock);
713 kmem_free(drr, sizeof (dmu_replay_record_t));
714 kmem_free(dsp, sizeof (dmu_sendarg_t));
716 dsl_dataset_long_rele(ds, FTAG);
722 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
723 boolean_t embedok, boolean_t large_block_ok,
724 #ifdef illumos
725 int outfd, vnode_t *vp, offset_t *off)
726 #else
727 int outfd, struct file *fp, offset_t *off)
728 #endif
732 dsl_dataset_t *fromds = NULL;
735 err = dsl_pool_hold(pool, FTAG, &dp);
739 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
741 dsl_pool_rele(dp, FTAG);
746 zfs_bookmark_phys_t zb;
749 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
751 dsl_dataset_rele(ds, FTAG);
752 dsl_pool_rele(dp, FTAG);
755 if (!dsl_dataset_is_before(ds, fromds, 0))
756 err = SET_ERROR(EXDEV);
757 zb.zbm_creation_time =
758 dsl_dataset_phys(fromds)->ds_creation_time;
759 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
760 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
761 is_clone = (fromds->ds_dir != ds->ds_dir);
762 dsl_dataset_rele(fromds, FTAG);
763 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
764 embedok, large_block_ok, outfd, fp, off);
766 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
767 embedok, large_block_ok, outfd, fp, off);
769 dsl_dataset_rele(ds, FTAG);
774 dmu_send(const char *tosnap, const char *fromsnap,
775 boolean_t embedok, boolean_t large_block_ok,
776 #ifdef illumos
777 int outfd, vnode_t *vp, offset_t *off)
778 #else
779 int outfd, struct file *fp, offset_t *off)
780 #endif
785 boolean_t owned = B_FALSE;
787 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
788 return (SET_ERROR(EINVAL));
790 err = dsl_pool_hold(tosnap, FTAG, &dp);
794 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
796 * We are sending a filesystem or volume. Ensure
797 * that it doesn't change by owning the dataset.
799 err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
802 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
805 dsl_pool_rele(dp, FTAG);
809 if (fromsnap != NULL) {
810 zfs_bookmark_phys_t zb;
811 boolean_t is_clone = B_FALSE;
812 int fsnamelen = strchr(tosnap, '@') - tosnap;
815 * If the fromsnap is in a different filesystem, then
816 * mark the send stream as a clone.
818 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
819 (fromsnap[fsnamelen] != '@' &&
820 fromsnap[fsnamelen] != '#')) {
824 if (strchr(fromsnap, '@')) {
825 dsl_dataset_t *fromds;
826 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
828 if (!dsl_dataset_is_before(ds, fromds, 0))
829 err = SET_ERROR(EXDEV);
830 zb.zbm_creation_time =
831 dsl_dataset_phys(fromds)->ds_creation_time;
832 zb.zbm_creation_txg =
833 dsl_dataset_phys(fromds)->ds_creation_txg;
834 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
835 is_clone = (ds->ds_dir != fromds->ds_dir);
836 dsl_dataset_rele(fromds, FTAG);
839 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
842 dsl_dataset_rele(ds, FTAG);
843 dsl_pool_rele(dp, FTAG);
846 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
847 embedok, large_block_ok, outfd, fp, off);
849 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
850 embedok, large_block_ok, outfd, fp, off);
853 dsl_dataset_disown(ds, FTAG);
855 dsl_dataset_rele(ds, FTAG);
860 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
865 * Assume that space (both on-disk and in-stream) is dominated by
866 * data. We will adjust for indirect blocks and the copies property,
867 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
871 * Subtract out approximate space used by indirect blocks.
872 * Assume most space is used by data blocks (non-indirect, non-dnode).
873 * Assume all blocks are recordsize. Assume ditto blocks and
874 * internal fragmentation cancel out compression.
876 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
877 * block, which we observe in practice.
880 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
883 size -= size / recordsize * sizeof (blkptr_t);
885 /* Add in the space for the record associated with each block. */
886 size += size / recordsize * sizeof (dmu_replay_record_t);
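/*
 * Illustrative arithmetic (added): with the default 128K recordsize,
 * each block is charged sizeof (blkptr_t) == 128 bytes of indirect
 * overhead (about 0.1% of the data), subtracted above, plus one
 * dmu_replay_record_t of stream framing, added back in.
 */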
894 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
896 dsl_pool_t *dp = ds->ds_dir->dd_pool;
900 ASSERT(dsl_pool_config_held(dp));
902 /* tosnap must be a snapshot */
903 if (!ds->ds_is_snapshot)
904 return (SET_ERROR(EINVAL));
906 /* fromsnap, if provided, must be a snapshot */
907 if (fromds != NULL && !fromds->ds_is_snapshot)
908 return (SET_ERROR(EINVAL));
911 * fromsnap must be an earlier snapshot from the same fs as tosnap,
912 * or the origin's fs.
914 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
915 return (SET_ERROR(EXDEV));
917 /* Get uncompressed size estimate of changed data. */
918 if (fromds == NULL) {
919 size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
922 err = dsl_dataset_space_written(fromds, ds,
923 &used, &comp, &size);
928 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
933 * Simple callback used to traverse the blocks of a snapshot and sum their
934 * uncompressed size.
938 dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
939 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
941 uint64_t *spaceptr = arg;
942 if (bp != NULL && !BP_IS_HOLE(bp)) {
943 *spaceptr += BP_GET_UCSIZE(bp);
949 * Given a destination snapshot and a TXG, calculate the approximate size of a
950 * send stream sent from that TXG. from_txg may be zero, indicating that the
951 * whole snapshot will be sent.
954 dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
957 dsl_pool_t *dp = ds->ds_dir->dd_pool;
961 ASSERT(dsl_pool_config_held(dp));
963 /* tosnap must be a snapshot */
964 if (!dsl_dataset_is_snapshot(ds))
965 return (SET_ERROR(EINVAL));
967 /* verify that from_txg is before the provided snapshot was taken */
968 if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
969 return (SET_ERROR(EXDEV));
973 * traverse the blocks of the snapshot with birth times after
974 * from_txg, summing their uncompressed size
976 err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
977 dmu_calculate_send_traversal, &size);
981 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
985 typedef struct dmu_recv_begin_arg {
986 const char *drba_origin;
987 dmu_recv_cookie_t *drba_cookie;
989 uint64_t drba_snapobj;
990 } dmu_recv_begin_arg_t;
993 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
998 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1000 /* temporary clone name must not exist */
1001 error = zap_lookup(dp->dp_meta_objset,
1002 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
1004 if (error != ENOENT)
1005 return (error == 0 ? EBUSY : error);
1007 /* new snapshot name must not exist */
1008 error = zap_lookup(dp->dp_meta_objset,
1009 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1010 drba->drba_cookie->drc_tosnap, 8, 1, &val);
1011 if (error != ENOENT)
1012 return (error == 0 ? EEXIST : error);
1015 * Check snapshot limit before receiving. We'll recheck again at the
1016 * end, but might as well abort before receiving if we're already over the limit.
1019 * Note that we do not check the file system limit with
1020 * dsl_dir_fscount_check because the temporary %clones don't count
1021 * against that limit.
1023 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
1024 NULL, drba->drba_cred);
1028 if (fromguid != 0) {
1029 dsl_dataset_t *snap;
1030 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1032 /* Find snapshot in this dir that matches fromguid. */
1034 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1037 return (SET_ERROR(ENODEV));
1038 if (snap->ds_dir != ds->ds_dir) {
1039 dsl_dataset_rele(snap, FTAG);
1040 return (SET_ERROR(ENODEV));
1042 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
1044 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1045 dsl_dataset_rele(snap, FTAG);
1048 return (SET_ERROR(ENODEV));
1050 if (drba->drba_cookie->drc_force) {
1051 drba->drba_snapobj = obj;
1054 * If we are not forcing, there must be no
1055 * changes since fromsnap.
1057 if (dsl_dataset_modified_since_snap(ds, snap)) {
1058 dsl_dataset_rele(snap, FTAG);
1059 return (SET_ERROR(ETXTBSY));
1061 drba->drba_snapobj = ds->ds_prev->ds_object;
1064 dsl_dataset_rele(snap, FTAG);
1066 /* if full, then must be forced */
1067 if (!drba->drba_cookie->drc_force)
1068 return (SET_ERROR(EEXIST));
1069 /* start from $ORIGIN@$ORIGIN, if supported */
1070 drba->drba_snapobj = dp->dp_origin_snap != NULL ?
1071 dp->dp_origin_snap->ds_object : 0;
1079 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1081 dmu_recv_begin_arg_t *drba = arg;
1082 dsl_pool_t *dp = dmu_tx_pool(tx);
1083 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1084 uint64_t fromguid = drrb->drr_fromguid;
1085 int flags = drrb->drr_flags;
1087 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1089 const char *tofs = drba->drba_cookie->drc_tofs;
1091 /* already checked */
1092 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1094 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1095 DMU_COMPOUNDSTREAM ||
1096 drrb->drr_type >= DMU_OST_NUMTYPES ||
1097 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1098 return (SET_ERROR(EINVAL));
1100 /* Verify pool version supports SA if SA_SPILL feature set */
1101 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1102 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1103 return (SET_ERROR(ENOTSUP));
1106 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1107 * record to a plain WRITE record, so the pool must have the
1108 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1109 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1111 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1112 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1113 return (SET_ERROR(ENOTSUP));
1114 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1115 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1116 return (SET_ERROR(ENOTSUP));
1119 * The receiving code doesn't know how to translate large blocks
1120 * to smaller ones, so the pool must have the LARGE_BLOCKS
1121 * feature enabled if the stream has LARGE_BLOCKS.
1123 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1124 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1125 return (SET_ERROR(ENOTSUP));
1127 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1129 /* target fs already exists; recv into temp clone */
1131 /* Can't recv a clone into an existing fs */
1132 if (flags & DRR_FLAG_CLONE) {
1133 dsl_dataset_rele(ds, FTAG);
1134 return (SET_ERROR(EINVAL));
1137 error = recv_begin_check_existing_impl(drba, ds, fromguid);
1138 dsl_dataset_rele(ds, FTAG);
1139 } else if (error == ENOENT) {
1140 /* target fs does not exist; must be a full backup or clone */
1141 char buf[MAXNAMELEN];
1144 * If it's a non-clone incremental, we are missing the
1145 * target fs, so fail the recv.
1147 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
1148 return (SET_ERROR(ENOENT));
1150 /* Open the parent of tofs */
1151 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
1152 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1153 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1158 * Check filesystem and snapshot limits before receiving. We'll
1159 * recheck snapshot limits again at the end (we create the
1160 * filesystems and increment those counts during begin_sync).
1162 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1163 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1165 dsl_dataset_rele(ds, FTAG);
1169 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1170 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1172 dsl_dataset_rele(ds, FTAG);
1176 if (drba->drba_origin != NULL) {
1177 dsl_dataset_t *origin;
1178 error = dsl_dataset_hold(dp, drba->drba_origin,
1181 dsl_dataset_rele(ds, FTAG);
1184 if (!origin->ds_is_snapshot) {
1185 dsl_dataset_rele(origin, FTAG);
1186 dsl_dataset_rele(ds, FTAG);
1187 return (SET_ERROR(EINVAL));
1189 if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
1190 dsl_dataset_rele(origin, FTAG);
1191 dsl_dataset_rele(ds, FTAG);
1192 return (SET_ERROR(ENODEV));
1194 dsl_dataset_rele(origin, FTAG);
1196 dsl_dataset_rele(ds, FTAG);
1203 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1205 dmu_recv_begin_arg_t *drba = arg;
1206 dsl_pool_t *dp = dmu_tx_pool(tx);
1207 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1208 const char *tofs = drba->drba_cookie->drc_tofs;
1209 dsl_dataset_t *ds, *newds;
1214 crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
1215 DS_FLAG_CI_DATASET : 0;
1217 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1219 /* create temporary clone */
1220 dsl_dataset_t *snap = NULL;
1221 if (drba->drba_snapobj != 0) {
1222 VERIFY0(dsl_dataset_hold_obj(dp,
1223 drba->drba_snapobj, FTAG, &snap));
1225 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1226 snap, crflags, drba->drba_cred, tx);
1227 if (drba->drba_snapobj != 0)
1228 dsl_dataset_rele(snap, FTAG);
1229 dsl_dataset_rele(ds, FTAG);
1233 dsl_dataset_t *origin = NULL;
1235 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1237 if (drba->drba_origin != NULL) {
1238 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1242 /* Create new dataset. */
1243 dsobj = dsl_dataset_create_sync(dd,
1244 strrchr(tofs, '/') + 1,
1245 origin, crflags, drba->drba_cred, tx);
1247 dsl_dataset_rele(origin, FTAG);
1248 dsl_dir_rele(dd, FTAG);
1249 drba->drba_cookie->drc_newfs = B_TRUE;
1251 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1253 if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1254 DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1255 !newds->ds_large_blocks) {
1256 dsl_dataset_activate_large_blocks_sync_impl(dsobj, tx);
1257 newds->ds_large_blocks = B_TRUE;
1260 dmu_buf_will_dirty(newds->ds_dbuf, tx);
1261 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1264 * If we actually created a non-clone, we need to create the
1265 * objset in our new dataset.
1267 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1268 (void) dmu_objset_create_impl(dp->dp_spa,
1269 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1272 drba->drba_cookie->drc_ds = newds;
1274 spa_history_log_internal_ds(newds, "receive", tx, "");
1278 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1279 * succeeds; otherwise we will leak the holds on the datasets.
1282 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
1283 boolean_t force, char *origin, dmu_recv_cookie_t *drc)
1285 dmu_recv_begin_arg_t drba = { 0 };
1286 dmu_replay_record_t *drr;
1288 bzero(drc, sizeof (dmu_recv_cookie_t));
1289 drc->drc_drrb = drrb;
1290 drc->drc_tosnap = tosnap;
1291 drc->drc_tofs = tofs;
1292 drc->drc_force = force;
1293 drc->drc_cred = CRED();
1295 if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
1296 drc->drc_byteswap = B_TRUE;
1297 else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
1298 return (SET_ERROR(EINVAL));
1300 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
1301 drr->drr_type = DRR_BEGIN;
1302 drr->drr_u.drr_begin = *drc->drc_drrb;
1303 if (drc->drc_byteswap) {
1304 fletcher_4_incremental_byteswap(drr,
1305 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1307 fletcher_4_incremental_native(drr,
1308 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1310 kmem_free(drr, sizeof (dmu_replay_record_t));
1312 if (drc->drc_byteswap) {
1313 drrb->drr_magic = BSWAP_64(drrb->drr_magic);
1314 drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
1315 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
1316 drrb->drr_type = BSWAP_32(drrb->drr_type);
1317 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
1318 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
1321 drba.drba_origin = origin;
1322 drba.drba_cookie = drc;
1323 drba.drba_cred = CRED();
1325 return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
1326 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1336 int bufsize; /* amount of memory allocated for buf */
1338 dmu_replay_record_t *drr;
1339 dmu_replay_record_t *next_drr;
1342 zio_cksum_t prev_cksum;
1344 avl_tree_t *guid_to_ds_map;
1347 typedef struct guid_map_entry {
1349 dsl_dataset_t *gme_ds;
1354 guid_compare(const void *arg1, const void *arg2)
1356 const guid_map_entry_t *gmep1 = arg1;
1357 const guid_map_entry_t *gmep2 = arg2;
1359 if (gmep1->guid < gmep2->guid)
1360 return (-1);
1361 else if (gmep1->guid > gmep2->guid)
1362 return (1);
1363 else
1364 return (0);
1367 free_guid_map_onexit(void *arg)
1369 avl_tree_t *ca = arg;
1370 void *cookie = NULL;
1371 guid_map_entry_t *gmep;
1373 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1374 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1375 dsl_dataset_rele(gmep->gme_ds, gmep);
1376 kmem_free(gmep, sizeof (guid_map_entry_t));
1379 kmem_free(ca, sizeof (avl_tree_t));
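/*
 * Descriptive note (added): for dedup'ed streams the guid map is built
 * in dmu_recv_stream() and registered with zfs_onexit against the
 * caller's cleanup fd, so it survives across the individual receives of
 * a compound stream; free_guid_map_onexit() releases the dataset holds
 * when that fd is finally closed.
 */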
1383 restore_bytes(struct restorearg *ra, void *buf, int len, off_t off, ssize_t *resid)
1389 aiov.iov_base = buf;
1391 auio.uio_iov = &aiov;
1392 auio.uio_iovcnt = 1;
1393 auio.uio_resid = len;
1394 auio.uio_segflg = UIO_SYSSPACE;
1395 auio.uio_rw = UIO_READ;
1396 auio.uio_offset = off;
1397 auio.uio_td = ra->td;
1399 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1401 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1404 *resid = auio.uio_resid;
1409 restore_read(struct restorearg *ra, int len, void *buf)
1413 /* some things will require 8-byte alignment, so everything must be 8-byte aligned */
1415 ASSERT3U(len, <=, ra->bufsize);
1417 while (done < len) {
1420 ra->err = restore_bytes(ra, (char *)buf + done,
1421 len - done, ra->voff, &resid);
1423 if (resid == len - done)
1424 ra->err = SET_ERROR(EINVAL);
1425 ra->voff += len - done - resid;
1431 ASSERT3U(done, ==, len);
1436 byteswap_record(dmu_replay_record_t *drr)
1438 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1439 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1440 drr->drr_type = BSWAP_32(drr->drr_type);
1441 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1443 switch (drr->drr_type) {
1445 DO64(drr_begin.drr_magic);
1446 DO64(drr_begin.drr_versioninfo);
1447 DO64(drr_begin.drr_creation_time);
1448 DO32(drr_begin.drr_type);
1449 DO32(drr_begin.drr_flags);
1450 DO64(drr_begin.drr_toguid);
1451 DO64(drr_begin.drr_fromguid);
1454 DO64(drr_object.drr_object);
1455 DO32(drr_object.drr_type);
1456 DO32(drr_object.drr_bonustype);
1457 DO32(drr_object.drr_blksz);
1458 DO32(drr_object.drr_bonuslen);
1459 DO64(drr_object.drr_toguid);
1461 case DRR_FREEOBJECTS:
1462 DO64(drr_freeobjects.drr_firstobj);
1463 DO64(drr_freeobjects.drr_numobjs);
1464 DO64(drr_freeobjects.drr_toguid);
1467 DO64(drr_write.drr_object);
1468 DO32(drr_write.drr_type);
1469 DO64(drr_write.drr_offset);
1470 DO64(drr_write.drr_length);
1471 DO64(drr_write.drr_toguid);
1472 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
1473 DO64(drr_write.drr_key.ddk_prop);
1475 case DRR_WRITE_BYREF:
1476 DO64(drr_write_byref.drr_object);
1477 DO64(drr_write_byref.drr_offset);
1478 DO64(drr_write_byref.drr_length);
1479 DO64(drr_write_byref.drr_toguid);
1480 DO64(drr_write_byref.drr_refguid);
1481 DO64(drr_write_byref.drr_refobject);
1482 DO64(drr_write_byref.drr_refoffset);
1483 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
1485 DO64(drr_write_byref.drr_key.ddk_prop);
1487 case DRR_WRITE_EMBEDDED:
1488 DO64(drr_write_embedded.drr_object);
1489 DO64(drr_write_embedded.drr_offset);
1490 DO64(drr_write_embedded.drr_length);
1491 DO64(drr_write_embedded.drr_toguid);
1492 DO32(drr_write_embedded.drr_lsize);
1493 DO32(drr_write_embedded.drr_psize);
1496 DO64(drr_free.drr_object);
1497 DO64(drr_free.drr_offset);
1498 DO64(drr_free.drr_length);
1499 DO64(drr_free.drr_toguid);
1502 DO64(drr_spill.drr_object);
1503 DO64(drr_spill.drr_length);
1504 DO64(drr_spill.drr_toguid);
1507 DO64(drr_end.drr_toguid);
1508 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
1512 if (drr->drr_type != DRR_BEGIN) {
1513 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
1520 static inline uint8_t
1521 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1523 if (bonus_type == DMU_OT_SA) {
1527 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
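/*
 * Worked example (added, assuming the classic 512-byte dnode, where
 * DN_MAX_BONUSLEN == 320 and SPA_BLKPTRSHIFT == 7): a 320-byte bonus
 * buffer leaves room for 1 block pointer, a 192-byte bonus for 2, and a
 * 64-byte bonus for the maximum of 3.  DMU_OT_SA bonus buffers can spill
 * into a separate spill block, so they are special-cased above.
 */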
1532 restore_object(struct restorearg *ra, struct drr_object *drro, void *data)
1534 dmu_object_info_t doi;
1539 if (drro->drr_type == DMU_OT_NONE ||
1540 !DMU_OT_IS_VALID(drro->drr_type) ||
1541 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1542 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1543 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1544 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1545 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1546 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(ra->os)) ||
1547 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1548 return (SET_ERROR(EINVAL));
1551 err = dmu_object_info(ra->os, drro->drr_object, &doi);
1553 if (err != 0 && err != ENOENT)
1554 return (SET_ERROR(EINVAL));
1555 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1558 * If we are losing blkptrs or changing the block size this must
1559 * be a new file instance. We must clear out the previous file
1560 * contents before we can change this type of metadata in the dnode.
1565 nblkptr = deduce_nblkptr(drro->drr_bonustype,
1566 drro->drr_bonuslen);
1568 if (drro->drr_blksz != doi.doi_data_block_size ||
1569 nblkptr < doi.doi_nblkptr) {
1570 err = dmu_free_long_range(ra->os, drro->drr_object,
1573 return (SET_ERROR(EINVAL));
1577 tx = dmu_tx_create(ra->os);
1578 dmu_tx_hold_bonus(tx, object);
1579 err = dmu_tx_assign(tx, TXG_WAIT);
1585 if (object == DMU_NEW_OBJECT) {
1586 /* currently free, want to be allocated */
1587 err = dmu_object_claim(ra->os, drro->drr_object,
1588 drro->drr_type, drro->drr_blksz,
1589 drro->drr_bonustype, drro->drr_bonuslen, tx);
1590 } else if (drro->drr_type != doi.doi_type ||
1591 drro->drr_blksz != doi.doi_data_block_size ||
1592 drro->drr_bonustype != doi.doi_bonus_type ||
1593 drro->drr_bonuslen != doi.doi_bonus_size) {
1594 /* currently allocated, but with different properties */
1595 err = dmu_object_reclaim(ra->os, drro->drr_object,
1596 drro->drr_type, drro->drr_blksz,
1597 drro->drr_bonustype, drro->drr_bonuslen, tx);
1601 return (SET_ERROR(EINVAL));
1604 dmu_object_set_checksum(ra->os, drro->drr_object,
1605 drro->drr_checksumtype, tx);
1606 dmu_object_set_compress(ra->os, drro->drr_object,
1607 drro->drr_compress, tx);
1612 VERIFY0(dmu_bonus_hold(ra->os, drro->drr_object, FTAG, &db));
1613 dmu_buf_will_dirty(db, tx);
1615 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1616 bcopy(data, db->db_data, drro->drr_bonuslen);
1618 dmu_object_byteswap_t byteswap =
1619 DMU_OT_BYTESWAP(drro->drr_bonustype);
1620 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1621 drro->drr_bonuslen);
1623 dmu_buf_rele(db, FTAG);
1631 restore_freeobjects(struct restorearg *ra,
1632 struct drr_freeobjects *drrfo)
1636 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1637 return (SET_ERROR(EINVAL));
1639 for (obj = drrfo->drr_firstobj;
1640 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1641 (void) dmu_object_next(ra->os, &obj, FALSE, 0)) {
1644 if (dmu_object_info(ra->os, obj, NULL) != 0)
1647 err = dmu_free_long_object(ra->os, obj);
1655 restore_write(struct restorearg *ra, struct drr_write *drrw, arc_buf_t *abuf)
1660 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1661 !DMU_OT_IS_VALID(drrw->drr_type))
1662 return (SET_ERROR(EINVAL));
1664 if (dmu_object_info(ra->os, drrw->drr_object, NULL) != 0)
1665 return (SET_ERROR(EINVAL));
1667 tx = dmu_tx_create(ra->os);
1669 dmu_tx_hold_write(tx, drrw->drr_object,
1670 drrw->drr_offset, drrw->drr_length);
1671 err = dmu_tx_assign(tx, TXG_WAIT);
1677 dmu_object_byteswap_t byteswap =
1678 DMU_OT_BYTESWAP(drrw->drr_type);
1679 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
1684 if (dmu_bonus_hold(ra->os, drrw->drr_object, FTAG, &bonus) != 0)
1685 return (SET_ERROR(EINVAL));
1686 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1688 dmu_buf_rele(bonus, FTAG);
1693 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
1694 * streams to refer to a copy of the data that is already on the
1695 * system because it came in earlier in the stream. This function
1696 * finds the earlier copy of the data, and uses that copy instead of
1697 * data from the stream to fulfill this write.
1700 restore_write_byref(struct restorearg *ra, struct drr_write_byref *drrwbr)
1704 guid_map_entry_t gmesrch;
1705 guid_map_entry_t *gmep;
1707 objset_t *ref_os = NULL;
1710 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1711 return (SET_ERROR(EINVAL));
1714 * If the GUID of the referenced dataset is different from the
1715 * GUID of the target dataset, find the referenced dataset.
1717 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1718 gmesrch.guid = drrwbr->drr_refguid;
1719 if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1721 return (SET_ERROR(EINVAL));
1723 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1724 return (SET_ERROR(EINVAL));
1729 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1730 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1734 tx = dmu_tx_create(ra->os);
1736 dmu_tx_hold_write(tx, drrwbr->drr_object,
1737 drrwbr->drr_offset, drrwbr->drr_length);
1738 err = dmu_tx_assign(tx, TXG_WAIT);
1743 dmu_write(ra->os, drrwbr->drr_object,
1744 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1745 dmu_buf_rele(dbp, FTAG);
1751 restore_write_embedded(struct restorearg *ra,
1752 struct drr_write_embedded *drrwnp, void *data)
1757 if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1760 if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1763 if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1765 if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1768 tx = dmu_tx_create(ra->os);
1770 dmu_tx_hold_write(tx, drrwnp->drr_object,
1771 drrwnp->drr_offset, drrwnp->drr_length);
1772 err = dmu_tx_assign(tx, TXG_WAIT);
1778 dmu_write_embedded(ra->os, drrwnp->drr_object,
1779 drrwnp->drr_offset, data, drrwnp->drr_etype,
1780 drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1781 ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1788 restore_spill(struct restorearg *ra, struct drr_spill *drrs, void *data)
1791 dmu_buf_t *db, *db_spill;
1794 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1795 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(ra->os)))
1796 return (SET_ERROR(EINVAL));
1798 if (dmu_object_info(ra->os, drrs->drr_object, NULL) != 0)
1799 return (SET_ERROR(EINVAL));
1801 VERIFY0(dmu_bonus_hold(ra->os, drrs->drr_object, FTAG, &db));
1802 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1803 dmu_buf_rele(db, FTAG);
1807 tx = dmu_tx_create(ra->os);
1809 dmu_tx_hold_spill(tx, db->db_object);
1811 err = dmu_tx_assign(tx, TXG_WAIT);
1813 dmu_buf_rele(db, FTAG);
1814 dmu_buf_rele(db_spill, FTAG);
1818 dmu_buf_will_dirty(db_spill, tx);
1820 if (db_spill->db_size < drrs->drr_length)
1821 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1822 drrs->drr_length, tx));
1823 bcopy(data, db_spill->db_data, drrs->drr_length);
1825 dmu_buf_rele(db, FTAG);
1826 dmu_buf_rele(db_spill, FTAG);
1834 restore_free(struct restorearg *ra, struct drr_free *drrf)
1838 if (drrf->drr_length != -1ULL &&
1839 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
1840 return (SET_ERROR(EINVAL));
1842 if (dmu_object_info(ra->os, drrf->drr_object, NULL) != 0)
1843 return (SET_ERROR(EINVAL));
1845 err = dmu_free_long_range(ra->os, drrf->drr_object,
1846 drrf->drr_offset, drrf->drr_length);
1850 /* used to destroy the drc_ds on error */
1852 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
1854 char name[MAXNAMELEN];
1855 dsl_dataset_name(drc->drc_ds, name);
1856 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
1857 (void) dsl_destroy_head(name);
1861 restore_cksum(struct restorearg *ra, int len, void *buf)
1864 fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
1866 fletcher_4_incremental_native(buf, len, &ra->cksum);
1871 * If len != 0, read payload into buf.
1872 * Read next record's header into ra->next_drr.
1873 * Verify checksum of payload and next record.
1876 restore_read_payload_and_next_header(struct restorearg *ra, int len, void *buf)
1881 ASSERT3U(len, <=, ra->bufsize);
1882 err = restore_read(ra, len, buf);
1885 restore_cksum(ra, len, buf);
1888 ra->prev_cksum = ra->cksum;
1890 err = restore_read(ra, sizeof (*ra->next_drr), ra->next_drr);
1893 if (ra->next_drr->drr_type == DRR_BEGIN)
1894 return (SET_ERROR(EINVAL));
1897 * Note: checksum is of everything up to but not including the
1898 * checksum itself.
1900 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
1901 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
1903 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
1906 zio_cksum_t cksum_orig = ra->next_drr->drr_u.drr_checksum.drr_checksum;
1907 zio_cksum_t *cksump = &ra->next_drr->drr_u.drr_checksum.drr_checksum;
1910 byteswap_record(ra->next_drr);
1912 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
1913 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump))
1914 return (SET_ERROR(ECKSUM));
1916 restore_cksum(ra, sizeof (cksum_orig), &cksum_orig);
1922 restore_process_record(struct restorearg *ra)
1926 switch (ra->drr->drr_type) {
1929 struct drr_object *drro = &ra->drr->drr_u.drr_object;
1930 err = restore_read_payload_and_next_header(ra,
1931 P2ROUNDUP(drro->drr_bonuslen, 8), ra->buf);
1934 return (restore_object(ra, drro, ra->buf));
1936 case DRR_FREEOBJECTS:
1938 struct drr_freeobjects *drrfo =
1939 &ra->drr->drr_u.drr_freeobjects;
1940 err = restore_read_payload_and_next_header(ra, 0, NULL);
1943 return (restore_freeobjects(ra, drrfo));
1947 struct drr_write *drrw = &ra->drr->drr_u.drr_write;
1948 arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
1951 err = restore_read_payload_and_next_header(ra,
1952 drrw->drr_length, abuf->b_data);
1955 err = restore_write(ra, drrw, abuf);
1956 /* if restore_write() is successful, it consumes the arc_buf */
1958 dmu_return_arcbuf(abuf);
1961 case DRR_WRITE_BYREF:
1963 struct drr_write_byref *drrwbr =
1964 &ra->drr->drr_u.drr_write_byref;
1965 err = restore_read_payload_and_next_header(ra, 0, NULL);
1968 return (restore_write_byref(ra, drrwbr));
1970 case DRR_WRITE_EMBEDDED:
1972 struct drr_write_embedded *drrwe =
1973 &ra->drr->drr_u.drr_write_embedded;
1974 err = restore_read_payload_and_next_header(ra,
1975 P2ROUNDUP(drrwe->drr_psize, 8), ra->buf);
1978 return (restore_write_embedded(ra, drrwe, ra->buf));
1982 struct drr_free *drrf = &ra->drr->drr_u.drr_free;
1983 err = restore_read_payload_and_next_header(ra, 0, NULL);
1986 return (restore_free(ra, drrf));
1990 struct drr_end *drre = &ra->drr->drr_u.drr_end;
1991 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
1992 return (SET_ERROR(EINVAL));
1997 struct drr_spill *drrs = &ra->drr->drr_u.drr_spill;
1998 err = restore_read_payload_and_next_header(ra,
1999 drrs->drr_length, ra->buf);
2002 return (restore_spill(ra, drrs, ra->buf));
2005 return (SET_ERROR(EINVAL));
2010 * NB: callers *must* call dmu_recv_end() if this succeeds.
2013 dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2014 int cleanup_fd, uint64_t *action_handlep)
2017 struct restorearg ra = { 0 };
2020 ra.byteswap = drc->drc_byteswap;
2021 ra.cksum = drc->drc_cksum;
2025 ra.bufsize = SPA_MAXBLOCKSIZE;
2026 ra.drr = kmem_alloc(sizeof (*ra.drr), KM_SLEEP);
2027 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);
2028 ra.next_drr = kmem_alloc(sizeof (*ra.next_drr), KM_SLEEP);
2030 /* these were verified in dmu_recv_begin */
2031 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2033 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2036 * Open the objset we are modifying.
2038 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2040 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2042 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2044 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2045 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2048 if (cleanup_fd == -1) {
2049 ra.err = SET_ERROR(EBADF);
2052 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2058 if (*action_handlep == 0) {
2060 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2061 avl_create(ra.guid_to_ds_map, guid_compare,
2062 sizeof (guid_map_entry_t),
2063 offsetof(guid_map_entry_t, avlnode));
2064 err = zfs_onexit_add_cb(minor,
2065 free_guid_map_onexit, ra.guid_to_ds_map,
2070 err = zfs_onexit_cb_data(minor, *action_handlep,
2071 (void **)&ra.guid_to_ds_map);
2076 drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
2079 err = restore_read_payload_and_next_header(&ra, 0, NULL);
2085 if (issig(JUSTLOOKING) && issig(FORREAL)) {
2086 err = SET_ERROR(EINTR);
2091 ra.next_drr = ra.drr;
2094 /* process ra.drr, read in ra.next_drr */
2095 err = restore_process_record(&ra);
2098 if (ra.drr->drr_type == DRR_END)
2103 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2104 zfs_onexit_fd_rele(cleanup_fd);
2108 * destroy what we created, so we don't leave it in the
2109 * inconsistent restoring state.
2111 dmu_recv_cleanup_ds(drc);
2114 kmem_free(ra.drr, sizeof (*ra.drr));
2115 kmem_free(ra.buf, ra.bufsize);
2116 kmem_free(ra.next_drr, sizeof (*ra.next_drr));
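/*
 * Descriptive note (added): dmu_recv_stream() double-buffers records in
 * ra.drr and ra.next_drr.  restore_read_payload_and_next_header()
 * checksums the current record's payload together with the next record's
 * header, so every record is fully verified before it is processed, and
 * the two buffers are then exchanged for the next pass.
 */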
2122 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2124 dmu_recv_cookie_t *drc = arg;
2125 dsl_pool_t *dp = dmu_tx_pool(tx);
2128 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2130 if (!drc->drc_newfs) {
2131 dsl_dataset_t *origin_head;
2133 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2136 if (drc->drc_force) {
2138 * We will destroy any snapshots in tofs (i.e. before
2139 * origin_head) that are after the origin (which is
2140 * the snap before drc_ds, because drc_ds cannot
2141 * have any snaps of its own).
2145 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2147 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2148 dsl_dataset_t *snap;
2149 error = dsl_dataset_hold_obj(dp, obj, FTAG,
2153 if (snap->ds_dir != origin_head->ds_dir)
2154 error = SET_ERROR(EINVAL);
2156 error = dsl_destroy_snapshot_check_impl(
2159 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2160 dsl_dataset_rele(snap, FTAG);
2165 dsl_dataset_rele(origin_head, FTAG);
2169 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2170 origin_head, drc->drc_force, drc->drc_owner, tx);
2172 dsl_dataset_rele(origin_head, FTAG);
2175 error = dsl_dataset_snapshot_check_impl(origin_head,
2176 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2177 dsl_dataset_rele(origin_head, FTAG);
2181 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2183 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2184 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2190 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2192 dmu_recv_cookie_t *drc = arg;
2193 dsl_pool_t *dp = dmu_tx_pool(tx);
2195 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2196 tx, "snap=%s", drc->drc_tosnap);
2198 if (!drc->drc_newfs) {
2199 dsl_dataset_t *origin_head;
2201 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2204 if (drc->drc_force) {
2206 * Destroy any snapshots of drc_tofs (origin_head)
2207 * after the origin (the snap before drc_ds).
2211 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2213 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2214 dsl_dataset_t *snap;
2215 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2217 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2218 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2219 dsl_destroy_snapshot_sync_impl(snap,
2221 dsl_dataset_rele(snap, FTAG);
2224 VERIFY3P(drc->drc_ds->ds_prev, ==,
2225 origin_head->ds_prev);
2227 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2229 dsl_dataset_snapshot_sync_impl(origin_head,
2230 drc->drc_tosnap, tx);
2232 /* set snapshot's creation time and guid */
2233 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2234 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2235 drc->drc_drrb->drr_creation_time;
2236 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2237 drc->drc_drrb->drr_toguid;
2238 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2239 ~DS_FLAG_INCONSISTENT;
2241 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2242 dsl_dataset_phys(origin_head)->ds_flags &=
2243 ~DS_FLAG_INCONSISTENT;
2245 dsl_dataset_rele(origin_head, FTAG);
2246 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2248 if (drc->drc_owner != NULL)
2249 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2251 dsl_dataset_t *ds = drc->drc_ds;
2253 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2255 /* set snapshot's creation time and guid */
2256 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2257 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2258 drc->drc_drrb->drr_creation_time;
2259 dsl_dataset_phys(ds->ds_prev)->ds_guid =
2260 drc->drc_drrb->drr_toguid;
2261 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2262 ~DS_FLAG_INCONSISTENT;
2264 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2265 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2267 drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2269 * Release the hold from dmu_recv_begin. This must be done before
2270 * we return to open context, so that when we free the dataset's dnode,
2271 * we can evict its bonus buffer.
2273 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2278 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
2281 dsl_dataset_t *snapds;
2282 guid_map_entry_t *gmep;
2285 ASSERT(guid_map != NULL);
2287 err = dsl_pool_hold(name, FTAG, &dp);
2290 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2291 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
2293 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
2294 gmep->gme_ds = snapds;
2295 avl_add(guid_map, gmep);
2296 dsl_dataset_long_hold(snapds, gmep);
2298 kmem_free(gmep, sizeof (*gmep));
2300 dsl_pool_rele(dp, FTAG);
2304 static int dmu_recv_end_modified_blocks = 3;
2307 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
2310 char name[MAXNAMELEN];
2314 * We will be destroying the ds; make sure its origin is unmounted if necessary.
2317 dsl_dataset_name(drc->drc_ds, name);
2318 zfs_destroy_unmount_origin(name);
2321 error = dsl_sync_task(drc->drc_tofs,
2322 dmu_recv_end_check, dmu_recv_end_sync, drc,
2323 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2326 dmu_recv_cleanup_ds(drc);
2331 dmu_recv_new_end(dmu_recv_cookie_t *drc)
2335 error = dsl_sync_task(drc->drc_tofs,
2336 dmu_recv_end_check, dmu_recv_end_sync, drc,
2337 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2340 dmu_recv_cleanup_ds(drc);
2341 } else if (drc->drc_guid_to_ds_map != NULL) {
2342 (void) add_ds_to_guidmap(drc->drc_tofs,
2343 drc->drc_guid_to_ds_map,
2344 drc->drc_newsnapobj);
2350 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
2352 drc->drc_owner = owner;
2355 return (dmu_recv_new_end(drc));
2357 return (dmu_recv_existing_end(drc));
2361 * Return TRUE if this objset is currently being received into.
2364 dmu_objset_is_receiving(objset_t *os)
2366 return (os->os_dsl_dataset != NULL &&
2367 os->os_dsl_dataset->ds_owner == dmu_recv_tag);