 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

#define	dump_write	dmu_dump_write
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
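
/*
 * Example: for a 128K data block (datablkszsec = 256 512-byte sectors)
 * and 128K indirect blocks (indblkshift = 17), a level-1 indirect block
 * spans 256 << (9 + 1 * (17 - 7)) bytes = 128M of object offset, since
 * each 128K indirect block holds 1024 128-byte block pointers.
 */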
struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
};

struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	struct uio auio;
	struct iovec aiov;

	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
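
/*
 * The wire format dump_record() produces is thus:
 *
 *	+-------------------------------------+----------------------+
 *	| dmu_replay_record_t (drr_checksum   | payload              |
 *	| is the trailing zio_cksum_t field)  | (payload_len bytes)  |
 *	+-------------------------------------+----------------------+
 *
 * The running fletcher-4 in dsa_zc accumulates over every byte sent so
 * far, so the checksum stored in each non-BEGIN record verifies the
 * entire stream up to the start of that checksum field.
 */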
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object+offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
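
/*
 * For example, dump_free(dsp, 5, 0, 4096) followed by
 * dump_free(dsp, 5, 4096, 4096) and dump_free(dsp, 5, 8192, 4096)
 * leaves a single pending DRR_FREE record for object 5, offset 0,
 * length 12288; it is only flushed to the stream when a record of a
 * different type, a non-contiguous free, or a length of -1 arrives.
 */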
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (0);
}
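
/*
 * Note that records are enqueued with a weight equal to the amount of
 * data they describe, so the bounded queue (zfs_send_queue_length, 16MB
 * by default) throttles the traversal thread by bytes outstanding
 * rather than by record count.
 */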
/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread immediately pushes the End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset(st_arg->ds, st_arg->fromtxg,
		    st_arg->flags, send_cb, arg);
		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}
/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		ASSERT0(zb->zb_level);
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (!(dsa->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone, boolean_t embedok,
#ifdef illumos
    boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
#else
    boolean_t large_block_ok, int outfd, struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg;

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (ancestor_zb != NULL);
	dsp->dsa_featureflags = featureflags;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (dump_record(dsp, NULL, 0) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
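
/*
 * dmu_send_impl() and send_traverse_thread() thus form a two-stage
 * pipeline: the traversal thread prefetches blocks and enqueues
 * send_block_records while this thread dequeues them and performs the
 * potentially blocking writes to the output file, letting disk reads
 * and stream writes overlap.
 */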
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
	int err;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
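
/*
 * For example, with the default 128K recordsize, 1GB of data is assumed
 * to be 8192 blocks; the estimate subtracts 8192 * sizeof (blkptr_t) =
 * 1MB of indirect-block space and adds 8192 * sizeof
 * (dmu_replay_record_t) (312 bytes each, about 2.4MB) of per-block
 * stream-record overhead.
 */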
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}
/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *spaceptr = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		*spaceptr += BP_GET_UCSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG.  from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size = 0;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err != 0)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.  We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_cred = CRED();

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
}
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;
	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;
	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
};

struct receive_arg {
	objset_t *os;
	kthread_t *td;
	struct file *fp;
	uint64_t voff; /* The current offset in the stream */
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	list_t ignore_obj_list;
};

struct receive_ign_obj_node {
	list_node_t node;
	uint64_t object;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;
static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	else
		return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
static int
restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ASSERT3U(done, ==, len);
	return (0);
}
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
	}
}
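
/*
 * For example, with DN_MAX_BONUSLEN of 320 bytes, a 64-byte non-SA
 * bonus buffer leaves 256 bytes of dnode tail, i.e. room for two more
 * 128-byte block pointers: 1 + (256 >> SPA_BLKPTRSHIFT) = 3.  An SA
 * bonus buffer may later grow to consume the blkptr space, so only the
 * minimum of one blkptr is assumed.
 */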
static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));
	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(rwa->os, obj, NULL) != 0)
			continue;

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);
	}
	return (0);
}
static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (rwa->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    drrw->drr_length);
	}

	dmu_buf_t *bonus;
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
		return (SET_ERROR(EINVAL));
	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
	dmu_tx_commit(tx);
	dmu_buf_rele(bonus, FTAG);
	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(rwa->os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwnp, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwnp->drr_object,
	    drrwnp->drr_offset, drrwnp->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwnp->drr_object,
	    drrwnp->drr_offset, data, drrwnp->drr_etype,
	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	dmu_tx_commit(tx);
	return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	char name[MAXNAMELEN];
	dsl_dataset_name(drc->drc_ds, name);
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	(void) dsl_destroy_head(name);
}

static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len,
    void *buf)
{
	int err;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		ra->rrd->payload = buf;
		ra->rrd->payload_size = len;
		err = receive_read(ra, len, ra->rrd->payload);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, ra->rrd->payload);
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	zio_cksum_t cksum_orig =
	    ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	zio_cksum_t *cksump =
	    &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
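
/*
 * Note that ra->prev_cksum snapshots the running checksum before the
 * next header is folded in; DRR_END records carry the stream checksum
 * as of the end of the previous record, which is why
 * receive_read_record() compares prev_cksum, not cksum, against
 * drre->drr_checksum.
 */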
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue
 * prefetches for a given object.  We do this for both correctness (in
 * case the blocksize of an object has changed) and performance (if the
 * object doesn't exist, don't needlessly try to issue prefetches).  We
 * also trim the list as we go through the stream to prevent it from
 * growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
		kmem_free(node, sizeof (*node));
		node = list_head(&ra->ignore_obj_list);
	}
	if (node == NULL || node->object > object) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			struct receive_ign_obj_node *node =
			    kmem_zalloc(sizeof (*node),
			    KM_SLEEP);
			node->object = drro->drr_object;

			struct receive_ign_obj_node *last_object =
			    list_tail(&ra->ignore_obj_list);
			uint64_t last_objnum = (last_object != NULL ?
			    last_object->object : 0);
			ASSERT3U(node->object, >, last_objnum);

			list_insert_tail(&ra->ignore_obj_list, node);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
		    drrw->drr_length);

		err = receive_read_payload_and_next_header(ra,
		    drrw->drr_length, abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->write_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_length);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(EINVAL));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
		    buf);
		if (err != 0)
			kmem_free(buf, drrs->drr_length);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		return (receive_freeobjects(rwa, drrfo));
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->write_buf);
		rrd->write_buf = NULL;
		rrd->payload = NULL;
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		return (receive_write_byref(rwa, drrwbr));
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		return (receive_free(rwa, drrf));
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting
		 * things on the queue, but we need to clear everything in
		 * it before we can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->write_buf != NULL) {
			dmu_return_arcbuf(rrd->write_buf);
			rrd->write_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	/* Free the eos marker record and wake up the main thread. */
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	thread_exit();
}
/*
 * Read in the stream's records, one by one, and apply them to the pool.  There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks.  It will then push the records
 * onto an internal blocking queue.  The worker thread will pull the records
 * off the queue, and actually write the data into the DMU.  This way, the
 * worker thread doesn't have to wait for reads to complete, since everything
 * it needs (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
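/*
 * The resulting pipeline looks like this:
 *
 *	reader (this thread)                        writer thread
 *	receive_read_record() --> bqueue (rwa.q) --> receive_process_record()
 *
 * The queue is bounded by zfs_recv_queue_length bytes, so a slow writer
 * thread eventually applies backpressure to the reader.
 */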
int
dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	int err = 0;
	struct receive_arg ra = { 0 };
	struct receive_writer_arg rwa = { 0 };
	int featureflags;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.td = curthread;
	ra.fp = fp;
	ra.voff = *voffp;
	list_create(&ra.ignore_obj_list, sizeof (struct receive_ign_obj_node),
	    offsetof(struct receive_ign_obj_node, node));

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			rwa.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(rwa.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa.guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa.guid_to_ds_map);
			if (err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
	}

	err = receive_read_payload_and_next_header(&ra, 0, NULL);
	if (err != 0)
		goto out;

	(void) bqueue_init(&rwa.q, zfs_recv_queue_length,
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa.os = ra.os;
	rwa.byteswap = drc->drc_byteswap;

	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa.err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer.  It's ok if
	 * we miss a write for an iteration or two of the loop, since the
	 * writer thread will keep freeing records we send it until we send it
	 * an eos marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa.err is non-zero.
	 * In that case, the writer thread will free the rrd we just pushed.
	 * Second, if we're interrupted; in that case, either it's the first
	 * loop and ra.rrd was never allocated, or it's later, and ra.rrd has
	 * been handed off to the writer thread who will free it.  Finally,
	 * if receive_read_record fails or we're at the end of the stream,
	 * then we free ra.rrd and exit.
	 */
	while (rwa.err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(ra.rrd, ==, NULL);
		ra.rrd = ra.next_rrd;
		ra.next_rrd = NULL;
		/* Allocates and loads header into ra.next_rrd */
		err = receive_read_record(&ra);

		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra.rrd, sizeof (*ra.rrd));
			ra.rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa.q, ra.rrd,
		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
		ra.rrd = NULL;
	}
	if (ra.next_rrd == NULL)
		ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
	ra.next_rrd->eos_marker = B_TRUE;
	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);

	mutex_enter(&rwa.mutex);
	while (!rwa.done) {
		cv_wait(&rwa.cv, &rwa.mutex);
	}
	mutex_exit(&rwa.mutex);

	cv_destroy(&rwa.cv);
	mutex_destroy(&rwa.mutex);
	bqueue_destroy(&rwa.q);
	if (err == 0)
		err = rwa.err;

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	*voffp = ra.voff;
	for (struct receive_ign_obj_node *n =
	    list_remove_head(&ra.ignore_obj_list); n != NULL;
	    n = list_remove_head(&ra.ignore_obj_list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&ra.ignore_obj_list);
	return (err);
}
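/*
 * Check phase of the dmu_recv_end() sync task: verify that the receive can
 * be committed, either by clone-swapping into the existing filesystem (and,
 * for a forced receive, destroying any snapshots that came after the origin)
 * or by snapshotting the newly received filesystem.
 */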
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;
			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);
		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
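/*
 * Sync phase of the dmu_recv_end() sync task: commit the receive by
 * performing the changes that dmu_recv_end_check() validated, then clear
 * DS_FLAG_INCONSISTENT and stamp the new snapshot's creation time and guid
 * from the stream's BEGIN record.
 */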
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;
			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	}
	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}
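/*
 * Record a newly created snapshot in the guid map used by dedup'ed streams,
 * taking a long hold so the snapshot cannot be destroyed while the map
 * still references it.
 */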
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}
	dsl_pool_rele(dp, FTAG);
	return (err);
}
static int dmu_recv_end_modified_blocks = 3;
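/*
 * Finish a receive into an existing filesystem: run the end check/sync task
 * that swaps the received dataset into place.
 */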
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char name[MAXNAMELEN];

	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}
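/*
 * Finish a receive that created a brand-new filesystem: snapshot it, and if
 * this was a dedup'ed stream, record the new snapshot in the guid map.
 */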
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}
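/*
 * Commit a successful receive (see the NB above dmu_recv_stream());
 * dispatches to the new-filesystem or existing-filesystem variant based on
 * how dmu_recv_begin set up the cookie.
 */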
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	drc->drc_owner = owner;

	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}