/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#ifdef __FreeBSD__
#undef dump_write
#define	dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

TUNABLE_INT("vfs.zfs.send_set_freerecords_bit", &zfs_send_set_freerecords_bit);
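/*
 * Illustrative use of the tunable above (assumed, not exercised here):
 * on FreeBSD, setting vfs.zfs.send_set_freerecords_bit="0" in
 * loader.conf clears DRR_FLAG_FREERECORDS in generated streams, e.g.
 * for compatibility with receivers that predate the flag.
 */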
static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
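/*
 * Worked example (values assumed for illustration): with 128K data
 * blocks (datablkszsec = 256, since SPA_MINBLOCKSHIFT = 9) and 128K
 * indirect blocks (indblkshift = 17, so each indirect block holds
 * 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers), BP_SPAN yields
 * 256 << 9 = 128K at level 0 and 256 << (9 + 10) = 128M at level 1.
 */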
static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;	/* Dataset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t eos_marker;	/* Marks the end of the stream */
	blkptr_t bp;
	zbookmark_phys_t zb;
	uint8_t indblkshift;
	uint16_t datablkszsec;
	bqueue_node_t ln;
};
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	struct uio auio;
	struct iovec aiov;

	/*
	 * The code does not rely on this (len being a multiple of 8). We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read(). Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */
	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
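/*
 * Informal stream-layout sketch (for orientation; not normative): each
 * record is a fixed-size dmu_replay_record_t header followed by an
 * optional 8-byte-aligned payload. The fletcher4 checksum accumulated
 * above covers every byte sent so far, up to the checksum field of the
 * record that carries it:
 *
 *	| header | payload | header | payload | ... | END record |
 */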
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset. We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
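/*
 * Aggregation example (hypothetical values): freeing object 5 at
 * offset 0 for 128K and then at offset 128K for another 128K yields a
 * single pending DRR_FREE record covering 256K; the record is only
 * pushed to the stream when a non-adjacent or non-FREE record follows.
 */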
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    void *data)
{
	uint64_t payload_size;
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed */
	if (lsize != psize) {
		ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT(!BP_SHOULD_BYTESWAP(bp));
		ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
		ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
		ASSERT3S(psize, >, 0);
		ASSERT3S(lsize, >=, psize);

		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));

	return (0);
}
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from. In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from. We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}
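/*
 * Note on the enqueue above: each record is weighted by its data block
 * size, so the bounded bqueue (zfs_send_queue_length, 16MB by default)
 * limits how far the traversal thread can run ahead of the thread
 * writing the stream, bounding memory use rather than record count.
 */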
/*
 * This function kicks off the traverse_dataset. It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished. If there is no
 * dataset to traverse, the thread immediately pushes the End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}
/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 * - stream compression was requested
		 * - we aren't splitting large blocks into smaller chunks
		 * - the data won't need to be byteswapped before sending
		 * - this isn't an embedded block
		 * - this isn't metadata (if receiving on a different endian
		 *   system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_compressed)
			zioflags |= ZIO_FLAG_RAW;
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}
	if (compressok) {
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	}
	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) !=
	    0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0)
		goto out;

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
    uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
	int err;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */
	uint64_t recordsize;
	uint64_t record_count;

	/* Assume all (uncompressed) blocks are recordsize. */
	err = dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
	    &recordsize);
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size. Otherwise, use the
	 * uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);
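	/*
	 * Worked example with assumed sizes (blkptr_t = 128 bytes,
	 * dmu_replay_record_t = 312 bytes, recordsize = 128K): for 1GiB
	 * of data, record_count = 8192, so we subtract 8192 * 128 = 1MiB
	 * of indirect-block overhead and add 8192 * 312 (about 2.4MiB)
	 * of record headers to the estimate.
	 */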
	*sizep = size;

	return (0);
}

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t uncomp, comp;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get compressed and uncompressed size estimates of changed data. */
	if (fromds == NULL) {
		uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
		comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
	} else {
		uint64_t used;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &uncomp);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
	    stream_compressed, sizep);
	return (err);
}
struct calculate_send_arg {
	uint64_t uncompressed;
	uint64_t compressed;
};

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed and compressed sizes.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct calculate_send_arg *space = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		space->uncompressed += BP_GET_UCSIZE(bp);
		space->compressed += BP_GET_PSIZE(bp);
	}
	return (0);
}
/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG. from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	struct calculate_send_arg size = { 0 };

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
	    size.compressed, stream_compressed, sizep);
	return (err);
}
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}
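/*
 * Sketch of the expected call sequence (illustrative only; see the NB
 * comment above): a caller such as the zfs_ioctl receive path does
 * roughly
 *
 *	err = dmu_recv_begin(tofs, tosnap, drr_begin, force,
 *	    resumable, origin, &drc);
 *	if (err == 0)
 *		err = dmu_recv_stream(&drc, ...);	then dmu_recv_end()
 *
 * so the holds taken here are always released by the stream/end calls.
 */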
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object, last_offset;
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	kthread_t *td;
	struct file *fp;
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};
typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
static int
restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
	}
}
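/*
 * Example (values assumed for illustration): with DN_MAX_BONUSLEN of
 * 320 bytes and SPA_BLKPTRSHIFT of 7 (128-byte block pointers), a
 * 64-byte non-SA bonus buffer leaves room for 1 + (256 >> 7) = 3
 * block pointers, while an SA bonus always implies exactly 1.
 */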
2074 save_resume_state(struct receive_writer_arg *rwa,
2075 uint64_t object, uint64_t offset, dmu_tx_t *tx)
2077 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
2079 if (!rwa->resumable)
2083 * We use ds_resume_bytes[] != 0 to indicate that we need to
2084 * update this on disk, so it must not be 0.
2086 ASSERT(rwa->bytes_read != 0);
	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
2092 ASSERT(object != 0);
	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
2099 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
2100 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
2101 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
2102 ASSERT3U(rwa->bytes_read, >=,
2103 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
2105 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
2106 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
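/*
 * Illustrative note on how the state saved above is consumed, assuming
 * the standard resumable-receive flow (commands shown for illustration
 * only): the ds_resume_* values are persisted in the dataset's ZAP (the
 * DS_FIELD_RESUME_* keys removed again in dmu_recv_end_sync() below) and
 * surface to userland as the receive_resume_token property, e.g.:
 *
 *	zfs get -H -o value receive_resume_token pool/fs
 *	zfs send -t <token> | zfs receive -s pool/fs
 */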
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
2114 dmu_object_info_t doi;
2119 if (drro->drr_type == DMU_OT_NONE ||
2120 !DMU_OT_IS_VALID(drro->drr_type) ||
2121 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
2122 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
2123 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
2124 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
2125 drro->drr_blksz < SPA_MINBLOCKSIZE ||
2126 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
2127 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
2128 return (SET_ERROR(EINVAL));
2131 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
2133 if (err != 0 && err != ENOENT)
2134 return (SET_ERROR(EINVAL));
2135 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;
		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);
		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	}
	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (object == DMU_NEW_OBJECT) {
2166 /* currently free, want to be allocated */
2167 err = dmu_object_claim(rwa->os, drro->drr_object,
2168 drro->drr_type, drro->drr_blksz,
2169 drro->drr_bonustype, drro->drr_bonuslen, tx);
2170 } else if (drro->drr_type != doi.doi_type ||
2171 drro->drr_blksz != doi.doi_data_block_size ||
2172 drro->drr_bonustype != doi.doi_bonus_type ||
2173 drro->drr_bonuslen != doi.doi_bonus_size) {
2174 /* currently allocated, but with different properties */
		err = dmu_object_reclaim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}
2184 dmu_object_set_checksum(rwa->os, drro->drr_object,
2185 drro->drr_checksumtype, tx);
2186 dmu_object_set_compress(rwa->os, drro->drr_object,
2187 drro->drr_compress, tx);
	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
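/*
 * Design note (a sketch of the intent, not normative): dmu_object_claim()
 * allocates a specific object number that is currently free on the
 * receiving side, while dmu_object_reclaim() reshapes an existing dnode
 * (type, block size, bonus) in place.  Either way the dnode ends up
 * matching the DRR_OBJECT record exactly, which is what lets the same
 * stream be replayed or resumed without diverging.
 */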
2212 receive_freeobjects(struct receive_writer_arg *rwa,
2213 struct drr_freeobjects *drrfo)
2218 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2219 return (SET_ERROR(EINVAL));
2221 for (obj = drrfo->drr_firstobj;
2222 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
2223 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		if (dmu_object_info(rwa->os, obj, NULL) != 0)
			continue;
2229 err = dmu_free_long_object(rwa->os, obj);
	if (next_err != ESRCH)
		return (next_err);
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
2245 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2246 !DMU_OT_IS_VALID(drrw->drr_type))
2247 return (SET_ERROR(EINVAL));
	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
2253 if (drrw->drr_object < rwa->last_object ||
2254 (drrw->drr_object == rwa->last_object &&
2255 drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
2258 rwa->last_object = drrw->drr_object;
2259 rwa->last_offset = drrw->drr_offset;
2261 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2262 return (SET_ERROR(EINVAL));
	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
2273 if (rwa->byteswap) {
2274 dmu_object_byteswap_t byteswap =
2275 DMU_OT_BYTESWAP(drrw->drr_type);
2276 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2277 DRR_WRITE_PAYLOAD_SIZE(drrw));
2280 /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
	dmu_buf_t *bonus;
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
2283 return (SET_ERROR(EINVAL));
2284 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
	/*
	 * Note: If the receive fails, we want the resume stream to start
2288 * with the same record that we last successfully received (as opposed
2289 * to the next record), so that we can verify that we are
2290 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_buf_rele(bonus, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
2307 receive_write_byref(struct receive_writer_arg *rwa,
2308 struct drr_write_byref *drrwbr)
2312 guid_map_entry_t gmesrch;
2313 guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
2318 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2319 return (SET_ERROR(EINVAL));
	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
2325 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2326 gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
2331 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
2351 dmu_write(rwa->os, drrwbr->drr_object,
2352 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2353 dmu_buf_rele(dbp, FTAG);
	/* See comment in receive_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
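/*
 * Illustrative note: DRR_WRITE_BYREF records appear only in deduplicated
 * streams (e.g. those produced by "zfs send -D").  The guid_to_ds_map
 * consulted above is populated as earlier snapshots in the same receive
 * session complete, via add_ds_to_guidmap() below.
 */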
2362 receive_write_embedded(struct receive_writer_arg *rwa,
2363 struct drr_write_embedded *drrwe, void *data)
	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
2389 dmu_write_embedded(rwa->os, drrwe->drr_object,
2390 drrwe->drr_offset, data, drrwe->drr_etype,
2391 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2392 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
	/* See comment in receive_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
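/*
 * Illustrative note: a DRR_WRITE_EMBEDDED payload is small enough (at
 * most BPE_PAYLOAD_SIZE bytes after compression, as checked above) to be
 * stored entirely inside a block pointer, so dmu_write_embedded() lands
 * the data in the dnode's blkptr itself rather than in a separately
 * allocated data block.
 */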
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
2405 dmu_buf_t *db, *db_spill;
2408 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2409 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2410 return (SET_ERROR(EINVAL));
2412 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2413 return (SET_ERROR(EINVAL));
2415 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}
2421 tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_spill(tx, db->db_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
2432 dmu_buf_will_dirty(db_spill, tx);
2434 if (db_spill->db_size < drrs->drr_length)
2435 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
2436 drrs->drr_length, tx));
2437 bcopy(data, db_spill->db_data, drrs->drr_length);
2439 dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
2448 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2452 if (drrf->drr_length != -1ULL &&
2453 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2454 return (SET_ERROR(EINVAL));
2456 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2457 return (SET_ERROR(EINVAL));
	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}
2465 /* used to destroy the drc_ds on error */
2467 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2469 if (drc->drc_resumable) {
2470 /* wait for our resume state to be written to disk */
2471 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2472 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
2475 dsl_dataset_name(drc->drc_ds, name);
2476 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2477 (void) dsl_destroy_head(name);
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap)
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	else
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
2499 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2504 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
	err = receive_read(ra, len, buf);
	if (err != 0)
		return (err);
	receive_cksum(ra, len, buf);
2510 /* note: rrd is NULL when reading the begin record's payload */
2511 if (ra->rrd != NULL) {
2512 ra->rrd->payload = buf;
2513 ra->rrd->payload_size = len;
2514 ra->rrd->bytes_read = ra->bytes_read;
2518 ra->prev_cksum = ra->cksum;
2520 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2521 err = receive_read(ra, sizeof (ra->next_rrd->header),
2522 &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;
	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
2529 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2530 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2531 ra->next_rrd = NULL;
2532 return (SET_ERROR(EINVAL));
	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);
2545 zio_cksum_t cksum_orig =
2546 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2547 zio_cksum_t *cksump =
2548 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);
2553 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2554 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2555 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2556 ra->next_rrd = NULL;
2557 return (SET_ERROR(ECKSUM));
	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
	return (0);
}
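/*
 * Checksum-chaining sketch (illustrative): each header carries the
 * running fletcher4 of everything in the stream before its own
 * drr_checksum field.  Re-feeding cksum_orig into the running checksum
 * above keeps our accumulator bit-for-bit in step with the sender's,
 * whether or not the header needed byteswapping.
 */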
2566 objlist_create(struct objlist *list)
2568 list_create(&list->list, sizeof (struct receive_objnode),
2569 offsetof(struct receive_objnode, node));
2570 list->last_lookup = 0;
2574 objlist_destroy(struct objlist *list)
2576 for (struct receive_objnode *n = list_remove_head(&list->list);
2577 n != NULL; n = list_remove_head(&list->list)) {
2578 kmem_free(n, sizeof (*n));
2580 list_destroy(&list->list);
/*
 * This function looks through the objlist to see if the specified object number
2585 * is contained in the objlist. In the process, it will remove all object
2586 * numbers in the list that are smaller than the specified object number. Thus,
2587 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending object number order.
 */
2592 objlist_exists(struct objlist *list, uint64_t object)
2594 struct receive_objnode *node = list_head(&list->list);
2595 ASSERT3U(object, >=, list->last_lookup);
2596 list->last_lookup = object;
2597 while (node != NULL && node->object < object) {
2598 VERIFY3P(node, ==, list_remove_head(&list->list));
2599 kmem_free(node, sizeof (*node));
2600 node = list_head(&list->list);
2602 return (node != NULL && node->object == object);
/*
 * The objlist is a list of object numbers stored in ascending order. However,
2607 * the insertion of new object numbers does not seek out the correct location to
2608 * store a new object number; instead, it appends it to the list for simplicity.
 * Thus, any users must take care to only insert new object numbers in
 * ascending order.
 */
2613 objlist_insert(struct objlist *list, uint64_t object)
2615 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2616 node->object = object;
2618 struct receive_objnode *last_object = list_tail(&list->list);
2619 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2620 ASSERT3U(node->object, >, last_objnum);
	list_insert_tail(&list->list, node);
}
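/*
 * A minimal usage sketch of the discipline described above, with
 * hypothetical object numbers (both inserts and lookups ascending):
 *
 *	struct objlist ol;
 *	objlist_create(&ol);
 *	objlist_insert(&ol, 5);
 *	objlist_insert(&ol, 9);
 *	(void) objlist_exists(&ol, 7);	-- B_FALSE; also trims the 5 node
 *	(void) objlist_exists(&ol, 9);	-- B_TRUE
 *	objlist_destroy(&ol);
 */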
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
2628 * We use the object ignore list to tell us whether or not to issue prefetches
2629 * for a given object. We do this for both correctness (in case the blocksize
2630 * of an object has changed) and performance (if the object doesn't exist, don't
2631 * needlessly try to issue prefetches). We also trim the list as we go through
2632 * the stream to prevent it from growing to an unbounded size.
2634 * The object numbers within will always be in sorted order, and any write
2635 * records we see will also be in sorted order, but they're not sorted with
2636 * respect to each other (i.e. we can get several object records before
2637 * receiving each object's write records). As a result, once we've reached a
2638 * given object number, we can safely remove any reference to lower object
2639 * numbers in the ignore list. In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
2644 receive_read_prefetch(struct receive_arg *ra,
2645 uint64_t object, uint64_t offset, uint64_t length)
2647 if (!objlist_exists(&ra->ignore_objlist, object)) {
2648 dmu_prefetch(ra->os, object, 1, offset, length,
2649 ZIO_PRIORITY_SYNC_READ);
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
2657 receive_read_record(struct receive_arg *ra)
	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
		if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}
		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->write_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		if (err != 0)
			return (err);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
		    buf);
		if (err != 0)
			kmem_free(buf, drrs->drr_length);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * Commit the records to the pool.
 */
2781 receive_process_record(struct receive_writer_arg *rwa,
2782 struct receive_record_arg *rrd)
2786 /* Processing in order, therefore bytes_read should be increasing. */
2787 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2788 rwa->bytes_read = rrd->bytes_read;
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		return (receive_freeobjects(rwa, drrfo));
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->write_buf);
		rrd->write_buf = NULL;
		rrd->payload = NULL;
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		return (receive_write_byref(rwa, drrwbr));
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		return (receive_free(rwa, drrf));
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
2854 receive_writer_thread(void *arg)
2856 struct receive_writer_arg *rwa = arg;
2857 struct receive_record_arg *rrd;
2858 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2859 rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
2865 if (rwa->err == 0) {
2866 rwa->err = receive_process_record(rwa, rrd);
2867 } else if (rrd->write_buf != NULL) {
2868 dmu_return_arcbuf(rrd->write_buf);
2869 rrd->write_buf = NULL;
2870 rrd->payload = NULL;
2871 } else if (rrd->payload != NULL) {
2872 kmem_free(rrd->payload, rrd->payload_size);
2873 rrd->payload = NULL;
2875 kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
2878 mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	thread_exit();
}
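/*
 * Shutdown-handshake sketch (illustrative): the reader enqueues a record
 * with eos_marker set, then blocks on rwa.cv until this thread has
 * drained the queue and set rwa->done under rwa->mutex; see the tail of
 * dmu_recv_stream() below.
 */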
2886 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2889 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2890 uint64_t dsobj = dmu_objset_id(ra->os);
2891 uint64_t resume_obj, resume_off;
2893 if (nvlist_lookup_uint64(begin_nvl,
2894 "resume_object", &resume_obj) != 0 ||
2895 nvlist_lookup_uint64(begin_nvl,
2896 "resume_offset", &resume_off) != 0) {
2897 return (SET_ERROR(EINVAL));
2899 VERIFY0(zap_lookup(mos, dsobj,
2900 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2901 if (resume_obj != val)
2902 return (SET_ERROR(EINVAL));
2903 VERIFY0(zap_lookup(mos, dsobj,
2904 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2905 if (resume_off != val)
		return (SET_ERROR(EINVAL));
	return (0);
}
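/*
 * Illustrative begin_nvl contents for a resumed stream (values
 * hypothetical):
 *
 *	resume_object = 0x1234	-- must equal DS_FIELD_RESUME_OBJECT
 *	resume_offset = 0x80000	-- must equal DS_FIELD_RESUME_OFFSET
 *
 * A mismatch means the stream was generated from a different resume
 * token than the one this dataset persisted, so we reject it.
 */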
/*
 * Read in the stream's records, one by one, and apply them to the pool.  There
2913 * are two threads involved; the thread that calls this function will spin up a
2914 * worker thread, read the records off the stream one by one, and issue
2915 * prefetches for any necessary indirect blocks. It will then push the records
2916 * onto an internal blocking queue. The worker thread will pull the records off
2917 * the queue, and actually write the data into the DMU. This way, the worker
2918 * thread doesn't have to wait for reads to complete, since everything it needs
2919 * (the indirect blocks) will be prefetched.
2921 * NB: callers *must* call dmu_recv_end() if this succeeds.
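 *
 * Pipeline sketch (illustrative) of the two threads described above:
 *
 *	reader (this thread)                 writer (receive_writer_thread)
 *	receive_read_record() --> bqueue --> receive_process_record()
 *	+ dmu_prefetch() of indirects        + dmu_tx_assign() and writes
 *
 * The queue is bounded (zfs_recv_queue_length bytes), so a slow writer
 * eventually backpressures the reader.
 */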
2924 dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2925 int cleanup_fd, uint64_t *action_handlep)
2928 struct receive_arg ra = { 0 };
2929 struct receive_writer_arg rwa = { 0 };
2931 nvlist_t *begin_nvl = NULL;
2933 ra.byteswap = drc->drc_byteswap;
2934 ra.cksum = drc->drc_cksum;
2939 if (dsl_dataset_is_zapified(drc->drc_ds)) {
2940 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2941 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2942 sizeof (ra.bytes_read), 1, &ra.bytes_read);
2945 objlist_create(&ra.ignore_objlist);
2947 /* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
2950 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
	/*
	 * Open the objset we are modifying.
	 */
2955 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2957 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2959 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2961 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2962 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2965 if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2975 if (*action_handlep == 0) {
2976 rwa.guid_to_ds_map =
2977 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2978 avl_create(rwa.guid_to_ds_map, guid_compare,
2979 sizeof (guid_map_entry_t),
2980 offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa.guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa.guid_to_ds_map);
		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
	}
2996 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2997 void *payload = NULL;
2998 if (payloadlen != 0)
2999 payload = kmem_alloc(payloadlen, KM_SLEEP);
3001 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
	if (err != 0) {
		if (payloadlen != 0)
			kmem_free(payload, payloadlen);
		goto out;
	}
3007 if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0)
			goto out;
	}
	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(&ra, begin_nvl);
		if (err != 0)
			goto out;
	}
3020 (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
3021 offsetof(struct receive_record_arg, node));
3022 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
3023 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa.os = ra.os;
	rwa.byteswap = drc->drc_byteswap;
3026 rwa.resumable = drc->drc_resumable;
3028 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
3029 TS_RUN, minclsyspri);
3031 * We're reading rwa.err without locks, which is safe since we are the
3032 * only reader, and the worker thread is the only writer. It's ok if we
3033 * miss a write for an iteration or two of the loop, since the writer
3034 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa.err is
3038 * non-zero. In that case, the writer thread will free the rrd we just
3039 * pushed. Second, if we're interrupted; in that case, either it's the
3040 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
3041 * has been handed off to the writer thread who will free it. Finally,
3042 * if receive_read_record fails or we're at the end of the stream, then
3043 * we free ra.rrd and exit.
3045 while (rwa.err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}
3051 ASSERT3P(ra.rrd, ==, NULL);
		ra.rrd = ra.next_rrd;
		ra.next_rrd = NULL;
3054 /* Allocates and loads header into ra.next_rrd */
3055 err = receive_read_record(&ra);
		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra.rrd, sizeof (*ra.rrd));
			ra.rrd = NULL;
			break;
		}
		bqueue_enqueue(&rwa.q, ra.rrd,
		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
		ra.rrd = NULL;
	}
3067 if (ra.next_rrd == NULL)
3068 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
3069 ra.next_rrd->eos_marker = B_TRUE;
3070 bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
3072 mutex_enter(&rwa.mutex);
	while (!rwa.done)
		cv_wait(&rwa.cv, &rwa.mutex);
3076 mutex_exit(&rwa.mutex);
3078 cv_destroy(&rwa.cv);
3079 mutex_destroy(&rwa.mutex);
	bqueue_destroy(&rwa.q);
	if (err == 0)
		err = rwa.err;

out:
	nvlist_free(begin_nvl);
3086 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
3087 zfs_onexit_fd_rele(cleanup_fd);
	if (err != 0) {
		/*
		 * Clean up references.  If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
	}
3099 objlist_destroy(&ra.ignore_objlist);
3104 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
3106 dmu_recv_cookie_t *drc = arg;
3107 dsl_pool_t *dp = dmu_tx_pool(tx);
3110 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3112 if (!drc->drc_newfs) {
3113 dsl_dataset_t *origin_head;
		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
3118 if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;
3127 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3130 dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
3136 error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3152 origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
3157 error = dsl_dataset_snapshot_check_impl(origin_head,
3158 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
3159 dsl_dataset_rele(origin_head, FTAG);
	} else {
		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3165 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
3172 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3174 dmu_recv_cookie_t *drc = arg;
3175 dsl_pool_t *dp = dmu_tx_pool(tx);
3177 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3178 tx, "snap=%s", drc->drc_tosnap);
3180 if (!drc->drc_newfs) {
3181 dsl_dataset_t *origin_head;
		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));
3186 if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;
3193 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3196 dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
3199 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3200 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);
		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
3211 dsl_dataset_snapshot_sync_impl(origin_head,
3212 drc->drc_tosnap, tx);
3214 /* set snapshot's creation time and guid */
3215 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3216 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3217 drc->drc_drrb->drr_creation_time;
3218 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3219 drc->drc_drrb->drr_toguid;
3220 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3221 ~DS_FLAG_INCONSISTENT;
3223 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3224 dsl_dataset_phys(origin_head)->ds_flags &=
3225 ~DS_FLAG_INCONSISTENT;
3227 drc->drc_newsnapobj =
3228 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3230 dsl_dataset_rele(origin_head, FTAG);
3231 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3233 if (drc->drc_owner != NULL)
3234 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;
3238 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3240 /* set snapshot's creation time and guid */
3241 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3242 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3243 drc->drc_drrb->drr_creation_time;
3244 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3245 drc->drc_drrb->drr_toguid;
3246 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3247 ~DS_FLAG_INCONSISTENT;
3249 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3250 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3251 if (dsl_dataset_has_resume_receive_state(ds)) {
3252 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3253 DS_FIELD_RESUME_FROMGUID, tx);
3254 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3255 DS_FIELD_RESUME_OBJECT, tx);
3256 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3257 DS_FIELD_RESUME_OFFSET, tx);
3258 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3259 DS_FIELD_RESUME_BYTES, tx);
3260 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3261 DS_FIELD_RESUME_TOGUID, tx);
3262 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3263 DS_FIELD_RESUME_TONAME, tx);
		}
		drc->drc_newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
3273 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3278 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3281 dsl_dataset_t *snapds;
3282 guid_map_entry_t *gmep;
3285 ASSERT(guid_map != NULL);
	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
3290 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
3293 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3294 gmep->gme_ds = snapds;
3295 avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}
	dsl_pool_rele(dp, FTAG);
	return (err);
}
3304 static int dmu_recv_end_modified_blocks = 3;
3307 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
3315 dsl_dataset_name(drc->drc_ds, name);
3316 zfs_destroy_unmount_origin(name);
3319 return (dsl_sync_task(drc->drc_tofs,
3320 dmu_recv_end_check, dmu_recv_end_sync, drc,
3321 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3325 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3327 return (dsl_sync_task(drc->drc_tofs,
3328 dmu_recv_end_check, dmu_recv_end_sync, drc,
3329 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3333 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3337 drc->drc_owner = owner;
	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);
	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
3346 } else if (drc->drc_guid_to_ds_map != NULL) {
3347 (void) add_ds_to_guidmap(drc->drc_tofs,
3348 drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
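
/*
 * Hypothetical caller, for illustration only -- code that must not
 * modify an objset mid-receive would guard itself like this:
 *
 *	if (dmu_objset_is_receiving(os))
 *		return (SET_ERROR(EBUSY));
 */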