4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 HybridCluster. All rights reserved.
27 * Copyright 2016 RackTop Systems.
28 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
29 * Copyright (c) 2019, Klara Inc.
30 * Copyright (c) 2019, Allan Jude
34 #include <sys/dmu_impl.h>
35 #include <sys/dmu_tx.h>
37 #include <sys/dnode.h>
38 #include <sys/zfs_context.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/dmu_traverse.h>
41 #include <sys/dsl_dataset.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/dsl_prop.h>
44 #include <sys/dsl_pool.h>
45 #include <sys/dsl_synctask.h>
46 #include <sys/spa_impl.h>
47 #include <sys/zfs_ioctl.h>
49 #include <sys/zio_checksum.h>
50 #include <sys/zfs_znode.h>
51 #include <zfs_fletcher.h>
54 #include <sys/zfs_onexit.h>
55 #include <sys/dmu_send.h>
56 #include <sys/dmu_recv.h>
57 #include <sys/dsl_destroy.h>
58 #include <sys/blkptr.h>
59 #include <sys/dsl_bookmark.h>
60 #include <sys/zfeature.h>
61 #include <sys/bqueue.h>
63 #include <sys/policy.h>
64 #include <sys/objlist.h>
66 #include <sys/zfs_vfsops.h>
69 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
70 static int zfs_send_corrupt_data = B_FALSE;
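/*
 * (The value 0x2f5baddb10c is hexspeak for "zfs badd bloc"; see the
 * corrupt-data path in do_dump() below, which fills the block with it.)
 */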
72 * This tunable controls the amount of data (measured in bytes) that will be
73 * prefetched by zfs send. If the main thread is blocking on reads that haven't
74 * completed, this variable might need to be increased. If instead the main
75 * thread is issuing new reads because the prefetches have fallen out of the
76 * cache, this may need to be decreased.
78 static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
80 * This tunable controls the length of the queues that zfs send worker threads
81 * use to communicate. If the send_main_thread is blocking on these queues,
82 * this variable may need to be increased. If there is a significant slowdown
83 * at the start of a send as these threads consume all the available IO
84 * resources, this variable may need to be decreased.
86 static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
88 * These tunables control the fill fraction of the queues used by zfs send. The fill
89 * fraction controls the frequency with which threads have to be cv_signaled.
90 * If a lot of cpu time is being spent on cv_signal, then these should be tuned
91 * down. If the queues empty before the signaled thread can catch up, then
92 * these should be tuned up.
94 static uint_t zfs_send_queue_ff = 20;
95 static uint_t zfs_send_no_prefetch_queue_ff = 20;
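/*
 * Both pairs of tunables above (length and fill fraction) are handed to
 * bqueue_init() when the send pipeline's queues are created; see
 * setup_to_thread() below, where zfs_send_no_prefetch_queue_ff and
 * zfs_send_no_prefetch_queue_length size the traversal thread's queue.
 */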
98 * Use this to override the recordsize calculation for fast zfs send estimates.
100 static uint_t zfs_override_estimate_recordsize = 0;
102 /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
103 static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
105 /* Set this tunable to FALSE to disable sending unmodified spill blocks. */
106 static int zfs_send_unmodified_spill_blocks = B_TRUE;
108 static inline boolean_t
109 overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
111 uint64_t temp = a * b;
112 if (b != 0 && temp / b != a)
	return (B_FALSE);
*c = temp;
return (B_TRUE);
}
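/*
 * Usage sketch (illustrative): callers treat a wrapped product as "nothing
 * to send", e.g.
 *
 *	uint64_t offset;
 *	if (!overflow_multiply(blkid, datablksz, &offset))
 *		return (0);
 */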
118 struct send_thread_arg {
120 objset_t *os; /* Objset to traverse */
121 uint64_t fromtxg; /* Traverse from this txg */
122 int flags; /* flags to pass to traverse_dataset */
125 zbookmark_phys_t resume;
126 uint64_t *num_blocks_visited;
129 struct redact_list_thread_arg {
132 zbookmark_phys_t resume;
133 redaction_list_t *rl;
134 boolean_t mark_redact;
136 uint64_t *num_blocks_visited;
139 struct send_merge_thread_arg {
142 struct redact_list_thread_arg *from_arg;
143 struct send_thread_arg *to_arg;
144 struct redact_list_thread_arg *redact_arg;
150 boolean_t eos_marker; /* Marks the end of the stream */
152 uint64_t start_blkid;
155 enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
156 PREVIOUSLY_REDACTED} type;
159 dmu_object_type_t obj_type;
160 uint32_t datablksz; // logical size
161 uint32_t datasz; // payload size
167 boolean_t io_outstanding;
168 boolean_t io_compressed;
176 * This is a pointer because embedding it in the
177 * struct causes these structures to be massively larger
178 * for all range types; this makes the code much less memory efficient.
194 * The list of data whose inclusion in a send stream can be pending from
195 * one call to backup_cb to another. Multiple calls to dump_free(),
196 * dump_freeobjects(), and dump_redact() can be aggregated into a single
197 * DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
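/*
 * For example (illustrative): two adjacent dump_free() calls for
 * (object 5, offset 0, length 128K) and (object 5, offset 128K, length 128K)
 * leave a single pending DRR_FREE covering bytes [0, 256K), which is written
 * to the stream only once a non-adjacent or differently-typed record arrives.
 */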
206 typedef struct dmu_send_cookie {
207 dmu_replay_record_t *dsc_drr;
208 dmu_send_outparams_t *dsc_dso;
213 uint64_t dsc_fromtxg;
215 dmu_pendop_t dsc_pending_op;
216 uint64_t dsc_featureflags;
217 uint64_t dsc_last_data_object;
218 uint64_t dsc_last_data_offset;
219 uint64_t dsc_resume_object;
220 uint64_t dsc_resume_offset;
221 boolean_t dsc_sent_begin;
222 boolean_t dsc_sent_end;
225 static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
228 range_free(struct send_range *range)
230 if (range->type == OBJECT) {
231 size_t size = sizeof (dnode_phys_t) *
232 (range->sru.object.dnp->dn_extra_slots + 1);
233 kmem_free(range->sru.object.dnp, size);
234 } else if (range->type == DATA) {
235 mutex_enter(&range->sru.data.lock);
236 while (range->sru.data.io_outstanding)
237 cv_wait(&range->sru.data.cv, &range->sru.data.lock);
238 if (range->sru.data.abd != NULL)
239 abd_free(range->sru.data.abd);
240 if (range->sru.data.abuf != NULL) {
241 arc_buf_destroy(range->sru.data.abuf,
242 &range->sru.data.abuf);
244 mutex_exit(&range->sru.data.lock);
246 cv_destroy(&range->sru.data.cv);
247 mutex_destroy(&range->sru.data.lock);
249 kmem_free(range, sizeof (*range));
253 * For all record types except BEGIN, fill in the checksum (overlaid in
254 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
255 * up to the start of the checksum itself.
258 dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
260 dmu_send_outparams_t *dso = dscp->dsc_dso;
261 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
262 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
263 (void) fletcher_4_incremental_native(dscp->dsc_drr,
264 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
266 if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
267 dscp->dsc_sent_begin = B_TRUE;
269 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
270 drr_checksum.drr_checksum));
271 dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
273 if (dscp->dsc_drr->drr_type == DRR_END) {
274 dscp->dsc_sent_end = B_TRUE;
276 (void) fletcher_4_incremental_native(&dscp->dsc_drr->
277 drr_u.drr_checksum.drr_checksum,
278 sizeof (zio_cksum_t), &dscp->dsc_zc);
279 *dscp->dsc_off += sizeof (dmu_replay_record_t);
280 dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
281 sizeof (dmu_replay_record_t), dso->dso_arg);
282 if (dscp->dsc_err != 0)
283 return (SET_ERROR(EINTR));
284 if (payload_len != 0) {
285 *dscp->dsc_off += payload_len;
287 * payload is null when dso_dryrun == B_TRUE (i.e. when we're
288 * doing a send size calculation)
290 if (payload != NULL) {
291 (void) fletcher_4_incremental_native(
292 payload, payload_len, &dscp->dsc_zc);
296 * The code does not rely on this (len being a multiple of 8).
297 * We keep this assertion because of the corresponding assertion
298 * in receive_read(). Keeping this assertion ensures that we do
299 * not inadvertently break backwards compatibility (causing the
300 * assertion in receive_read() to trigger on old software).
302 * Raw sends cannot be received on old software, and so can
303 * bypass this assertion.
306 ASSERT((payload_len % 8 == 0) ||
307 (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
309 dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
310 payload_len, dso->dso_arg);
311 if (dscp->dsc_err != 0)
312 return (SET_ERROR(EINTR));
318 * Fill in the drr_free struct, or perform aggregation if the previous record is
319 * also a free record, and the two are adjacent.
321 * Note that we send free records even for a full send, because we want to be
322 * able to receive a full send as a clone, which requires a list of all the free
323 * and freeobject records that were generated on the source.
326 dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
329 struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
332 * When we receive a free record, dbuf_free_range() assumes
333 * that the receiving system doesn't have any dbufs in the range
334 * being freed. This is always true because there is a one-record
335 * constraint: we only send one WRITE record for any given
336 * object,offset. We know that the one-record constraint is
337 * true because we always send data in increasing order by object,offset.
340 * If the increasing-order constraint ever changes, we should find
341 * another way to assert that the one-record constraint is still satisfied.
344 ASSERT(object > dscp->dsc_last_data_object ||
345 (object == dscp->dsc_last_data_object &&
346 offset > dscp->dsc_last_data_offset));
349 * If there is a pending op, but it's not PENDING_FREE, push it out,
350 * since free block aggregation can only be done for blocks of the
351 * same type (i.e., DRR_FREE records can only be aggregated with
352 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
353 * aggregated with other DRR_FREEOBJECTS records).
355 if (dscp->dsc_pending_op != PENDING_NONE &&
356 dscp->dsc_pending_op != PENDING_FREE) {
357 if (dump_record(dscp, NULL, 0) != 0)
358 return (SET_ERROR(EINTR));
359 dscp->dsc_pending_op = PENDING_NONE;
362 if (dscp->dsc_pending_op == PENDING_FREE) {
364 * Check to see whether this free block can be aggregated
367 if (drrf->drr_object == object && drrf->drr_offset +
368 drrf->drr_length == offset) {
369 if (offset + length < offset || length == UINT64_MAX)
370 drrf->drr_length = UINT64_MAX;
372 drrf->drr_length += length;
375 /* not a continuation. Push out pending record */
376 if (dump_record(dscp, NULL, 0) != 0)
377 return (SET_ERROR(EINTR));
378 dscp->dsc_pending_op = PENDING_NONE;
381 /* create a FREE record and make it pending */
382 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
383 dscp->dsc_drr->drr_type = DRR_FREE;
384 drrf->drr_object = object;
385 drrf->drr_offset = offset;
386 if (offset + length < offset)
387 drrf->drr_length = DMU_OBJECT_END;
389 drrf->drr_length = length;
390 drrf->drr_toguid = dscp->dsc_toguid;
391 if (length == DMU_OBJECT_END) {
392 if (dump_record(dscp, NULL, 0) != 0)
393 return (SET_ERROR(EINTR));
395 dscp->dsc_pending_op = PENDING_FREE;
402 * Fill in the drr_redact struct, or perform aggregation if the previous record
403 * is also a redaction record, and the two are adjacent.
406 dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
409 struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
412 * If there is a pending op, but it's not PENDING_REDACT, push it out,
413 * since free block aggregation can only be done for blocks of the
414 * same type (i.e., DRR_REDACT records can only be aggregated with
415 * other DRR_REDACT records).
417 if (dscp->dsc_pending_op != PENDING_NONE &&
418 dscp->dsc_pending_op != PENDING_REDACT) {
419 if (dump_record(dscp, NULL, 0) != 0)
420 return (SET_ERROR(EINTR));
421 dscp->dsc_pending_op = PENDING_NONE;
424 if (dscp->dsc_pending_op == PENDING_REDACT) {
426 * Check to see whether this redacted block can be aggregated
429 if (drrr->drr_object == object && drrr->drr_offset +
430 drrr->drr_length == offset) {
431 drrr->drr_length += length;
434 /* not a continuation. Push out pending record */
435 if (dump_record(dscp, NULL, 0) != 0)
436 return (SET_ERROR(EINTR));
437 dscp->dsc_pending_op = PENDING_NONE;
440 /* create a REDACT record and make it pending */
441 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
442 dscp->dsc_drr->drr_type = DRR_REDACT;
443 drrr->drr_object = object;
444 drrr->drr_offset = offset;
445 drrr->drr_length = length;
446 drrr->drr_toguid = dscp->dsc_toguid;
447 dscp->dsc_pending_op = PENDING_REDACT;
453 dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
454 uint64_t offset, int lsize, int psize, const blkptr_t *bp,
455 boolean_t io_compressed, void *data)
457 uint64_t payload_size;
458 boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
459 struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
462 * We send data in increasing object, offset order.
463 * See comment in dump_free() for details.
465 ASSERT(object > dscp->dsc_last_data_object ||
466 (object == dscp->dsc_last_data_object &&
467 offset > dscp->dsc_last_data_offset));
468 dscp->dsc_last_data_object = object;
469 dscp->dsc_last_data_offset = offset + lsize - 1;
472 * If there is any kind of pending aggregation (currently either
473 * a grouping of free objects or free blocks), push it out to
474 * the stream, since aggregation can't be done across operations
475 * of different types.
477 if (dscp->dsc_pending_op != PENDING_NONE) {
478 if (dump_record(dscp, NULL, 0) != 0)
479 return (SET_ERROR(EINTR));
480 dscp->dsc_pending_op = PENDING_NONE;
482 /* write a WRITE record */
483 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
484 dscp->dsc_drr->drr_type = DRR_WRITE;
485 drrw->drr_object = object;
486 drrw->drr_type = type;
487 drrw->drr_offset = offset;
488 drrw->drr_toguid = dscp->dsc_toguid;
489 drrw->drr_logical_size = lsize;
491 /* only set the compression fields if the buf is compressed or raw */
492 boolean_t compressed =
493 (bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
494 io_compressed : lsize != psize);
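/*
 * That is: when a bp is supplied, the payload counts as compressed only if
 * the block is stored compressed on disk and was actually read compressed;
 * without a bp (e.g. the split-block path in do_dump(), which passes
 * bp == NULL), fall back to comparing logical and physical sizes.
 */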
495 if (raw || compressed) {
497 ASSERT(raw || dscp->dsc_featureflags &
498 DMU_BACKUP_FEATURE_COMPRESSED);
499 ASSERT(!BP_IS_EMBEDDED(bp));
500 ASSERT3S(psize, >, 0);
503 ASSERT(BP_IS_PROTECTED(bp));
506 * This is a raw protected block so we need to pass
507 * along everything the receiving side will need to
508 * interpret this block, including the byteswap, salt, IV, and MAC.
511 if (BP_SHOULD_BYTESWAP(bp))
512 drrw->drr_flags |= DRR_RAW_BYTESWAP;
513 zio_crypt_decode_params_bp(bp, drrw->drr_salt,
515 zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
517 /* this is a compressed block */
518 ASSERT(dscp->dsc_featureflags &
519 DMU_BACKUP_FEATURE_COMPRESSED);
520 ASSERT(!BP_SHOULD_BYTESWAP(bp));
521 ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
522 ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
523 ASSERT3S(lsize, >=, psize);
526 /* set fields common to compressed and raw sends */
527 drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
528 drrw->drr_compressed_size = psize;
529 payload_size = drrw->drr_compressed_size;
531 payload_size = drrw->drr_logical_size;
534 if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
536 * There's no pre-computed checksum for partial-block writes,
537 * embedded BP's, or encrypted BP's that are being sent as
538 * plaintext, so (like fletcher4-checksummed blocks) userland
539 * will have to compute a dedup-capable checksum itself.
541 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
543 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
544 if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
545 ZCHECKSUM_FLAG_DEDUP)
546 drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
547 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
548 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
549 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
550 DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
551 drrw->drr_key.ddk_cksum = bp->blk_cksum;
554 if (dump_record(dscp, data, payload_size) != 0)
555 return (SET_ERROR(EINTR));
560 dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
561 int blksz, const blkptr_t *bp)
563 char buf[BPE_PAYLOAD_SIZE];
564 struct drr_write_embedded *drrw =
565 &(dscp->dsc_drr->drr_u.drr_write_embedded);
567 if (dscp->dsc_pending_op != PENDING_NONE) {
568 if (dump_record(dscp, NULL, 0) != 0)
569 return (SET_ERROR(EINTR));
570 dscp->dsc_pending_op = PENDING_NONE;
573 ASSERT(BP_IS_EMBEDDED(bp));
575 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
576 dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
577 drrw->drr_object = object;
578 drrw->drr_offset = offset;
579 drrw->drr_length = blksz;
580 drrw->drr_toguid = dscp->dsc_toguid;
581 drrw->drr_compression = BP_GET_COMPRESS(bp);
582 drrw->drr_etype = BPE_GET_ETYPE(bp);
583 drrw->drr_lsize = BPE_GET_LSIZE(bp);
584 drrw->drr_psize = BPE_GET_PSIZE(bp);
586 decode_embedded_bp_compressed(bp, buf);
588 uint32_t psize = drrw->drr_psize;
589 uint32_t rsize = P2ROUNDUP(psize, 8);
592 memset(buf + psize, 0, rsize - psize);
594 if (dump_record(dscp, buf, rsize) != 0)
595 return (SET_ERROR(EINTR));
600 dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
603 struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
604 uint64_t blksz = BP_GET_LSIZE(bp);
605 uint64_t payload_size = blksz;
607 if (dscp->dsc_pending_op != PENDING_NONE) {
608 if (dump_record(dscp, NULL, 0) != 0)
609 return (SET_ERROR(EINTR));
610 dscp->dsc_pending_op = PENDING_NONE;
613 /* write a SPILL record */
614 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
615 dscp->dsc_drr->drr_type = DRR_SPILL;
616 drrs->drr_object = object;
617 drrs->drr_length = blksz;
618 drrs->drr_toguid = dscp->dsc_toguid;
620 /* See comment in dump_dnode() for full details */
621 if (zfs_send_unmodified_spill_blocks &&
622 (bp->blk_birth <= dscp->dsc_fromtxg)) {
623 drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
626 /* handle raw send fields */
627 if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
628 ASSERT(BP_IS_PROTECTED(bp));
630 if (BP_SHOULD_BYTESWAP(bp))
631 drrs->drr_flags |= DRR_RAW_BYTESWAP;
632 drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
633 drrs->drr_compressed_size = BP_GET_PSIZE(bp);
634 zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
635 zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
636 payload_size = drrs->drr_compressed_size;
639 if (dump_record(dscp, data, payload_size) != 0)
640 return (SET_ERROR(EINTR));
645 dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
647 struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
648 uint64_t maxobj = DNODES_PER_BLOCK *
649 (DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
652 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
653 * leading to zfs recv never completing. To avoid this issue, don't
654 * send FREEOBJECTS records for object IDs which cannot exist on the
658 if (maxobj <= firstobj)
661 if (maxobj < firstobj + numobjs)
662 numobjs = maxobj - firstobj;
666 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
667 * push it out, since free block aggregation can only be done for
668 * blocks of the same type (i.e., DRR_FREE records can only be
669 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
670 * can only be aggregated with other DRR_FREEOBJECTS records).
672 if (dscp->dsc_pending_op != PENDING_NONE &&
673 dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
674 if (dump_record(dscp, NULL, 0) != 0)
675 return (SET_ERROR(EINTR));
676 dscp->dsc_pending_op = PENDING_NONE;
679 if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
681 * See whether this free object array can be aggregated
684 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
685 drrfo->drr_numobjs += numobjs;
688 /* can't be aggregated. Push out pending record */
689 if (dump_record(dscp, NULL, 0) != 0)
690 return (SET_ERROR(EINTR));
691 dscp->dsc_pending_op = PENDING_NONE;
695 /* write a FREEOBJECTS record */
696 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
697 dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
698 drrfo->drr_firstobj = firstobj;
699 drrfo->drr_numobjs = numobjs;
700 drrfo->drr_toguid = dscp->dsc_toguid;
702 dscp->dsc_pending_op = PENDING_FREEOBJECTS;
708 dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
711 struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
714 if (object < dscp->dsc_resume_object) {
716 * Note: when resuming, we will visit all the dnodes in
717 * the block of dnodes that we are resuming from. In
718 * this case it's unnecessary to send the dnodes prior to
719 * the one we are resuming from. We should be at most one
720 * block's worth of dnodes behind the resume point.
722 ASSERT3U(dscp->dsc_resume_object - object, <,
723 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
727 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
728 return (dump_freeobjects(dscp, object, 1));
730 if (dscp->dsc_pending_op != PENDING_NONE) {
731 if (dump_record(dscp, NULL, 0) != 0)
732 return (SET_ERROR(EINTR));
733 dscp->dsc_pending_op = PENDING_NONE;
736 /* write an OBJECT record */
737 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
738 dscp->dsc_drr->drr_type = DRR_OBJECT;
739 drro->drr_object = object;
740 drro->drr_type = dnp->dn_type;
741 drro->drr_bonustype = dnp->dn_bonustype;
742 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
743 drro->drr_bonuslen = dnp->dn_bonuslen;
744 drro->drr_dn_slots = dnp->dn_extra_slots + 1;
745 drro->drr_checksumtype = dnp->dn_checksum;
746 drro->drr_compress = dnp->dn_compress;
747 drro->drr_toguid = dscp->dsc_toguid;
749 if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
750 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
751 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
753 bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
755 if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
756 ASSERT(BP_IS_ENCRYPTED(bp));
758 if (BP_SHOULD_BYTESWAP(bp))
759 drro->drr_flags |= DRR_RAW_BYTESWAP;
761 /* needed for reconstructing dnp on recv side */
762 drro->drr_maxblkid = dnp->dn_maxblkid;
763 drro->drr_indblkshift = dnp->dn_indblkshift;
764 drro->drr_nlevels = dnp->dn_nlevels;
765 drro->drr_nblkptr = dnp->dn_nblkptr;
768 * Since we encrypt the entire bonus area, the (raw) part
769 * beyond the bonuslen is actually nonzero, so we need to send it.
773 if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
774 return (SET_ERROR(EINVAL));
775 drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
776 bonuslen = drro->drr_raw_bonuslen;
781 * DRR_OBJECT_SPILL is set for every dnode which references a
782 * spill block. This allows the receiving pool to definitively
783 * determine when a spill block should be kept or freed.
785 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
786 drro->drr_flags |= DRR_OBJECT_SPILL;
788 if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
789 return (SET_ERROR(EINTR));
791 /* Free anything past the end of the file. */
792 if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
793 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
794 return (SET_ERROR(EINTR));
797 * Send DRR_SPILL records for unmodified spill blocks. This is useful
798 * because changing certain attributes of the object (e.g. blocksize)
799 * can cause old versions of ZFS to incorrectly remove a spill block.
800 * Including these records in the stream forces an up-to-date version
801 * to always be written, ensuring they're never lost. Current versions
802 * of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
803 * ignore these unmodified spill blocks.
805 if (zfs_send_unmodified_spill_blocks &&
806 (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
807 (DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
808 struct send_range record;
809 blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
811 memset(&record, 0, sizeof (struct send_range));
813 record.object = object;
814 record.eos_marker = B_FALSE;
815 record.start_blkid = DMU_SPILL_BLKID;
816 record.end_blkid = record.start_blkid + 1;
817 record.sru.data.bp = *bp;
818 record.sru.data.obj_type = dnp->dn_type;
819 record.sru.data.datablksz = BP_GET_LSIZE(bp);
821 if (do_dump(dscp, &record) != 0)
822 return (SET_ERROR(EINTR));
825 if (dscp->dsc_err != 0)
826 return (SET_ERROR(EINTR));
832 dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
833 uint64_t firstobj, uint64_t numslots)
835 struct drr_object_range *drror =
836 &(dscp->dsc_drr->drr_u.drr_object_range);
838 /* we only use this record type for raw sends */
839 ASSERT(BP_IS_PROTECTED(bp));
840 ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
841 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
842 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
843 ASSERT0(BP_GET_LEVEL(bp));
845 if (dscp->dsc_pending_op != PENDING_NONE) {
846 if (dump_record(dscp, NULL, 0) != 0)
847 return (SET_ERROR(EINTR));
848 dscp->dsc_pending_op = PENDING_NONE;
851 memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
852 dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
853 drror->drr_firstobj = firstobj;
854 drror->drr_numslots = numslots;
855 drror->drr_toguid = dscp->dsc_toguid;
856 if (BP_SHOULD_BYTESWAP(bp))
857 drror->drr_flags |= DRR_RAW_BYTESWAP;
858 zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
859 zio_crypt_decode_mac_bp(bp, drror->drr_mac);
861 if (dump_record(dscp, NULL, 0) != 0)
862 return (SET_ERROR(EINTR));
867 send_do_embed(const blkptr_t *bp, uint64_t featureflags)
869 if (!BP_IS_EMBEDDED(bp))
873 * Compression function must be legacy, or explicitly enabled.
875 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
876 !(featureflags & DMU_BACKUP_FEATURE_LZ4)))
880 * If we have not set the ZSTD feature flag, we can't send ZSTD
881 * compressed embedded blocks, as the receiver may not support them.
883 if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
884 !(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
888 * Embed type must be explicitly enabled.
890 switch (BPE_GET_ETYPE(bp)) {
891 case BP_EMBEDDED_TYPE_DATA:
892 if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
902 * This function actually handles figuring out what kind of record needs to be
903 * dumped, and calling the appropriate helper function. In most cases,
904 * the data has already been read by send_reader_thread().
907 do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
910 switch (range->type) {
912 err = dump_dnode(dscp, &range->sru.object.bp, range->object,
913 range->sru.object.dnp);
916 ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
917 if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
920 uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
922 uint64_t firstobj = range->start_blkid * epb;
923 err = dump_object_range(dscp, &range->sru.object_range.bp,
928 struct srr *srrp = &range->sru.redact;
929 err = dump_redact(dscp, range->object, range->start_blkid *
930 srrp->datablksz, (range->end_blkid - range->start_blkid) *
935 struct srd *srdp = &range->sru.data;
936 blkptr_t *bp = &srdp->bp;
938 dmu_objset_spa(dscp->dsc_os);
940 ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
941 ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
942 if (BP_GET_TYPE(bp) == DMU_OT_SA) {
943 arc_flags_t aflags = ARC_FLAG_WAIT;
944 zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
946 if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
947 ASSERT(BP_IS_PROTECTED(bp));
948 zioflags |= ZIO_FLAG_RAW;
952 ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
953 zb.zb_objset = dmu_objset_id(dscp->dsc_os);
954 zb.zb_object = range->object;
956 zb.zb_blkid = range->start_blkid;
958 arc_buf_t *abuf = NULL;
959 if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
960 bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
961 zioflags, &aflags, &zb) != 0)
962 return (SET_ERROR(EIO));
964 err = dump_spill(dscp, bp, zb.zb_object,
965 (abuf == NULL ? NULL : abuf->b_data));
967 arc_buf_destroy(abuf, &abuf);
970 if (send_do_embed(bp, dscp->dsc_featureflags)) {
971 err = dump_write_embedded(dscp, range->object,
972 range->start_blkid * srdp->datablksz,
973 srdp->datablksz, bp);
976 ASSERT(range->object > dscp->dsc_resume_object ||
977 (range->object == dscp->dsc_resume_object &&
978 range->start_blkid * srdp->datablksz >=
979 dscp->dsc_resume_offset));
980 /* it's a level-0 block of a regular object */
982 mutex_enter(&srdp->lock);
983 while (srdp->io_outstanding)
984 cv_wait(&srdp->cv, &srdp->lock);
986 mutex_exit(&srdp->lock);
989 if (zfs_send_corrupt_data &&
990 !dscp->dsc_dso->dso_dryrun) {
992 * Send a block filled with 0x"zfs badd bloc"
994 srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
995 ARC_BUFC_DATA, srdp->datablksz);
997 for (ptr = srdp->abuf->b_data;
998 (char *)ptr < (char *)srdp->abuf->b_data +
999 srdp->datablksz; ptr++)
1000 *ptr = 0x2f5baddb10cULL;
1002 return (SET_ERROR(EIO));
1006 ASSERT(dscp->dsc_dso->dso_dryrun ||
1007 srdp->abuf != NULL || srdp->abd != NULL);
1009 uint64_t offset = range->start_blkid * srdp->datablksz;
1012 if (srdp->abd != NULL) {
1013 data = abd_to_buf(srdp->abd);
1014 ASSERT3P(srdp->abuf, ==, NULL);
1015 } else if (srdp->abuf != NULL) {
1016 data = srdp->abuf->b_data;
1020 * If we have large blocks stored on disk but the send flags
1021 * don't allow us to send large blocks, we split the data from
1022 * the arc buf into chunks.
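 * For example, a 1M block sent to a stream without
 * DMU_BACKUP_FEATURE_LARGE_BLOCKS is emitted as eight 128K
 * (SPA_OLD_MAXBLOCKSIZE) DRR_WRITE records.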
1024 if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
1025 !(dscp->dsc_featureflags &
1026 DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
1027 while (srdp->datablksz > 0 && err == 0) {
1028 int n = MIN(srdp->datablksz,
1029 SPA_OLD_MAXBLOCKSIZE);
1030 err = dmu_dump_write(dscp, srdp->obj_type,
1031 range->object, offset, n, n, NULL, B_FALSE,
1035 * When doing a dry run, data==NULL is used as a sentinel value by
1037 * dmu_dump_write()->dump_record().
1041 srdp->datablksz -= n;
1044 err = dmu_dump_write(dscp, srdp->obj_type,
1045 range->object, offset,
1046 srdp->datablksz, srdp->datasz, bp,
1047 srdp->io_compressed, data);
1052 struct srh *srhp = &range->sru.hole;
1053 if (range->object == DMU_META_DNODE_OBJECT) {
1054 uint32_t span = srhp->datablksz >> DNODE_SHIFT;
1055 uint64_t first_obj = range->start_blkid * span;
1056 uint64_t numobj = range->end_blkid * span - first_obj;
1057 return (dump_freeobjects(dscp, first_obj, numobj));
1059 uint64_t offset = 0;
1062 * If this multiply overflows, we don't need to send this block.
1063 * Even if it has a birth time, it can only ever be a hole, so
1064 * we don't need to send records for it.
1066 if (!overflow_multiply(range->start_blkid, srhp->datablksz,
1072 if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
1075 return (dump_free(dscp, range->object, offset, len));
1078 panic("Invalid range type in do_dump: %d", range->type);
1083 static struct send_range *
1084 range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
1085 uint64_t end_blkid, boolean_t eos)
1087 struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
1089 range->object = object;
1090 range->start_blkid = start_blkid;
1091 range->end_blkid = end_blkid;
1092 range->eos_marker = eos;
1094 range->sru.data.abd = NULL;
1095 range->sru.data.abuf = NULL;
1096 mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
1097 cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
1098 range->sru.data.io_outstanding = 0;
1099 range->sru.data.io_err = 0;
1100 range->sru.data.io_compressed = B_FALSE;
1106 * This is the callback function to traverse_dataset that acts as a worker
1107 * thread for dmu_send_impl.
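 * For each bookmark visited it enqueues at most one send_range on sta->q:
 * an OBJECT record for dnode-level entries, an OBJECT_RANGE record for
 * level-0 blocks of the meta dnode (used only by raw sends), or a DATA,
 * HOLE, or REDACT record for ordinary level-0 blocks.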
1110 send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1111 const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
1114 struct send_thread_arg *sta = arg;
1115 struct send_range *record;
1117 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
1118 zb->zb_object >= sta->resume.zb_object);
1121 * All bps of an encrypted os should have the encryption bit set.
1122 * If this is not true it indicates tampering and we report an error.
1124 if (sta->os->os_encrypted &&
1125 !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
1126 spa_log_error(spa, zb, &bp->blk_birth);
1127 zfs_panic_recover("unencrypted block in encrypted "
1128 "object set %llu", dmu_objset_id(sta->os));
1129 return (SET_ERROR(EIO));
1133 return (SET_ERROR(EINTR));
1134 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
1135 DMU_OBJECT_IS_SPECIAL(zb->zb_object))
1137 atomic_inc_64(sta->num_blocks_visited);
1139 if (zb->zb_level == ZB_DNODE_LEVEL) {
1140 if (zb->zb_object == DMU_META_DNODE_OBJECT)
1142 record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
1143 record->sru.object.bp = *bp;
1144 size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
1145 record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
1146 memcpy(record->sru.object.dnp, dnp, size);
1147 bqueue_enqueue(&sta->q, record, sizeof (*record));
1150 if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
1152 record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
1153 zb->zb_blkid + 1, B_FALSE);
1154 record->sru.object_range.bp = *bp;
1155 bqueue_enqueue(&sta->q, record, sizeof (*record));
1158 if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
1160 if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
1163 uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
1167 * If this multiply overflows, we don't need to send this block.
1168 * Even if it has a birth time, it can only ever be a hole, so
1169 * we don't need to send records for it.
1171 if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
1172 DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
1173 span * zb->zb_blkid > dnp->dn_maxblkid)) {
1174 ASSERT(BP_IS_HOLE(bp));
1178 if (zb->zb_blkid == DMU_SPILL_BLKID)
1179 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
1181 enum type record_type = DATA;
1184 else if (BP_IS_REDACTED(bp))
1185 record_type = REDACT;
1189 record = range_alloc(record_type, zb->zb_object, start,
1190 (start + span < start ? 0 : start + span), B_FALSE);
1192 uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
1193 BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
1195 if (BP_IS_HOLE(bp)) {
1196 record->sru.hole.datablksz = datablksz;
1197 } else if (BP_IS_REDACTED(bp)) {
1198 record->sru.redact.datablksz = datablksz;
1200 record->sru.data.datablksz = datablksz;
1201 record->sru.data.obj_type = dnp->dn_type;
1202 record->sru.data.bp = *bp;
1205 bqueue_enqueue(&sta->q, record, sizeof (*record));
1209 struct redact_list_cb_arg {
1210 uint64_t *num_blocks_visited;
1213 boolean_t mark_redact;
1217 redact_list_cb(redact_block_phys_t *rb, void *arg)
1219 struct redact_list_cb_arg *rlcap = arg;
1221 atomic_inc_64(rlcap->num_blocks_visited);
1225 struct send_range *data = range_alloc(REDACT, rb->rbp_object,
1226 rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
1227 ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
1228 if (rlcap->mark_redact) {
1229 data->type = REDACT;
1230 data->sru.redact.datablksz = redact_block_get_size(rb);
1232 data->type = PREVIOUSLY_REDACTED;
1234 bqueue_enqueue(rlcap->q, data, sizeof (*data));
1240 * This function kicks off the traverse_dataset. It also handles setting the
1241 * error code of the thread in case something goes wrong, and pushes the End of
1242 * Stream record when the traverse_dataset call has finished.
1244 static __attribute__((noreturn)) void
1245 send_traverse_thread(void *arg)
1247 struct send_thread_arg *st_arg = arg;
1249 struct send_range *data;
1250 fstrans_cookie_t cookie = spl_fstrans_mark();
1252 err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
1253 st_arg->fromtxg, &st_arg->resume,
1254 st_arg->flags, send_cb, st_arg);
1257 st_arg->error_code = err;
1258 data = range_alloc(DATA, 0, 0, 0, B_TRUE);
1259 bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
1260 spl_fstrans_unmark(cookie);
1265 * Utility function that causes End of Stream records to compare after all
1266 * others, so that other threads' comparison logic can stay simple.
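 * (A return of -1 means "from" sorts before "to"; get_next_range_nofree()
 * below asserts exactly that for consecutively dequeued records.)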
1268 static int __attribute__((unused))
1269 send_range_after(const struct send_range *from, const struct send_range *to)
1271 if (from->eos_marker == B_TRUE)
1273 if (to->eos_marker == B_TRUE)
1276 uint64_t from_obj = from->object;
1277 uint64_t from_end_obj = from->object + 1;
1278 uint64_t to_obj = to->object;
1279 uint64_t to_end_obj = to->object + 1;
1280 if (from_obj == 0) {
1281 ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
1282 from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
1283 from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
1286 ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
1287 to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
1288 to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
1291 if (from_end_obj <= to_obj)
1293 if (from_obj >= to_end_obj)
1295 int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
1299 cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
1302 if (from->end_blkid <= to->start_blkid)
1304 if (from->start_blkid >= to->end_blkid)
1310 * Pop the new data off the queue, check that the records we receive are in
1311 * the right order, but do not free the old data. This is used so that the
1312 * records can be sent on to the main thread without copying the data.
1314 static struct send_range *
1315 get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
1317 struct send_range *next = bqueue_dequeue(bq);
1318 ASSERT3S(send_range_after(prev, next), ==, -1);
1323 * Pop the new data off the queue, check that the records we receive are in
1324 * the right order, and free the old data.
1326 static struct send_range *
1327 get_next_range(bqueue_t *bq, struct send_range *prev)
1329 struct send_range *next = get_next_range_nofree(bq, prev);
1334 static __attribute__((noreturn)) void
1335 redact_list_thread(void *arg)
1337 struct redact_list_thread_arg *rlt_arg = arg;
1338 struct send_range *record;
1339 fstrans_cookie_t cookie = spl_fstrans_mark();
1340 if (rlt_arg->rl != NULL) {
1341 struct redact_list_cb_arg rlcba = {0};
1342 rlcba.cancel = &rlt_arg->cancel;
1343 rlcba.q = &rlt_arg->q;
1344 rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
1345 rlcba.mark_redact = rlt_arg->mark_redact;
1346 int err = dsl_redaction_list_traverse(rlt_arg->rl,
1347 &rlt_arg->resume, redact_list_cb, &rlcba);
1349 rlt_arg->error_code = err;
1351 record = range_alloc(DATA, 0, 0, 0, B_TRUE);
1352 bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
1353 spl_fstrans_unmark(cookie);
1359 * Compare the start point of the two provided ranges. End of stream ranges
1360 * compare last; objects compare before any data or hole inside that object,
1361 * and before multi-object holes that start at the same object.
1364 send_range_start_compare(struct send_range *r1, struct send_range *r2)
1366 uint64_t r1_objequiv = r1->object;
1367 uint64_t r1_l0equiv = r1->start_blkid;
1368 uint64_t r2_objequiv = r2->object;
1369 uint64_t r2_l0equiv = r2->start_blkid;
1370 int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
1373 if (r1->object == 0) {
1374 r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
1377 if (r2->object == 0) {
1378 r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
1382 cmp = TREE_CMP(r1_objequiv, r2_objequiv);
1385 cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
1388 cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
1392 return (TREE_CMP(r1_l0equiv, r2_l0equiv));
1403 * This function returns the next range the send_merge_thread should operate on.
1404 * The inputs are two arrays; the first one stores the range at the front of the
1405 * queues stored in the second one. The ranges are sorted in descending
1406 * priority order; the metadata from earlier ranges overrules metadata from
1407 * later ranges. out_mask is used to return which threads the ranges came from;
1408 * bit i is set if ranges[i] started at the same place as the returned range.
1410 * This code is not hardcoded to compare a specific number of threads; it could
1411 * be used with any number, just by changing the q_idx enum.
1413 * The "next range" is the one with the earliest start; if two starts are equal,
1414 * the highest-priority range is the next to operate on. If a higher-priority
1415 * range starts in the middle of the first range, then the first range will be
1416 * truncated to end where the higher-priority range starts, and we will operate
1417 * on that one next time. In this way, we make sure that each block covered by
1418 * some range gets covered by a returned range, and each block covered is
1419 * returned using the metadata of the highest-priority range it appears in.
1421 * For example, if the three ranges at the front of the queues were [2,4),
1422 * [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
1423 * from the third range, [2,4) with the metadata from the first range, and then
1424 * [4,5) with the metadata from the second.
1426 static struct send_range *
1427 find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
1429 int idx = 0; // index of the range with the earliest start
1432 for (i = 1; i < NUM_THREADS; i++) {
1433 if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
1436 if (ranges[idx]->eos_marker) {
1437 struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
1442 * Find all the ranges that start at that same point.
1444 for (i = 0; i < NUM_THREADS; i++) {
1445 if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
1450 * OBJECT_RANGE records only come from the TO thread, and should always
1451 * be treated as overlapping with nothing and sent on immediately. They
1452 * are only used in raw sends, and are never redacted.
1454 if (ranges[idx]->type == OBJECT_RANGE) {
1455 ASSERT3U(idx, ==, TO_IDX);
1456 ASSERT3U(*out_mask, ==, 1 << TO_IDX);
1457 struct send_range *ret = ranges[idx];
1458 ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
1462 * Find the first start or end point after the start of the first range.
1464 uint64_t first_change = ranges[idx]->end_blkid;
1465 for (i = 0; i < NUM_THREADS; i++) {
1466 if (i == idx || ranges[i]->eos_marker ||
1467 ranges[i]->object > ranges[idx]->object ||
1468 ranges[i]->object == DMU_META_DNODE_OBJECT)
1470 ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
1471 if (first_change > ranges[i]->start_blkid &&
1472 (bmask & (1 << i)) == 0)
1473 first_change = ranges[i]->start_blkid;
1474 else if (first_change > ranges[i]->end_blkid)
1475 first_change = ranges[i]->end_blkid;
1478 * Update all ranges to no longer overlap with the range we're
1479 * returning. All such ranges must start at the same place as the range
1480 * being returned, and end at or after first_change. Thus we update
1481 * their start to first_change. If that makes them size 0, then free
1482 * them and pull a new range from that thread.
1484 for (i = 0; i < NUM_THREADS; i++) {
1485 if (i == idx || (bmask & (1 << i)) == 0)
1487 ASSERT3U(first_change, >, ranges[i]->start_blkid);
1488 ranges[i]->start_blkid = first_change;
1489 ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
1490 if (ranges[i]->start_blkid == ranges[i]->end_blkid)
1491 ranges[i] = get_next_range(qs[i], ranges[i]);
1494 * Short-circuit the simple case; if the range doesn't overlap with
1495 * anything else, or it only overlaps with things that start at the same
1496 * place and are longer, send it on.
1498 if (first_change == ranges[idx]->end_blkid) {
1499 struct send_range *ret = ranges[idx];
1500 ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
1505 * Otherwise, return a truncated copy of ranges[idx] and move the start
1506 * of ranges[idx] back to first_change.
1508 struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
1509 *ret = *ranges[idx];
1510 ret->end_blkid = first_change;
1511 ranges[idx]->start_blkid = first_change;
1515 #define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
1518 * Merge the results from the from thread and the to thread, and then hand the
1519 * records off to send_prefetch_thread to prefetch them. If this is not a
1520 * send from a redaction bookmark, the from thread will push an end of stream
1521 * record and stop, and we'll just send everything that was changed in the
1522 * to_ds since the ancestor's creation txg. If it is, then since
1523 * traverse_dataset has a canonical order, we can compare each change as
1524 * they're pulled off the queues. That will give us a stream that is
1525 * appropriately sorted, and covers all records. In addition, we pull the
1526 * data from the redact_list_thread and use that to determine which blocks
1527 * should be redacted.
1529 static __attribute__((noreturn)) void
1530 send_merge_thread(void *arg)
1532 struct send_merge_thread_arg *smt_arg = arg;
1533 struct send_range *front_ranges[NUM_THREADS];
1534 bqueue_t *queues[NUM_THREADS];
1536 fstrans_cookie_t cookie = spl_fstrans_mark();
1538 if (smt_arg->redact_arg == NULL) {
1539 front_ranges[REDACT_IDX] =
1540 kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
1541 front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
1542 front_ranges[REDACT_IDX]->type = REDACT;
1543 queues[REDACT_IDX] = NULL;
1545 front_ranges[REDACT_IDX] =
1546 bqueue_dequeue(&smt_arg->redact_arg->q);
1547 queues[REDACT_IDX] = &smt_arg->redact_arg->q;
1549 front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
1550 queues[TO_IDX] = &smt_arg->to_arg->q;
1551 front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
1552 queues[FROM_IDX] = &smt_arg->from_arg->q;
1554 struct send_range *range;
1555 for (range = find_next_range(front_ranges, queues, &mask);
1556 !range->eos_marker && err == 0 && !smt_arg->cancel;
1557 range = find_next_range(front_ranges, queues, &mask)) {
1559 * If the range in question was in both the from redact bookmark
1560 * and the bookmark we're using to redact, then don't send it.
1561 * It's already redacted on the receiving system, so a redaction
1562 * record would be redundant.
1564 if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
1565 ASSERT3U(range->type, ==, REDACT);
1569 bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
1571 if (smt_arg->to_arg->error_code != 0) {
1572 err = smt_arg->to_arg->error_code;
1573 } else if (smt_arg->from_arg->error_code != 0) {
1574 err = smt_arg->from_arg->error_code;
1575 } else if (smt_arg->redact_arg != NULL &&
1576 smt_arg->redact_arg->error_code != 0) {
1577 err = smt_arg->redact_arg->error_code;
1580 if (smt_arg->cancel && err == 0)
1581 err = SET_ERROR(EINTR);
1582 smt_arg->error = err;
1583 if (smt_arg->error != 0) {
1584 smt_arg->to_arg->cancel = B_TRUE;
1585 smt_arg->from_arg->cancel = B_TRUE;
1586 if (smt_arg->redact_arg != NULL)
1587 smt_arg->redact_arg->cancel = B_TRUE;
1589 for (int i = 0; i < NUM_THREADS; i++) {
1590 while (!front_ranges[i]->eos_marker) {
1591 front_ranges[i] = get_next_range(queues[i],
1594 range_free(front_ranges[i]);
1596 range->eos_marker = B_TRUE;
1597 bqueue_enqueue_flush(&smt_arg->q, range, 1);
1598 spl_fstrans_unmark(cookie);
1602 struct send_reader_thread_arg {
1603 struct send_merge_thread_arg *smta;
1606 boolean_t issue_reads;
1607 uint64_t featureflags;
1612 dmu_send_read_done(zio_t *zio)
1614 struct send_range *range = zio->io_private;
1616 mutex_enter(&range->sru.data.lock);
1617 if (zio->io_error != 0) {
1618 abd_free(range->sru.data.abd);
1619 range->sru.data.abd = NULL;
1620 range->sru.data.io_err = zio->io_error;
1623 ASSERT(range->sru.data.io_outstanding);
1624 range->sru.data.io_outstanding = B_FALSE;
1625 cv_broadcast(&range->sru.data.cv);
1626 mutex_exit(&range->sru.data.lock);
1630 issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
1632 struct srd *srdp = &range->sru.data;
1633 blkptr_t *bp = &srdp->bp;
1634 objset_t *os = srta->smta->os;
1636 ASSERT3U(range->type, ==, DATA);
1637 ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
1639 * If we have large blocks stored on disk but
1640 * the send flags don't allow us to send large
1641 * blocks, we split the data from the arc buf into chunks.
1644 boolean_t split_large_blocks =
1645 srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
1646 !(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
1648 * We should only request compressed data from the ARC if all
1649 * the following are true:
1650 * - stream compression was requested
1651 * - we aren't splitting large blocks into smaller chunks
1652 * - the data won't need to be byteswapped before sending
1653 * - this isn't an embedded block
1654 * - this isn't metadata (if receiving on a different endian
1655 * system it can be byteswapped more easily)
1657 boolean_t request_compressed =
1658 (srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
1659 !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
1660 !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
1662 zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
1664 if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
1665 zioflags |= ZIO_FLAG_RAW;
1666 srdp->io_compressed = B_TRUE;
1667 } else if (request_compressed) {
1668 zioflags |= ZIO_FLAG_RAW_COMPRESS;
1669 srdp->io_compressed = B_TRUE;
1672 srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
1673 BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
1675 if (!srta->issue_reads)
1677 if (BP_IS_REDACTED(bp))
1679 if (send_do_embed(bp, srta->featureflags))
1682 zbookmark_phys_t zb = {
1683 .zb_objset = dmu_objset_id(os),
1684 .zb_object = range->object,
1686 .zb_blkid = range->start_blkid,
1689 arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
1691 int arc_err = arc_read(NULL, os->os_spa, bp,
1692 arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
1693 zioflags, &aflags, &zb);
1695 * If the data is not already cached in the ARC, we read directly
1696 * from zio. This avoids the performance overhead of adding a new
1697 * entry to the ARC, and we also avoid polluting the ARC cache with
1698 * data that is not likely to be used in the future.
1701 srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
1702 srdp->io_outstanding = B_TRUE;
1703 zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
1704 srdp->datasz, dmu_send_read_done, range,
1705 ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
1710 * Create a new record with the given values.
1713 enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
1714 uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
1716 enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
1717 (BP_IS_REDACTED(bp) ? REDACT : DATA));
1719 struct send_range *range = range_alloc(range_type, dn->dn_object,
1720 blkid, blkid + count, B_FALSE);
1722 if (blkid == DMU_SPILL_BLKID) {
1723 ASSERT3P(bp, !=, NULL);
1724 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
1727 switch (range_type) {
1729 range->sru.hole.datablksz = datablksz;
1732 ASSERT3U(count, ==, 1);
1733 range->sru.data.datablksz = datablksz;
1734 range->sru.data.obj_type = dn->dn_type;
1735 range->sru.data.bp = *bp;
1736 issue_data_read(srta, range);
1739 range->sru.redact.datablksz = datablksz;
1744 bqueue_enqueue(q, range, datablksz);
1748 * This thread is responsible for two things: First, it retrieves the correct
1749 * blkptr in the to ds if we need to send the data because of something from
1750 * the from thread. As a result of this, we're the first ones to discover that
1751 * some indirect blocks can be discarded because they're not holes. Second,
1752 * it issues prefetches for the data we need to send.
1754 static __attribute__((noreturn)) void
1755 send_reader_thread(void *arg)
1757 struct send_reader_thread_arg *srta = arg;
1758 struct send_merge_thread_arg *smta = srta->smta;
1759 bqueue_t *inq = &smta->q;
1760 bqueue_t *outq = &srta->q;
1761 objset_t *os = smta->os;
1762 fstrans_cookie_t cookie = spl_fstrans_mark();
1763 struct send_range *range = bqueue_dequeue(inq);
1767 * If the record we're analyzing is from a redaction bookmark from the
1768 * fromds, then we need to know whether or not it exists in the tods so
1769 * we know whether to create records for it or not. If it does, we need
1770 * the datablksz so we can generate an appropriate record for it.
1771 * Finally, if it isn't redacted, we need the blkptr so that we can send
1772 * a WRITE record containing the actual data.
1774 uint64_t last_obj = UINT64_MAX;
1775 uint64_t last_obj_exists = B_TRUE;
1776 while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
1778 switch (range->type) {
1780 issue_data_read(srta, range);
1781 bqueue_enqueue(outq, range, range->sru.data.datablksz);
1782 range = get_next_range_nofree(inq, range);
1787 case REDACT: // Redacted blocks must exist
1788 bqueue_enqueue(outq, range, sizeof (*range));
1789 range = get_next_range_nofree(inq, range);
1791 case PREVIOUSLY_REDACTED: {
1793 * This entry came from the "from bookmark" when
1794 * sending from a bookmark that has a redaction
1795 * list. We need to check if this object/blkid
1796 * exists in the target ("to") dataset, and if
1797 * not then we drop this entry. We also need
1798 * to fill in the block pointer so that we know
1801 * To accomplish the above, we first cache whether or
1802 * not the last object we examined exists. If it
1803 * doesn't, we can drop this record. If it does, we hold
1804 * the dnode and use it to call dbuf_dnode_findbp. We do
1805 * this instead of dbuf_bookmark_findbp because we will
1806 * often operate on large ranges, and holding the dnode
1807 * once is more efficient.
1809 boolean_t object_exists = B_TRUE;
1811 * If the data is redacted, we only care if it exists,
1812 * so that we don't send records for objects that have been deleted.
1816 if (range->object == last_obj && !last_obj_exists) {
1818 * If we're still examining the same object as
1819 * previously, and it doesn't exist, we don't
1820 * need to call dbuf_bookmark_findbp.
1822 object_exists = B_FALSE;
1824 err = dnode_hold(os, range->object, FTAG, &dn);
1825 if (err == ENOENT) {
1826 object_exists = B_FALSE;
1829 last_obj = range->object;
1830 last_obj_exists = object_exists;
1835 } else if (!object_exists) {
1837 * The block was modified, but doesn't
1838 * exist in the to dataset; if it was
1839 * deleted in the to dataset, then we'll
1840 * visit the hole bp for it at some point.
1842 range = get_next_range(inq, range);
1846 MIN(dn->dn_maxblkid, range->end_blkid);
1848 * The object exists, so we need to try to find the
1849 * blkptr for each block in the range we're processing.
1851 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1852 for (uint64_t blkid = range->start_blkid;
1853 blkid < file_max; blkid++) {
1855 uint32_t datablksz =
1856 dn->dn_phys->dn_datablkszsec <<
1858 uint64_t offset = blkid * datablksz;
1860 * This call finds the next non-hole block in
1861 * the object. This is to prevent a
1862 * performance problem where we're unredacting
1863 * a large hole. Using dnode_next_offset to
1864 * skip over the large hole avoids iterating
1865 * over every block in it.
1867 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
1870 offset = UINT64_MAX;
1872 } else if (err != 0) {
1875 if (offset != blkid * datablksz) {
1877 * if there is a hole from here
1880 offset = MIN(offset, file_max *
1882 uint64_t nblks = (offset / datablksz) -
1884 enqueue_range(srta, outq, dn, blkid,
1885 nblks, NULL, datablksz);
1888 if (blkid >= file_max)
1890 err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
1894 ASSERT(!BP_IS_HOLE(&bp));
1895 enqueue_range(srta, outq, dn, blkid, 1, &bp,
1898 rw_exit(&dn->dn_struct_rwlock);
1899 dnode_rele(dn, FTAG);
1900 range = get_next_range(inq, range);
1904 if (srta->cancel || err != 0) {
1905 smta->cancel = B_TRUE;
1907 } else if (smta->error != 0) {
1908 srta->error = smta->error;
1910 while (!range->eos_marker)
1911 range = get_next_range(inq, range);
1913 bqueue_enqueue_flush(outq, range, 1);
1914 spl_fstrans_unmark(cookie);
1918 #define NUM_SNAPS_NOT_REDACTED UINT64_MAX
1920 struct dmu_send_params {
1922 const void *tag; // Tag dp was held with, will be used to release dp.
1924 /* To snapshot args */
1926 dsl_dataset_t *to_ds;
1927 /* From snapshot args */
1928 zfs_bookmark_phys_t ancestor_zb;
1929 uint64_t *fromredactsnaps;
1930 /* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
1931 uint64_t numfromredactsnaps;
1935 boolean_t large_block_ok;
1936 boolean_t compressok;
1941 uint64_t saved_guid;
1942 zfs_bookmark_phys_t *redactbook;
1943 /* Stream output params */
1944 dmu_send_outparams_t *dso;
1946 /* Stream progress params */
1949 char saved_toname[MAXNAMELEN];
1953 setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
1954 uint64_t *featureflags)
1956 dsl_dataset_t *to_ds = dspp->to_ds;
1957 dsl_pool_t *dp = dspp->dp;
1959 if (dmu_objset_type(os) == DMU_OST_ZFS) {
1961 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
1962 return (SET_ERROR(EINVAL));
1964 if (version >= ZPL_VERSION_SA)
1965 *featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
1968 /* raw sends imply large_block_ok */
1969 if ((dspp->rawok || dspp->large_block_ok) &&
1970 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
1971 *featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
1974 /* encrypted datasets will not have embedded blocks */
1975 if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
1976 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
1977 *featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
1980 /* raw send implies compressok */
1981 if (dspp->compressok || dspp->rawok)
1982 *featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
1984 if (dspp->rawok && os->os_encrypted)
1985 *featureflags |= DMU_BACKUP_FEATURE_RAW;
1987 if ((*featureflags &
1988 (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
1989 DMU_BACKUP_FEATURE_RAW)) != 0 &&
1990 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
1991 *featureflags |= DMU_BACKUP_FEATURE_LZ4;
1995 * We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
1996 * allow sending ZSTD compressed datasets to a receiver that does not support ZSTD.
1999 if ((*featureflags &
2000 (DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
2001 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
2002 *featureflags |= DMU_BACKUP_FEATURE_ZSTD;
2005 if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
2006 *featureflags |= DMU_BACKUP_FEATURE_RESUMING;
2009 if (dspp->redactbook != NULL) {
2010 *featureflags |= DMU_BACKUP_FEATURE_REDACTED;
2013 if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
2014 *featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
2019 static dmu_replay_record_t *
2020 create_begin_record(struct dmu_send_params *dspp, objset_t *os,
2021 uint64_t featureflags)
2023 dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
2025 drr->drr_type = DRR_BEGIN;
2027 struct drr_begin *drrb = &drr->drr_u.drr_begin;
2028 dsl_dataset_t *to_ds = dspp->to_ds;
2030 drrb->drr_magic = DMU_BACKUP_MAGIC;
2031 drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
2032 drrb->drr_type = dmu_objset_type(os);
2033 drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
2034 drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
2036 DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
2037 DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
2040 drrb->drr_flags |= DRR_FLAG_CLONE;
2041 if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
2042 drrb->drr_flags |= DRR_FLAG_CI_DATA;
2043 if (zfs_send_set_freerecords_bit)
2044 drrb->drr_flags |= DRR_FLAG_FREERECORDS;
2045 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK;
2047 if (dspp->savedok) {
2048 drrb->drr_toguid = dspp->saved_guid;
2049 strlcpy(drrb->drr_toname, dspp->saved_toname,
2050 sizeof (drrb->drr_toname));
2052 dsl_dataset_name(to_ds, drrb->drr_toname);
2053 if (!to_ds->ds_is_snapshot) {
2054 (void) strlcat(drrb->drr_toname, "@--head--",
2055 sizeof (drrb->drr_toname));
2062 setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
2063 dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
2065 VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
2066 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
2067 offsetof(struct send_range, ln)));
2068 to_arg->error_code = 0;
2069 to_arg->cancel = B_FALSE;
2071 to_arg->fromtxg = fromtxg;
2072 to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
2074 to_arg->flags |= TRAVERSE_NO_DECRYPT;
2075 if (zfs_send_corrupt_data)
2076 to_arg->flags |= TRAVERSE_HARD;
2077 to_arg->num_blocks_visited = &dssp->dss_blocks;
2078 (void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
2079 curproc, TS_RUN, minclsyspri);
2083 setup_from_thread(struct redact_list_thread_arg *from_arg,
2084 redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
2086 VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
2087 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
2088 offsetof(struct send_range, ln)));
2089 from_arg->error_code = 0;
2090 from_arg->cancel = B_FALSE;
2091 from_arg->rl = from_rl;
2092 from_arg->mark_redact = B_FALSE;
2093 from_arg->num_blocks_visited = &dssp->dss_blocks;
2095 * If from_rl is NULL, redact_list_thread just returns success and
2096 * enqueues an eos marker.
2098 (void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
2099 curproc, TS_RUN, minclsyspri);
2103 setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
2104 struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
2106 if (dspp->redactbook == NULL)
2109 rlt_arg->cancel = B_FALSE;
2110 VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
2111 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
2112 offsetof(struct send_range, ln)));
2113 rlt_arg->error_code = 0;
2114 rlt_arg->mark_redact = B_TRUE;
2116 rlt_arg->num_blocks_visited = &dssp->dss_blocks;
2118 (void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
2119 curproc, TS_RUN, minclsyspri);
2123 setup_merge_thread(struct send_merge_thread_arg *smt_arg,
2124 struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
2125 struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
2128 VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
2129 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
2130 offsetof(struct send_range, ln)));
2131 smt_arg->cancel = B_FALSE;
2133 smt_arg->from_arg = from_arg;
2134 smt_arg->to_arg = to_arg;
2135 if (dspp->redactbook != NULL)
2136 smt_arg->redact_arg = rlt_arg;
2139 (void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
2140 TS_RUN, minclsyspri);
2144 setup_reader_thread(struct send_reader_thread_arg *srt_arg,
2145 struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
2146 uint64_t featureflags)
2148 VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
2149 MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
2150 offsetof(struct send_range, ln)));
2151 srt_arg->smta = smt_arg;
2152 srt_arg->issue_reads = !dspp->dso->dso_dryrun;
2153 srt_arg->featureflags = featureflags;
2154 (void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
2155 curproc, TS_RUN, minclsyspri);
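/*
 * Illustrative sketch (editorial, not part of the send code): each of the
 * setup_*_thread() helpers above wires its pipeline stage up with the same
 * bqueue pattern -- a queue capped by the total size charged to the entries
 * it holds rather than by their count, plus a fill fraction that controls
 * how often the consumer is signalled.  Assuming the bqueue API as it is
 * used elsewhere in this file (bqueue_init / bqueue_enqueue / bqueue_dequeue /
 * bqueue_destroy), a single producer/consumer stage reduces to roughly the
 * following; the names "my_q" and "range_bytes()" and the 16 MiB cap are
 * hypothetical, and error handling and final cleanup are omitted:
 *
 *	bqueue_t my_q;
 *	VERIFY0(bqueue_init(&my_q, 20,		// fill fraction
 *	    16 * 1024 * 1024,			// cap is bytes charged, not entry count
 *	    offsetof(struct send_range, ln)));
 *
 *	// producer: charge each range for the data it will cause to be read
 *	bqueue_enqueue(&my_q, range, range_bytes(range));
 *	...
 *	bqueue_enqueue_flush(&my_q, eos_range, 1);	// last entry; wake the consumer
 *
 *	// consumer: entries arrive in order and end with an eos marker;
 *	// get_next_range() releases the consumed entry and dequeues the next
 *	struct send_range *rng = bqueue_dequeue(&my_q);
 *	while (!rng->eos_marker)
 *		rng = get_next_range(&my_q, rng);
 *	bqueue_destroy(&my_q);
 */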
2159 setup_resume_points(struct dmu_send_params *dspp,
2160 struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
2161 struct redact_list_thread_arg *rlt_arg,
2162 struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
2163 redaction_list_t *redact_rl, nvlist_t *nvl)
2166 dsl_dataset_t *to_ds = dspp->to_ds;
2172 obj = dspp->resumeobj;
2173 dmu_object_info_t to_doi;
2174 err = dmu_object_info(os, obj, &to_doi);
2178 blkid = dspp->resumeoff / to_doi.doi_data_block_size;
2181 * If we're resuming a redacted send, we can skip to the appropriate
2182 * point in the redaction bookmark by binary searching through it.
2184 if (redact_rl != NULL) {
2185 SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
2188 SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
2189 if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
2190 uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
2192 * Note: If the resume point is in an object whose
2193 * blocksize is different in the from vs to snapshots,
2194 * we will have divided by the "wrong" blocksize.
2195 * However, in this case fromsnap's send_cb() will
2196 * detect that the blocksize has changed and therefore
2197 * ignore this object.
2199 * If we're resuming a send from a redaction bookmark,
2200 * we still cannot accidentally suggest blocks behind
2201 * the to_ds. In addition, we know that any blocks in
2202 * the object in the to_ds will have to be sent, since
2203 * the size changed. Therefore, we can't cause any harm
2206 SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
2209 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
2210 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
2215 static dmu_sendstatus_t *
2216 setup_send_progress(struct dmu_send_params *dspp)
2218 dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
2219 dssp->dss_outfd = dspp->outfd;
2220 dssp->dss_off = dspp->off;
2221 dssp->dss_proc = curproc;
2222 mutex_enter(&dspp->to_ds->ds_sendstream_lock);
2223 list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
2224 mutex_exit(&dspp->to_ds->ds_sendstream_lock);
2229 * Actually do the bulk of the work in a zfs send.
2231 * The idea is that we want to do a send from ancestor_zb to to_ds. We also
2232 * want to not send any data that has been modified by all the datasets in
2233 * redactsnaparr, and store the list of blocks that are redacted in this way in
2234 * a bookmark named redactbook, created on the to_ds. We do this by creating
2235 * several worker threads, whose function is described below.
2237 * There are three cases.
2238 * The first case is a redacted zfs send. In this case there are 5 threads.
2239 * The first thread is the to_ds traversal thread: it calls dataset_traverse on
2240 * the to_ds and finds all the blocks that have changed since ancestor_zb (if
2241 * it's a full send, that's all blocks in the dataset). It then sends those
2242 * blocks on to the send merge thread. The redact list thread takes the data
2243 * from the redaction bookmark and sends those blocks on to the send merge
2244 * thread. The send merge thread takes the data from the to_ds traversal
2245 * thread, and combines it with the redaction records from the redact list
2246 * thread. If a block appears in both the to_ds's data and the redaction data,
2247 * the send merge thread will mark it as redacted and send it on to the prefetch
2248 * thread. Otherwise, the send merge thread will send the block on to the
2249 * prefetch thread unchanged. The prefetch thread will issue prefetch reads for
2250 * any data that isn't redacted, and then send the data on to the main thread.
2251 * The main thread behaves the same as in a normal send case, issuing demand
2252 * reads for data blocks and sending out records over the network
2254 * The graphic below diagrams the flow of data in the case of a redacted zfs
2255 * send. Each box represents a thread, and each line represents the flow of
2258 * Records from the |
2259 * redaction bookmark |
2260 * +--------------------+ | +---------------------------+
2261 * | | v | Send Merge Thread |
2262 * | Redact List Thread +----------> Apply redaction marks to |
2263 * | | | records as specified by |
2264 * +--------------------+ | redaction ranges |
2265 * +----^---------------+------+
2268 * | +------------v--------+
2269 * | | Prefetch Thread |
2270 * +--------------------+ | | Issues prefetch |
2271 * | to_ds Traversal | | | reads of data blocks|
2272 * | Thread (finds +---------------+ +------------+--------+
2273 * | candidate blocks) | Blocks modified | Prefetched data
2274 * +--------------------+ by to_ds since |
2275 * ancestor_zb +------------v----+
2276 * | Main Thread | File Descriptor
2277 * | Sends data over +->(to zfs receive)
2279 * +-----------------+
2281 * The second case is an incremental send from a redaction bookmark. The to_ds
2282 * traversal thread and the main thread behave the same as in the redacted
2283 * send case. The new thread is the from bookmark traversal thread. It
2284 * iterates over the redaction list in the redaction bookmark, and enqueues
2285 * records for each block that was redacted in the original send. The send
2286 * merge thread now has to merge the data from the two threads. For details
2287 * about that process, see the header comment of send_merge_thread(). Any data
2288 * it decides to send on will be prefetched by the prefetch thread. Note that
2289 * you can perform a redacted send from a redaction bookmark; in that case,
2290 * the data flow behaves very similarly to the flow in the redacted send case,
2291 * except with the addition of the bookmark traversal thread iterating over the
2292 * redaction bookmark. The send_merge_thread also has to take on the
2293 * responsibility of merging the redact list thread's records, the bookmark
2294 * traversal thread's records, and the to_ds records.
2296 * +---------------------+
2298 * | Redact List Thread +--------------+
2300 * +---------------------+ |
2301 * Blocks in redaction list | Ranges modified by every secure snap
2302 * of from bookmark | (or EOS if not readcted)
2304 * +---------------------+ | +----v----------------------+
2305 * | bookmark Traversal | v | Send Merge Thread |
2306 * | Thread (finds +---------> Merges bookmark, rlt, and |
2307 * | candidate blocks) | | to_ds send records |
2308 * +---------------------+ +----^---------------+------+
2310 * | +------------v--------+
2311 * | | Prefetch Thread |
2312 * +--------------------+ | | Issues prefetch |
2313 * | to_ds Traversal | | | reads of data blocks|
2314 * | Thread (finds +---------------+ +------------+--------+
2315 * | candidate blocks) | Blocks modified | Prefetched data
2316 * +--------------------+ by to_ds since +------------v----+
2317 * ancestor_zb | Main Thread | File Descriptor
2318 * | Sends data over +->(to zfs receive)
2320 * +-----------------+
2322 * The final case is a simple zfs full or incremental send. The to_ds traversal
2323 * thread behaves the same as always. The redact list thread is never started.
2324 * The send merge thread takes all the blocks that the to_ds traversal thread
2325 * sends it, prefetches the data, and sends the blocks on to the main thread.
2326 * The main thread sends the data over the wire.
2328 * To keep performance acceptable, we want to prefetch the data in the worker
2329 * threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
2330 * feature built into traverse_dataset, the combining and deletion of records
2331 * due to redaction and sends from redaction bookmarks mean that we could
2332 * issue many unnecessary prefetches. As a result, we only prefetch data
2333 * after we've determined that the record is not going to be redacted. To
2334 * prevent the prefetching from getting too far ahead of the main thread, the
2335 * blocking queues that are used for communication are capped not by the
2336 * number of entries in the queue, but by the sum of the size of the
2337 * prefetches associated with them. The limit on the amount of data that the
2338 * thread can prefetch beyond what the main thread has reached is controlled
2339 * by the global variable zfs_send_queue_length. In addition, to prevent poor
2340 * performance in the beginning of a send, we also limit the distance ahead
2341 * that the traversal threads can be. That distance is controlled by the
2342 * zfs_send_no_prefetch_queue_length tunable.
2344 * Note: Releases dp using the specified tag.
2347 dmu_send_impl(struct dmu_send_params *dspp)
2350 dmu_replay_record_t *drr;
2351 dmu_sendstatus_t *dssp;
2352 dmu_send_cookie_t dsc = {0};
2354 uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
2355 uint64_t featureflags = 0;
2356 struct redact_list_thread_arg *from_arg;
2357 struct send_thread_arg *to_arg;
2358 struct redact_list_thread_arg *rlt_arg;
2359 struct send_merge_thread_arg *smt_arg;
2360 struct send_reader_thread_arg *srt_arg;
2361 struct send_range *range;
2362 redaction_list_t *from_rl = NULL;
2363 redaction_list_t *redact_rl = NULL;
2364 boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
2365 boolean_t book_resuming = resuming;
2367 dsl_dataset_t *to_ds = dspp->to_ds;
2368 zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
2369 dsl_pool_t *dp = dspp->dp;
2370 const void *tag = dspp->tag;
2372 err = dmu_objset_from_ds(to_ds, &os);
2374 dsl_pool_rele(dp, tag);
2379 * If this is a non-raw send of an encrypted ds, we can ensure that
2380 * the objset_phys_t is authenticated. This is safe because this is
2381 * either a snapshot or we have owned the dataset, ensuring that
2382 * it can't be modified.
2384 if (!dspp->rawok && os->os_encrypted &&
2385 arc_is_unauthenticated(os->os_phys_buf)) {
2386 zbookmark_phys_t zb;
2388 SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
2389 ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
2390 err = arc_untransform(os->os_phys_buf, os->os_spa,
2393 dsl_pool_rele(dp, tag);
2397 ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
2400 if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
2401 dsl_pool_rele(dp, tag);
2406 * If we're doing a redacted send, hold the bookmark's redaction list.
2408 if (dspp->redactbook != NULL) {
2409 err = dsl_redaction_list_hold_obj(dp,
2410 dspp->redactbook->zbm_redaction_obj, FTAG,
2413 dsl_pool_rele(dp, tag);
2414 return (SET_ERROR(EINVAL));
2416 dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
2420 * If we're sending from a redaction bookmark, hold the redaction list
2421 * so that we can consider sending the redacted blocks.
2423 if (ancestor_zb->zbm_redaction_obj != 0) {
2424 err = dsl_redaction_list_hold_obj(dp,
2425 ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
2427 if (redact_rl != NULL) {
2428 dsl_redaction_list_long_rele(redact_rl, FTAG);
2429 dsl_redaction_list_rele(redact_rl, FTAG);
2431 dsl_pool_rele(dp, tag);
2432 return (SET_ERROR(EINVAL));
2434 dsl_redaction_list_long_hold(dp, from_rl, FTAG);
2437 dsl_dataset_long_hold(to_ds, FTAG);
2439 from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
2440 to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
2441 rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
2442 smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
2443 srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
2445 drr = create_begin_record(dspp, os, featureflags);
2446 dssp = setup_send_progress(dspp);
2449 dsc.dsc_dso = dspp->dso;
2451 dsc.dsc_off = dspp->off;
2452 dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
2453 dsc.dsc_fromtxg = fromtxg;
2454 dsc.dsc_pending_op = PENDING_NONE;
2455 dsc.dsc_featureflags = featureflags;
2456 dsc.dsc_resume_object = dspp->resumeobj;
2457 dsc.dsc_resume_offset = dspp->resumeoff;
2459 dsl_pool_rele(dp, tag);
2461 void *payload = NULL;
2462 size_t payload_len = 0;
2463 nvlist_t *nvl = fnvlist_alloc();
2466 * If we're doing a redacted send, we include the snapshots we're
2467 * redacted with respect to so that the target system knows what send
2468 * streams can be correctly received on top of this dataset. If we're
2469 * instead sending a redacted dataset, we include the snapshots that the
2470 * dataset was created with respect to.
2472 if (dspp->redactbook != NULL) {
2473 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
2474 redact_rl->rl_phys->rlp_snaps,
2475 redact_rl->rl_phys->rlp_num_snaps);
2476 } else if (dsl_dataset_feature_is_active(to_ds,
2477 SPA_FEATURE_REDACTED_DATASETS)) {
2478 uint64_t *tods_guids;
2480 VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
2481 SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
2482 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
2487 * If we're sending from a redaction bookmark, then we should retrieve
2488 * the guids of that bookmark so we can send them over the wire.
2490 if (from_rl != NULL) {
2491 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
2492 from_rl->rl_phys->rlp_snaps,
2493 from_rl->rl_phys->rlp_num_snaps);
2497 * If the snapshot we're sending from is redacted, include the redaction
2498 * list in the stream.
2500 if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
2501 ASSERT3P(from_rl, ==, NULL);
2502 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
2503 dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
2504 if (dspp->numfromredactsnaps > 0) {
2505 kmem_free(dspp->fromredactsnaps,
2506 dspp->numfromredactsnaps * sizeof (uint64_t));
2507 dspp->fromredactsnaps = NULL;
2511 if (resuming || book_resuming) {
2512 err = setup_resume_points(dspp, to_arg, from_arg,
2513 rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
2518 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
2519 uint64_t ivset_guid = ancestor_zb->zbm_ivset_guid;
2520 nvlist_t *keynvl = NULL;
2521 ASSERT(os->os_encrypted);
2523 err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
2530 fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
2531 fnvlist_free(keynvl);
2534 if (!nvlist_empty(nvl)) {
2535 payload = fnvlist_pack(nvl, &payload_len);
2536 drr->drr_payloadlen = payload_len;
2540 err = dump_record(&dsc, payload, payload_len);
2541 fnvlist_pack_free(payload, payload_len);
2547 setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
2548 setup_from_thread(from_arg, from_rl, dssp);
2549 setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
2550 setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
2551 setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
2553 range = bqueue_dequeue(&srt_arg->q);
2554 while (err == 0 && !range->eos_marker) {
2555 err = do_dump(&dsc, range);
2556 range = get_next_range(&srt_arg->q, range);
2557 if (issig(JUSTLOOKING) && issig(FORREAL))
2558 err = SET_ERROR(EINTR);
2562 * If we hit an error or are interrupted, cancel our worker threads and
2563 * clear the queue of any pending records. The threads will pass the
2564 * cancel up the tree of worker threads, and each one will clean up any
2565 * pending records before exiting.
2568 srt_arg->cancel = B_TRUE;
2569 while (!range->eos_marker) {
2570 range = get_next_range(&srt_arg->q, range);
2575 bqueue_destroy(&srt_arg->q);
2576 bqueue_destroy(&smt_arg->q);
2577 if (dspp->redactbook != NULL)
2578 bqueue_destroy(&rlt_arg->q);
2579 bqueue_destroy(&to_arg->q);
2580 bqueue_destroy(&from_arg->q);
2582 if (err == 0 && srt_arg->error != 0)
2583 err = srt_arg->error;
2588 if (dsc.dsc_pending_op != PENDING_NONE)
2589 if (dump_record(&dsc, NULL, 0) != 0)
2590 err = SET_ERROR(EINTR);
2593 if (err == EINTR && dsc.dsc_err != 0)
2599 * Send the DRR_END record if this is not a saved stream.
2600 * Otherwise, the omitted DRR_END record will signal to
2601 * the receive side that the stream is incomplete.
2603 if (!dspp->savedok) {
2604 memset(drr, 0, sizeof (dmu_replay_record_t));
2605 drr->drr_type = DRR_END;
2606 drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
2607 drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
2609 if (dump_record(&dsc, NULL, 0) != 0)
2613 mutex_enter(&to_ds->ds_sendstream_lock);
2614 list_remove(&to_ds->ds_sendstreams, dssp);
2615 mutex_exit(&to_ds->ds_sendstream_lock);
2617 VERIFY(err != 0 || (dsc.dsc_sent_begin &&
2618 (dsc.dsc_sent_end || dspp->savedok)));
2620 kmem_free(drr, sizeof (dmu_replay_record_t));
2621 kmem_free(dssp, sizeof (dmu_sendstatus_t));
2622 kmem_free(from_arg, sizeof (*from_arg));
2623 kmem_free(to_arg, sizeof (*to_arg));
2624 kmem_free(rlt_arg, sizeof (*rlt_arg));
2625 kmem_free(smt_arg, sizeof (*smt_arg));
2626 kmem_free(srt_arg, sizeof (*srt_arg));
2628 dsl_dataset_long_rele(to_ds, FTAG);
2629 if (from_rl != NULL) {
2630 dsl_redaction_list_long_rele(from_rl, FTAG);
2631 dsl_redaction_list_rele(from_rl, FTAG);
2633 if (redact_rl != NULL) {
2634 dsl_redaction_list_long_rele(redact_rl, FTAG);
2635 dsl_redaction_list_rele(redact_rl, FTAG);
2642 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
2643 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
2644 boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
2645 dmu_send_outparams_t *dsop)
2648 dsl_dataset_t *fromds;
2649 ds_hold_flags_t dsflags;
2650 struct dmu_send_params dspp = {0};
2651 dspp.embedok = embedok;
2652 dspp.large_block_ok = large_block_ok;
2653 dspp.compressok = compressok;
2659 dspp.savedok = savedok;
2661 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2662 err = dsl_pool_hold(pool, FTAG, &dspp.dp);
2666 err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
2669 dsl_pool_rele(dspp.dp, FTAG);
2673 if (fromsnap != 0) {
2674 err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
2677 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
2678 dsl_pool_rele(dspp.dp, FTAG);
2681 dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
2682 dspp.ancestor_zb.zbm_creation_txg =
2683 dsl_dataset_phys(fromds)->ds_creation_txg;
2684 dspp.ancestor_zb.zbm_creation_time =
2685 dsl_dataset_phys(fromds)->ds_creation_time;
2687 if (dsl_dataset_is_zapified(fromds)) {
2688 (void) zap_lookup(dspp.dp->dp_meta_objset,
2689 fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
2690 &dspp.ancestor_zb.zbm_ivset_guid);
2693 /* See dmu_send for the reasons behind this. */
2694 uint64_t *fromredact;
2696 if (!dsl_dataset_get_uint64_array_feature(fromds,
2697 SPA_FEATURE_REDACTED_DATASETS,
2698 &dspp.numfromredactsnaps,
2700 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
2701 } else if (dspp.numfromredactsnaps > 0) {
2702 uint64_t size = dspp.numfromredactsnaps *
2704 dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
2705 memcpy(dspp.fromredactsnaps, fromredact, size);
2708 boolean_t is_before =
2709 dsl_dataset_is_before(dspp.to_ds, fromds, 0);
2710 dspp.is_clone = (dspp.to_ds->ds_dir !=
2712 dsl_dataset_rele(fromds, FTAG);
2714 dsl_pool_rele(dspp.dp, FTAG);
2715 err = SET_ERROR(EXDEV);
2717 err = dmu_send_impl(&dspp);
2720 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
2721 err = dmu_send_impl(&dspp);
2723 if (dspp.fromredactsnaps)
2724 kmem_free(dspp.fromredactsnaps,
2725 dspp.numfromredactsnaps * sizeof (uint64_t));
2727 dsl_dataset_rele(dspp.to_ds, FTAG);
2732 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
2733 boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
2734 boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
2735 const char *redactbook, int outfd, offset_t *off,
2736 dmu_send_outparams_t *dsop)
2739 ds_hold_flags_t dsflags;
2740 boolean_t owned = B_FALSE;
2741 dsl_dataset_t *fromds = NULL;
2742 zfs_bookmark_phys_t book = {0};
2743 struct dmu_send_params dspp = {0};
2745 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2746 dspp.tosnap = tosnap;
2747 dspp.embedok = embedok;
2748 dspp.large_block_ok = large_block_ok;
2749 dspp.compressok = compressok;
2754 dspp.resumeobj = resumeobj;
2755 dspp.resumeoff = resumeoff;
2757 dspp.savedok = savedok;
2759 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
2760 return (SET_ERROR(EINVAL));
2762 err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
2766 if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
2768 * We are sending a filesystem or volume. Ensure
2769 * that it doesn't change by owning the dataset.
2774 * We are looking for the dataset that represents the
2775 * partially received send stream. If this stream was
2776 * received as a new snapshot of an existing dataset,
2777 * this will be saved in a hidden clone named
2778 * "<pool>/<dataset>/%recv". Otherwise, the stream
2779 * will be saved in the live dataset itself. In
2780 * either case we need to use dsl_dataset_own_force()
2781 * because the stream is marked as inconsistent,
2782 * which would normally make it unavailable to be owned.
2785 char *name = kmem_asprintf("%s/%s", tosnap,
2787 err = dsl_dataset_own_force(dspp.dp, name, dsflags,
2789 if (err == ENOENT) {
2790 err = dsl_dataset_own_force(dspp.dp, tosnap,
2791 dsflags, FTAG, &dspp.to_ds);
2796 err = zap_lookup(dspp.dp->dp_meta_objset,
2797 dspp.to_ds->ds_object,
2798 DS_FIELD_RESUME_TOGUID, 8, 1,
2803 err = zap_lookup(dspp.dp->dp_meta_objset,
2804 dspp.to_ds->ds_object,
2805 DS_FIELD_RESUME_TONAME, 1,
2806 sizeof (dspp.saved_toname),
2809 /* Only disown if there was an error in the lookups */
2810 if (owned && (err != 0))
2811 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
2815 err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
2821 err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
2826 /* Note: dsl dataset is not owned at this point */
2827 dsl_pool_rele(dspp.dp, FTAG);
2831 if (redactbook != NULL) {
2832 char path[ZFS_MAX_DATASET_NAME_LEN];
2833 (void) strlcpy(path, tosnap, sizeof (path));
2834 char *at = strchr(path, '@');
2838 (void) snprintf(at, sizeof (path) - (at - path), "#%s",
2840 err = dsl_bookmark_lookup(dspp.dp, path,
2842 dspp.redactbook = &book;
2847 dsl_pool_rele(dspp.dp, FTAG);
2849 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
2851 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
2855 if (fromsnap != NULL) {
2856 zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
2858 if (strpbrk(tosnap, "@#") != NULL)
2859 fsnamelen = strpbrk(tosnap, "@#") - tosnap;
2861 fsnamelen = strlen(tosnap);
2864 * If the fromsnap is in a different filesystem, then
2865 * mark the send stream as a clone.
2867 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
2868 (fromsnap[fsnamelen] != '@' &&
2869 fromsnap[fsnamelen] != '#')) {
2870 dspp.is_clone = B_TRUE;
2873 if (strchr(fromsnap, '@') != NULL) {
2874 err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
2878 ASSERT3P(fromds, ==, NULL);
2881 * We need to make a deep copy of the redact
2882 * snapshots of the from snapshot, because the
2883 * array will be freed when we evict from_ds.
2885 uint64_t *fromredact;
2886 if (!dsl_dataset_get_uint64_array_feature(
2887 fromds, SPA_FEATURE_REDACTED_DATASETS,
2888 &dspp.numfromredactsnaps,
2890 dspp.numfromredactsnaps =
2891 NUM_SNAPS_NOT_REDACTED;
2892 } else if (dspp.numfromredactsnaps > 0) {
2894 dspp.numfromredactsnaps *
2896 dspp.fromredactsnaps = kmem_zalloc(size,
2898 memcpy(dspp.fromredactsnaps, fromredact,
2901 if (!dsl_dataset_is_before(dspp.to_ds, fromds,
2903 err = SET_ERROR(EXDEV);
2905 zb->zbm_creation_txg =
2906 dsl_dataset_phys(fromds)->
2908 zb->zbm_creation_time =
2909 dsl_dataset_phys(fromds)->
2912 dsl_dataset_phys(fromds)->ds_guid;
2913 zb->zbm_redaction_obj = 0;
2915 if (dsl_dataset_is_zapified(fromds)) {
2917 dspp.dp->dp_meta_objset,
2919 DS_FIELD_IVSET_GUID, 8, 1,
2920 &zb->zbm_ivset_guid);
2923 dsl_dataset_rele(fromds, FTAG);
2926 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
2927 err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
2929 if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
2931 dsl_dataset_phys(dspp.to_ds)->ds_guid)
2936 /* dmu_send_impl will call dsl_pool_rele for us. */
2937 err = dmu_send_impl(&dspp);
2939 if (dspp.fromredactsnaps)
2940 kmem_free(dspp.fromredactsnaps,
2941 dspp.numfromredactsnaps *
2943 dsl_pool_rele(dspp.dp, FTAG);
2946 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
2947 err = dmu_send_impl(&dspp);
2950 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
2952 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
2957 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
2958 uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
2963 * Assume that space (both on-disk and in-stream) is dominated by
2964 * data. We will adjust for indirect blocks and the copies property,
2965 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
2968 uint64_t recordsize;
2969 uint64_t record_count;
2971 VERIFY0(dmu_objset_from_ds(ds, &os));
2973 /* Assume all (uncompressed) blocks are recordsize. */
2974 if (zfs_override_estimate_recordsize != 0) {
2975 recordsize = zfs_override_estimate_recordsize;
2976 } else if (os->os_phys->os_type == DMU_OST_ZVOL) {
2977 err = dsl_prop_get_int_ds(ds,
2978 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
2980 err = dsl_prop_get_int_ds(ds,
2981 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
2985 record_count = uncompressed / recordsize;
2988 * If we're estimating a send size for a compressed stream, use the
2989 * compressed data size to estimate the stream size. Otherwise, use the
2990 * uncompressed data size.
2992 size = stream_compressed ? compressed : uncompressed;
2995 * Subtract out approximate space used by indirect blocks.
2996 * Assume most space is used by data blocks (non-indirect, non-dnode).
2997 * Assume no ditto blocks or internal fragmentation.
2999 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
3002 size -= record_count * sizeof (blkptr_t);
3004 /* Add in the space for the record associated with each block. */
3005 size += record_count * sizeof (dmu_replay_record_t);
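	/*
	 * Worked example (editorial illustration): with a 128 KiB recordsize,
	 * a 1 GiB uncompressed dataset gives record_count = 2^30 / 2^17 = 8192.
	 * The indirect-block adjustment subtracts 8192 * sizeof (blkptr_t)
	 * (8192 * 128 B = 1 MiB), and the per-record headers add
	 * 8192 * sizeof (dmu_replay_record_t) (roughly 8192 * 312 B ~= 2.4 MiB
	 * with the current stream format), so the estimate for an uncompressed
	 * stream lands near 1 GiB + 1.4 MiB before the caller adds the
	 * BEGIN/END records.
	 */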
3013 dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
3014 zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
3015 boolean_t saved, uint64_t *sizep)
3018 dsl_dataset_t *ds = origds;
3019 uint64_t uncomp, comp;
3021 ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
3022 ASSERT(fromds == NULL || frombook == NULL);
3025 * If this is a saved send we may actually be sending
3026 * from the %recv clone used for resuming.
3029 objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
3031 char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
3033 dsl_dataset_name(origds, dsname);
3034 (void) strcat(dsname, "/");
3035 (void) strlcat(dsname, recv_clone_name, sizeof (dsname));
3037 err = dsl_dataset_hold(origds->ds_dir->dd_pool,
3039 if (err != ENOENT && err != 0) {
3041 } else if (err == ENOENT) {
3045 /* check that this dataset has partially received data */
3046 err = zap_lookup(mos, ds->ds_object,
3047 DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
3049 err = SET_ERROR(err == ENOENT ? EINVAL : err);
3053 err = zap_lookup(mos, ds->ds_object,
3054 DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
3056 err = SET_ERROR(err == ENOENT ? EINVAL : err);
3061 /* tosnap must be a snapshot or the target of a saved send */
3062 if (!ds->ds_is_snapshot && ds == origds)
3063 return (SET_ERROR(EINVAL));
3065 if (fromds != NULL) {
3067 if (!fromds->ds_is_snapshot) {
3068 err = SET_ERROR(EINVAL);
3072 if (!dsl_dataset_is_before(ds, fromds, 0)) {
3073 err = SET_ERROR(EXDEV);
3077 err = dsl_dataset_space_written(fromds, ds, &used, &comp,
3081 } else if (frombook != NULL) {
3083 err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
3088 uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
3089 comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
3092 err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
3093 stream_compressed, sizep);
3095 * Add the size of the BEGIN and END records to the estimate.
3097 *sizep += 2 * sizeof (dmu_replay_record_t);
3101 dsl_dataset_rele(ds, FTAG);
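/*
 * Illustrative sketch (editorial, not part of this file): a caller that wants
 * a dry-run size for a plain snapshot-to-snapshot incremental would, with the
 * pool configuration held, invoke the estimator roughly like this; "dp",
 * "tosnap_ds", and "fromsnap_ds" are hypothetical pool/dataset holds and
 * error handling is omitted:
 *
 *	uint64_t size;
 *	dsl_pool_config_enter(dp, FTAG);
 *	err = dmu_send_estimate_fast(tosnap_ds, fromsnap_ds, NULL,
 *	    B_FALSE, B_FALSE, &size);	// not compressed, not a saved send
 *	dsl_pool_config_exit(dp, FTAG);
 */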
3105 ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
3106 "Allow sending corrupt data");
3108 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW,
3109 "Maximum send queue length");
3111 ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
3112 "Send unmodified spill blocks");
3114 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW,
3115 "Maximum send queue length for non-prefetch queues");
3117 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW,
3118 "Send queue fill fraction");
3120 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW,
3121 "Send queue fill fraction for non-prefetch queues");
3123 ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW,
3124 "Override block size estimate with fixed size");