/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_file.h>
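
/*
 * Receive-side tunables, summarized here for orientation (see the module
 * parameter documentation for the authoritative definitions):
 * zfs_recv_queue_length bounds, in bytes, the queue of records buffered
 * between the stream-reading thread and the writer thread;
 * zfs_recv_queue_ff is the bqueue fill fraction used to throttle the
 * producer; and zfs_recv_write_batch_size caps how many bytes of WRITE
 * records are accumulated into one transaction before the batch is
 * flushed (see receive_process_write_record() below).
 */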

static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static int zfs_recv_queue_ff = 20;
static int zfs_recv_write_batch_size = 1024 * 1024;

static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";

static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
    void *buf);

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a WRITE or SPILL, pointer to the abd containing the
	 * payload.
	 */
	abd_t *abd;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three members are used to signal to the main thread when
	 * we're done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	boolean_t resumable;
	boolean_t raw;   /* DMU_BACKUP_FEATURE_RAW set */
	boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
	boolean_t full;  /* this is a full send stream */
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	list_t write_batch;

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	boolean_t or_crypt_params_present;
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
};
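
/*
 * Note on the or_* ("object range") members above: DRR_OBJECT_RANGE
 * records appear only in raw (encrypted) streams.  The record's crypt
 * parameters are stashed here, and the DRR_OBJECT records that fall in
 * [or_firstobj, or_firstobj + or_numslots) consume them in
 * receive_object(), which then clears or_crypt_params_present.
 */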

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	proc_t *drba_proc;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;

static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_REDACT:
		DO64(drr_redact.drr_object);
		DO64(drr_redact.drr_offset);
		DO64(drr_redact.drr_length);
		DO64(drr_redact.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
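
/*
 * byteswap_record() is applied to each record when the stream was written
 * by a host of the opposite endianness, which dmu_recv_begin() detects by
 * comparing the DRR_BEGIN magic against BSWAP_64(DMU_BACKUP_MAGIC).  A
 * minimal sketch of that detection, mirroring the logic further below
 * (not part of the build):
 */
#if 0
	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		/* opposite-endian stream: swap every record on arrival */
		byteswap_record(drr);
	} else if (drrb->drr_magic != DMU_BACKUP_MAGIC) {
		return (SET_ERROR(EINVAL));	/* not a send stream */
	}
#endif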

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Check that the new stream we're trying to receive is redacted with respect to
 * a subset of the snapshots that the origin was redacted with respect to.  For
 * the reasons behind this, see the man page on redacted zfs sends and receives.
 */
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
    uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
	/*
	 * Short circuit the comparison; if we are redacted with respect to
	 * more snapshots than the origin, we can't be redacted with respect
	 * to a subset.
	 */
	if (num_redact_snaps > origin_num_snaps) {
		return (B_FALSE);
	}

	for (int i = 0; i < num_redact_snaps; i++) {
		if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
		    redact_snaps[i])) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}
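
/*
 * Worked example: if the origin was redacted with respect to snapshots
 * with GUIDs {G1, G2, G3}, a stream redacted with respect to {G1, G3} is
 * compatible (a subset), while one redacted with respect to {G1, G4} is
 * not, since G4 was never redacted from the origin.
 */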

static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
	uint64_t *origin_snaps;
	uint64_t origin_num_snaps;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	int err = 0;
	boolean_t ret = B_TRUE;
	uint64_t *redact_snaps;
	uint_t numredactsnaps;

	/*
	 * If this is a full send stream, we're safe no matter what.
	 */
	if (drrb->drr_fromguid == 0)
		return (ret);

	VERIFY(dsl_dataset_get_uint64_array_feature(origin,
	    SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));

	if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
	    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
	    0) {
		/*
		 * If the send stream was sent from the redaction bookmark or
		 * the redacted version of the dataset, then we're safe.  Verify
		 * that this is from a compatible redaction bookmark or
		 * redaction snapshot.
		 */
		if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
		    redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		/*
		 * If the stream is redacted, it must be redacted with respect
		 * to a subset of what the origin is redacted with respect to.
		 * See case number 2 in the zfs man page section on redacted zfs
		 * send.
		 */
		err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);

		if (err != 0 || !compatible_redact_snaps(origin_snaps,
		    origin_num_snaps, redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
	    drrb->drr_toguid)) {
		/*
		 * If the stream isn't redacted but the origin is, this must be
		 * one of the snapshots the origin is redacted with respect to.
		 * See case number 1 in the zfs man page section on redacted zfs
		 * send.
		 */
		err = EINVAL;
	}

	if (err != 0)
		ret = B_FALSE;
	return (ret);
}

/*
 * If we previously received a stream with --large-block, we don't support
 * receiving an incremental on top of it without --large-block.  This avoids
 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
 * records.
 */
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
	    !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
	return (0);
}
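
/*
 * For example, once "zfs send -L ... | zfs recv pool/fs" has activated
 * SPA_FEATURE_LARGE_BLOCKS on the dataset, a later incremental generated
 * without -L is rejected with ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH.
 */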

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t val;
	uint64_t children;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EEXIST) : error);

	/* Must not have children if receiving a ZVOL. */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred, drba->drba_proc);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't perform a raw receive on top of a non-raw receive */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap. Raw sends have an
			 * additional constraint that requires that
			 * no "noop" snapshots exist between fromsnap
			 * and tosnap for the IVset checking code to
			 * work properly.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap) ||
			    (raw &&
			    dsl_dataset_phys(ds)->ds_prev_snap_obj !=
			    snap->ds_object)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		if (dsl_dataset_feature_is_active(snap,
		    SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
		    snap)) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_check_large_blocks(snap, featureflags);
		if (error != 0) {
			dsl_dataset_rele(snap, FTAG);
			return (error);
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}
	}

	return (0);
}

/*
 * Check that any feature flags used in the data stream we're receiving are
 * supported by the pool we are receiving into.
 *
 * Note that some of the features we explicitly check here have additional
 * (implicit) features they depend on, but those dependencies are enforced
 * through the zfeature_register() calls declaring the features that we
 * explicitly check.
 */
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
	/*
	 * Check if there are any unsupported feature flags.
	 */
	if (!DMU_STREAM_SUPPORTED(featureflags)) {
		return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
	}

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
	 * and large_dnodes in the stream can only be used if those pool
	 * features are enabled because we don't attempt to decompress /
	 * un-embed / un-mooch / split up the blocks / dnodes during the
	 * receive process.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/*
	 * Receiving redacted streams requires that redacted datasets are
	 * enabled.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
		return (SET_ERROR(ENOTSUP));

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
	if (error != 0)
		return (error);

	/* Resumable receives require extensible datasets */
	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		/*
		 * We support unencrypted datasets below encrypted ones now,
		 * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
		 * with a dataset we may encrypt.
		 */
		if (drba->drba_dcp == NULL ||
		    drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
			dsflags |= DS_HOLD_FLAG_DECRYPT;
		}
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin != NULL &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}

			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}

			/*
			 * If the origin is redacted we need to verify that this
			 * send stream can safely be received on top of the
			 * origin.
			 */
			if (dsl_dataset_feature_is_active(origin,
			    SPA_FEATURE_REDACTED_DATASETS)) {
				if (!redact_check(drba, origin)) {
					dsl_dataset_rele_flags(origin, dsflags,
					    FTAG);
					dsl_dataset_rele_flags(ds, dsflags,
					    FTAG);
					return (SET_ERROR(EINVAL));
				}
			}

			error = recv_check_large_blocks(ds, featureflags);
			if (error != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}

			dsl_dataset_rele_flags(origin, dsflags, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	const char *tofs = drc->drc_tofs;
	uint64_t featureflags = drc->drc_featureflags;
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drrb->drr_fromguid == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, dcp, tx);
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drc->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
	    &newds));
	if (dsl_dataset_feature_is_active(newds,
	    SPA_FEATURE_REDACTED_DATASETS)) {
		/*
		 * If the origin dataset is redacted, the child will be redacted
		 * when we create it.  We clear the new dataset's
		 * redaction info; if it should be redacted, we'll fill
		 * in its information later.
		 */
		dsl_dataset_deactivate_feature(newds,
		    SPA_FEATURE_REDACTED_DATASETS, tx);
	}
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drc->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}

		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
		    &numredactsnaps) == 0) {
			VERIFY0(zap_add(mos, dsobj,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
			    sizeof (*redact_snaps), numredactsnaps,
			    redact_snaps, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
		dsl_dataset_activate_redaction(newds, redact_snaps,
		    numredactsnaps, tx);
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;
	drba->drba_cookie->drc_os = os;

	spa_history_log_internal_ds(newds, "receive", tx, " ");
}

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drc->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	dsl_dataset_t *ds;
	const char *tofs = drc->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/*
	 * This is mostly a sanity check since we should have already done these
	 * checks during a previous attempt to receive the data.
	 */
	error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
	    dp->dp_spa);
	if (error != 0)
		return (error);

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
		drc->drc_fromsnapobj = ds->ds_prev->ds_object;

	/*
	 * If we're resuming, and the send is redacted, then the original send
	 * must have been redacted, and must have been redacted with respect to
	 * the same snapshots.
	 */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t num_ds_redact_snaps;
		uint64_t *ds_redact_snaps;

		uint_t num_stream_redact_snaps;
		uint64_t *stream_redact_snaps;

		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
		    &num_stream_redact_snaps) != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		if (!dsl_dataset_get_uint64_array_feature(ds,
		    SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
		    &ds_redact_snaps)) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		for (int i = 0; i < num_ds_redact_snaps; i++) {
			if (!redact_snaps_contains(ds_redact_snaps,
			    num_ds_redact_snaps, stream_redact_snaps[i])) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}
	}

	error = recv_check_large_blocks(ds, drc->drc_featureflags);
	if (error != 0) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (error);
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
	    recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
	    != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
		    &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	ASSERT(DS_IS_INCONSISTENT(ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;
	VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
	drba->drba_cookie->drc_should_save = B_TRUE;

	spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, nvlist_t *localprops,
    nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc,
    zfs_file_t *fp, offset_t *voffp)
{
	dmu_recv_begin_arg_t drba = { 0 };
	int err;

	memset(drc, 0, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_proc = curproc;
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drc->drc_fp = fp;
	drc->drc_voff = *voffp;
	drc->drc_featureflags =
	    DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
	void *payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(drc, payloadlen,
	    payload);
	if (err != 0) {
		kmem_free(payload, payloadlen);
		return (err);
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
		    KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0) {
			kmem_free(drc->drc_next_rrd,
			    sizeof (*drc->drc_next_rrd));
			return (err);
		}
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();
	drba.drba_proc = curproc;

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
	} else {
		/*
		 * For non-raw, non-incremental, non-resuming receives the
		 * user can specify encryption parameters on the command line
		 * with "zfs recv -o". For these receives we create a dcp and
		 * pass it to the sync task. Creating the dcp will implicitly
		 * remove the encryption params from the localprops nvlist,
		 * which avoids errors when trying to set these normally
		 * read-only properties. Any other kind of receive that
		 * attempts to set these properties will fail as a result.
		 */
		if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_RAW) == 0 &&
		    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
			err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
			    localprops, hidden_args, &drba.drba_dcp);
		}

		if (err == 0) {
			err = dsl_sync_task(tofs,
			    dmu_recv_begin_check, dmu_recv_begin_sync,
			    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
			dsl_crypto_params_free(drba.drba_dcp, !!err);
		}
	}

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		nvlist_free(drc->drc_begin_nvl);
	}
	return (err);
}
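
/*
 * A sketch of the overall receive lifecycle as driven by the ioctl path
 * (see zfs_ioc_recv()); error handling elided and names abbreviated:
 */
#if 0
	dmu_recv_cookie_t drc;
	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, resumable,
	    localprops, hidden_args, origin, &drc, fp, voffp);
	if (err == 0) {
		err = dmu_recv_stream(&drc, &off);	/* consume records */
		err = dmu_recv_end(&drc, owner);	/* commit or abort */
	}
#endif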

static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;
		zfs_file_t *fp = drc->drc_fp;
		int err = zfs_file_read(fp, (char *)buf + done,
		    len - done, &resid);
		if (resid == len - done) {
			/*
			 * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
			 * that the receive was interrupted and can
			 * potentially be resumed.
			 */
			err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
		}
		drc->drc_voff += len - done - resid;
		done = len - resid;
		if (err != 0)
			return (err);
	}

	drc->drc_bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
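
/*
 * Note that a read that makes no progress is mapped to
 * ZFS_ERR_STREAM_TRUNCATED rather than a generic I/O error: a truncated
 * stream may simply mean the sender was interrupted, and a resumable
 * receive can pick up again from the saved resume state.
 */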

static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
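
/*
 * Worked example: with DN_OLD_MAX_BONUSLEN == 320 and a 128-byte blkptr_t
 * (SPA_BLKPTRSHIFT == 7), a 64-byte bonus yields 1 + ((320 - 64) >> 7) == 3
 * block pointers, while a full 320-byte bonus leaves room for only 1.
 * DMU_OT_SA bonus buffers always get exactly 1.
 */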

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
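
/*
 * The ds_resume_{object,offset,bytes}[] values recorded here are synced
 * out with the txg and are what userland later sees through the
 * "receive_resume_token" property, which "zfs send -t" consumes to
 * restart an interrupted stream.
 */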

static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
    dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
    const void *new_bonus, boolean_t *samegenp)
{
	zfs_file_info_t zoi;
	int err;

	dmu_buf_t *old_bonus_dbuf;
	err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
	if (err != 0)
		return (err);
	err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
	    &zoi);
	dmu_buf_rele(old_bonus_dbuf, FTAG);
	if (err != 0)
		return (err);
	uint64_t old_gen = zoi.zfi_generation;

	err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
	if (err != 0)
		return (err);
	uint64_t new_gen = zoi.zfi_generation;

	*samegenp = (old_gen == new_gen);
	return (0);
}

static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
    const struct drr_object *drro, const dmu_object_info_t *doi,
    const void *bonus_data,
    uint64_t *object_to_hold, uint32_t *new_blksz)
{
	uint32_t indblksz = drro->drr_indblkshift ?
	    1ULL << drro->drr_indblkshift : 0;
	int nblkptr = deduce_nblkptr(drro->drr_bonustype,
	    drro->drr_bonuslen);
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
	boolean_t do_free_range = B_FALSE;
	int err;

	*object_to_hold = drro->drr_object;

	/* nblkptr should be bounded by the bonus size and type */
	if (rwa->raw && nblkptr != drro->drr_nblkptr)
		return (SET_ERROR(EINVAL));

	/*
	 * After the previous send stream, the sending system may
	 * have freed this object, and then happened to re-allocate
	 * this object number in a later txg. In this case, we are
	 * receiving a different logical file, and the block size may
	 * appear to be different.  i.e. we may have a different
	 * block size for this object than what the send stream says.
	 * In this case we need to remove the object's contents,
	 * so that its structure can be changed and then its contents
	 * entirely replaced by subsequent WRITE records.
	 *
	 * If this is a -L (--large-block) incremental stream, and
	 * the previous stream was not -L, the block size may appear
	 * to increase.  i.e. we may have a smaller block size for
	 * this object than what the send stream says.  In this case
	 * we need to keep the object's contents and block size
	 * intact, so that we don't lose parts of the object's
	 * contents that are not changed by this incremental send
	 * stream.
	 *
	 * We can distinguish between the two above cases by using
	 * the ZPL's generation number (see
	 * receive_object_is_same_generation()).  However, we only
	 * want to rely on the generation number when absolutely
	 * necessary, because with raw receives, the generation is
	 * encrypted.  We also want to minimize dependence on the
	 * ZPL, so that other types of datasets can also be received
	 * (e.g. ZVOLs, although note that ZVOLS currently do not
	 * reallocate their objects or change their structure).
	 * Therefore, we check a number of different cases where we
	 * know it is safe to discard the object's contents, before
	 * using the ZPL's generation number to make the above
	 * distinction.
	 */
	if (drro->drr_blksz != doi->doi_data_block_size) {
		if (rwa->raw) {
			/*
			 * RAW streams always have large blocks, so
			 * we are sure that the data is not needed
			 * due to changing --large-block to be on.
			 * Which is fortunate since the bonus buffer
			 * (which contains the ZPL generation) is
			 * encrypted, and the key might not be
			 * loaded.
			 */
			do_free_range = B_TRUE;
		} else if (rwa->full) {
			/*
			 * This is a full send stream, so it always
			 * replaces what we have.  Even if the
			 * generation numbers happen to match, this
			 * can not actually be the same logical file.
			 * This is relevant when receiving a full
			 * send as a clone.
			 */
			do_free_range = B_TRUE;
		} else if (drro->drr_type !=
		    DMU_OT_PLAIN_FILE_CONTENTS ||
		    doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
			/*
			 * PLAIN_FILE_CONTENTS are the only type of
			 * objects that have ever been stored with
			 * large blocks, so we don't need the special
			 * logic below.  ZAP blocks can shrink (when
			 * there's only one block), so we don't want
			 * to hit the error below about block size
			 * only increasing.
			 */
			do_free_range = B_TRUE;
		} else if (doi->doi_max_offset <=
		    doi->doi_data_block_size) {
			/*
			 * There is only one block.  We can free it,
			 * because its contents will be replaced by a
			 * WRITE record.  This can not be the no-L ->
			 * -L case, because the no-L case would have
			 * resulted in multiple blocks.  If we
			 * supported -L -> no-L, it would not be safe
			 * to free the file's contents.  Fortunately,
			 * that is not allowed (see
			 * recv_check_large_blocks()).
			 */
			do_free_range = B_TRUE;
		} else {
			boolean_t is_same_gen;
			err = receive_object_is_same_generation(rwa->os,
			    drro->drr_object, doi->doi_bonus_type,
			    drro->drr_bonustype, bonus_data, &is_same_gen);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			if (is_same_gen) {
				/*
				 * This is the same logical file, and
				 * the block size must be increasing.
				 * It could only decrease if
				 * --large-block was changed to be
				 * off, which is checked in
				 * recv_check_large_blocks().
				 */
				if (drro->drr_blksz <=
				    doi->doi_data_block_size)
					return (SET_ERROR(EINVAL));
				/*
				 * We keep the existing blocksize and
				 * contents.
				 */
				*new_blksz =
				    doi->doi_data_block_size;
			} else {
				do_free_range = B_TRUE;
			}
		}
	}

	/* nblkptr can only decrease if the object was reallocated */
	if (nblkptr < doi->doi_nblkptr)
		do_free_range = B_TRUE;

	/* number of slots can only change on reallocation */
	if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
		do_free_range = B_TRUE;

	/*
	 * For raw sends we also check a few other fields to
	 * ensure we are preserving the objset structure exactly
	 * as it was on the send side:
	 *     - A changed indirect block size
	 *     - A smaller nlevels
	 */
	if (rwa->raw) {
		if (indblksz != doi->doi_metadata_block_size)
			do_free_range = B_TRUE;
		if (drro->drr_nlevels < doi->doi_indirection)
			do_free_range = B_TRUE;
	}

	if (do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    0, DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The dmu does not currently support decreasing nlevels
	 * or changing the number of dnode slots on an object. For
	 * non-raw sends, this does not matter and the new object
	 * can just use the previous one's nlevels. For raw sends,
	 * however, the structure of the received dnode (including
	 * nlevels and dnode slots) must match that of the send
	 * side. Therefore, instead of using dmu_object_reclaim(),
	 * we must free the object completely and call
	 * dmu_object_claim_dnsize() instead.
	 */
	if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
		err = dmu_free_long_object(rwa->os, drro->drr_object);
		if (err != 0)
			return (SET_ERROR(EINVAL));

		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		*object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * For raw receives, free everything beyond the new incoming
	 * maxblkid. Normally this would be done with a DRR_FREE
	 * record that would come after this DRR_OBJECT record is
	 * processed. However, for raw receives we manually set the
	 * maxblkid from the drr_maxblkid and so we must first free
	 * everything above that blkid to ensure the DMU is always
	 * consistent with itself. We will never free the first block
	 * of the object here because a maxblkid of 0 could indicate
	 * an object with a single block or one with no blocks. This
	 * free may be skipped when dmu_free_long_range() was called
	 * above since it covers the entire object's contents.
	 */
	if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
		    DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}
	return (0);
}

noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	int err;
	uint32_t new_blksz = drro->drr_blksz;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
		 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
		 */
		if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
		    (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
			return (SET_ERROR(EINVAL));
		}

		if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
			return (SET_ERROR(EINVAL));
		}
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	uint64_t object_to_hold;
	if (err == 0) {
		err = receive_handle_existing_object(rwa, drro, &doi, data,
		    &object_to_hold, &new_blksz);
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
			return (SET_ERROR(EINVAL));

		/* object was freed and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	} else {
		/* object is free and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object_to_hold);
	dmu_tx_hold_write(tx, object_to_hold, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object_to_hold == DMU_NEW_OBJECT) {
		/* Currently free, wants to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    new_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* Currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, rwa->spill ?
		    DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
	} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
		/*
		 * Currently allocated, the existing version of this object
		 * may reference a spill block that is no longer allocated
		 * at the source and needs to be freed.
		 */
		err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
	}

	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->or_crypt_params_present) {
		/*
		 * Set the crypt params for the buffer associated with this
		 * range of dnodes.  This causes the blkptr_t to have the
		 * same crypt params (byteorder, salt, iv, mac) as on the
		 * sending side.
		 *
		 * Since we are committing this tx now, it is possible for
		 * the dnode block to end up on-disk with the incorrect MAC,
		 * if subsequent objects in this block are received in a
		 * different txg.  However, since the dataset is marked as
		 * inconsistent, no code paths will do a non-raw read (or
		 * decrypt the block / verify the MAC). The receive code and
		 * scrub code can safely do raw reads and verify the
		 * checksum.  They don't need to verify the MAC.
		 */
		dmu_buf_t *db = NULL;
		uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;

		err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
		    offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}

		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);

		dmu_buf_rele(db, FTAG);

		rwa->or_crypt_params_present = B_FALSE;
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block size, block shift, nlevels.
		 * This will not fail because we ensured all of the
		 * blocks were freed earlier if this is a new object.
		 * For non-new objects block size and indirect block
		 * shift cannot change and nlevels can only increase.
		 */
		ASSERT3U(new_blksz, ==, drro->drr_blksz);
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));

		/*
		 * Set the maxblkid. This will always succeed because
		 * we freed all blocks beyond the new maxblkid above.
		 */
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		dnode_t *dn;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
		VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));

		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}
		dmu_buf_rele(db, FTAG);
		dnode_rele(dn, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}

noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
	    obj < DN_MAX_OBJECT && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);

		if (err != 0)
			return (err);
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
1864 * Note: if this fails, the caller will clean up any records left on the
1865 * rwa->write_batch list.
1868 flush_write_batch_impl(struct receive_writer_arg *rwa)
1873 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
1874 return (SET_ERROR(EINVAL));
1876 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
1877 struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
1879 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
1880 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
1882 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
1883 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
1885 dmu_tx_t *tx = dmu_tx_create(rwa->os);
1886 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
1887 last_drrw->drr_offset - first_drrw->drr_offset +
1888 last_drrw->drr_logical_size);
1889 err = dmu_tx_assign(tx, TXG_WAIT);
1892 dnode_rele(dn, FTAG);
1896 struct receive_record_arg *rrd;
1897 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
1898 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
1899 abd_t *abd = rrd->abd;
1901 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
1903 if (drrw->drr_logical_size != dn->dn_datablksz) {
1905 * The WRITE record is larger than the object's block
1906 * size. We must be receiving an incremental
1907 * large-block stream into a dataset that previously did
1908 * a non-large-block receive. Lightweight writes must
1909 * be exactly one block, so we need to decompress the
1910 * data (if compressed) and do a normal dmu_write().
1912 ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
1913 if (DRR_WRITE_COMPRESSED(drrw)) {
1915 abd_alloc_linear(drrw->drr_logical_size,
1918 err = zio_decompress_data(
1919 drrw->drr_compressiontype,
1920 abd, abd_to_buf(decomp_abd),
1922 abd_get_size(decomp_abd), NULL);
1925 dmu_write_by_dnode(dn,
1927 drrw->drr_logical_size,
1928 abd_to_buf(decomp_abd), tx);
1930 abd_free(decomp_abd);
1932 dmu_write_by_dnode(dn,
1934 drrw->drr_logical_size,
1935 abd_to_buf(abd), tx);
1941 dmu_write_policy(rwa->os, dn, 0, 0, &zp);
1943 enum zio_flag zio_flags = 0;
1946 zp.zp_encrypt = B_TRUE;
1947 zp.zp_compress = drrw->drr_compressiontype;
1948 zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
1949 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
1951 memcpy(zp.zp_salt, drrw->drr_salt,
1953 memcpy(zp.zp_iv, drrw->drr_iv,
1955 memcpy(zp.zp_mac, drrw->drr_mac,
1957 if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
1958 zp.zp_nopwrite = B_FALSE;
1959 zp.zp_copies = MIN(zp.zp_copies,
1960 SPA_DVAS_PER_BP - 1);
1962 zio_flags |= ZIO_FLAG_RAW;
1963 } else if (DRR_WRITE_COMPRESSED(drrw)) {
1964 ASSERT3U(drrw->drr_compressed_size, >, 0);
1965 ASSERT3U(drrw->drr_logical_size, >=,
1966 drrw->drr_compressed_size);
1967 zp.zp_compress = drrw->drr_compressiontype;
1968 zio_flags |= ZIO_FLAG_RAW_COMPRESS;
1969 } else if (rwa->byteswap) {
1971 * Note: compressed blocks never need to be
1972 * byteswapped, because WRITE records for
1973 * metadata blocks are never compressed. The
1974 * exception is raw streams, which are written
1975 * in the original byteorder, and the byteorder
1976 * bit is preserved in the BP by setting
1977 * zp_byteorder above.
1979 dmu_object_byteswap_t byteswap =
1980 DMU_OT_BYTESWAP(drrw->drr_type);
1981 dmu_ot_byteswap[byteswap].ob_func(
1983 DRR_WRITE_PAYLOAD_SIZE(drrw));
1987 * Since this data can't be read until the receive
1988 * completes, we can do a "lightweight" write for
1989 * improved performance.
1991 err = dmu_lightweight_write_by_dnode(dn,
1992 drrw->drr_offset, abd, &zp, zio_flags, tx);
1997 * This rrd is left on the list, so the caller will
1998 * free it (and the abd).
2004 * Note: If the receive fails, we want the resume stream to
2005 * start with the same record that we last successfully
2006 * received (as opposed to the next record), so that we can
2007 * verify that we are resuming from the correct location.
2008 */
2009 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2011 list_remove(&rwa->write_batch, rrd);
2012 kmem_free(rrd, sizeof (*rrd));
2013 }
2015 dmu_tx_commit(tx);
2016 dnode_rele(dn, FTAG);
2017 return (err);
2018 }
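/*
 * Worked example of the batch transaction above (illustrative): three
 * contiguous 128K WRITE records for one object at offsets 0x0, 0x20000
 * and 0x40000 are covered by a single dmu_tx_hold_write_by_dnode() of
 * last_offset - first_offset + last_logical_size = 0x60000 bytes.
 */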
2020 static int
2021 flush_write_batch(struct receive_writer_arg *rwa)
2022 {
2023 if (list_is_empty(&rwa->write_batch))
2024 return (0);
2025 int err = rwa->err;
2026 if (err == 0)
2027 err = flush_write_batch_impl(rwa);
2028 if (err != 0) {
2029 struct receive_record_arg *rrd;
2030 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2031 abd_free(rrd->abd);
2032 kmem_free(rrd, sizeof (*rrd));
2033 }
2034 }
2035 ASSERT(list_is_empty(&rwa->write_batch));
2036 return (err);
2037 }
2039 static int
2040 receive_process_write_record(struct receive_writer_arg *rwa,
2041 struct receive_record_arg *rrd)
2042 {
2043 int err = 0;
2045 ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
2046 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2048 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2049 !DMU_OT_IS_VALID(drrw->drr_type))
2050 return (SET_ERROR(EINVAL));
2052 /*
2053 * For resuming to work, records must be in increasing order
2054 * by (object, offset).
2055 */
2056 if (drrw->drr_object < rwa->last_object ||
2057 (drrw->drr_object == rwa->last_object &&
2058 drrw->drr_offset < rwa->last_offset)) {
2059 return (SET_ERROR(EINVAL));
2060 }
2062 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2063 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2064 uint64_t batch_size =
2065 MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
2066 if (first_rrd != NULL &&
2067 (drrw->drr_object != first_drrw->drr_object ||
2068 drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
2069 err = flush_write_batch(rwa);
2070 if (err != 0)
2071 return (err);
2072 }
2074 rwa->last_object = drrw->drr_object;
2075 rwa->last_offset = drrw->drr_offset;
2077 if (rwa->last_object > rwa->max_object)
2078 rwa->max_object = rwa->last_object;
2080 list_insert_tail(&rwa->write_batch, rrd);
2081 /*
2082 * Return EAGAIN to indicate that we will use this rrd again,
2083 * so the caller should not free it
2084 */
2085 return (EAGAIN);
2086 }
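/*
 * Example of the flush boundary above (illustrative): with the default
 * zfs_recv_write_batch_size of 1MB, WRITE records for one object keep
 * accumulating on write_batch until a record arrives for a different
 * object, or at an offset >= first_offset + 1MB; that record flushes
 * the batch and becomes the first entry of the next one.
 */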
2088 static int
2089 receive_write_embedded(struct receive_writer_arg *rwa,
2090 struct drr_write_embedded *drrwe, void *data)
2091 {
2092 dmu_tx_t *tx;
2093 int err;
2095 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2096 return (SET_ERROR(EINVAL));
2098 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2099 return (SET_ERROR(EINVAL));
2101 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2102 return (SET_ERROR(EINVAL));
2103 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2104 return (SET_ERROR(EINVAL));
2105 if (rwa->raw)
2106 return (SET_ERROR(EINVAL));
2108 if (drrwe->drr_object > rwa->max_object)
2109 rwa->max_object = drrwe->drr_object;
2111 tx = dmu_tx_create(rwa->os);
2113 dmu_tx_hold_write(tx, drrwe->drr_object,
2114 drrwe->drr_offset, drrwe->drr_length);
2115 err = dmu_tx_assign(tx, TXG_WAIT);
2116 if (err != 0) {
2117 dmu_tx_abort(tx);
2118 return (err);
2119 }
2121 dmu_write_embedded(rwa->os, drrwe->drr_object,
2122 drrwe->drr_offset, data, drrwe->drr_etype,
2123 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2124 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2126 /* See the save_resume_state() comment in flush_write_batch_impl(). */
2127 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2128 dmu_tx_commit(tx);
2129 return (0);
2130 }
2132 static int
2133 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2134 abd_t *abd)
2135 {
2136 dmu_buf_t *db, *db_spill;
2137 int err;
2139 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2140 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2141 return (SET_ERROR(EINVAL));
2143 /*
2144 * This is an unmodified spill block which was added to the stream
2145 * to resolve an issue with incorrectly removing spill blocks. It
2146 * should be ignored by current versions of the code which support
2147 * the DRR_FLAG_SPILL_BLOCK flag.
2148 */
2149 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
2150 abd_free(abd);
2151 return (0);
2152 }
2154 if (rwa->raw) {
2155 if (!DMU_OT_IS_VALID(drrs->drr_type) ||
2156 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
2157 drrs->drr_compressed_size == 0)
2158 return (SET_ERROR(EINVAL));
2159 }
2161 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2162 return (SET_ERROR(EINVAL));
2164 if (drrs->drr_object > rwa->max_object)
2165 rwa->max_object = drrs->drr_object;
2167 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2168 if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
2169 &db_spill)) != 0) {
2170 dmu_buf_rele(db, FTAG);
2171 return (err);
2172 }
2174 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2176 dmu_tx_hold_spill(tx, db->db_object);
2178 err = dmu_tx_assign(tx, TXG_WAIT);
2179 if (err != 0) {
2180 dmu_buf_rele(db, FTAG);
2181 dmu_buf_rele(db_spill, FTAG);
2182 dmu_tx_abort(tx);
2183 return (err);
2184 }
2186 /*
2187 * Spill blocks may both grow and shrink. When a change in size
2188 * occurs any existing dbuf must be updated to match the logical
2189 * size of the provided arc_buf_t.
2190 */
2191 if (db_spill->db_size != drrs->drr_length) {
2192 dmu_buf_will_fill(db_spill, tx);
2193 VERIFY0(dbuf_spill_set_blksz(db_spill,
2194 drrs->drr_length, tx));
2195 }
2197 arc_buf_t *abuf;
2198 if (rwa->raw) {
2199 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
2200 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
2201 rwa->byteswap;
2203 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
2204 drrs->drr_object, byteorder, drrs->drr_salt,
2205 drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
2206 drrs->drr_compressed_size, drrs->drr_length,
2207 drrs->drr_compressiontype, 0);
2208 } else {
2209 abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
2210 DMU_OT_IS_METADATA(drrs->drr_type),
2211 drrs->drr_length);
2212 if (rwa->byteswap) {
2213 dmu_object_byteswap_t byteswap =
2214 DMU_OT_BYTESWAP(drrs->drr_type);
2215 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
2216 DRR_SPILL_PAYLOAD_SIZE(drrs));
2217 }
2218 }
2220 memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
2221 abd_free(abd);
2222 dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
2224 dmu_buf_rele(db, FTAG);
2225 dmu_buf_rele(db_spill, FTAG);
2227 dmu_tx_commit(tx);
2229 return (0);
2230 }
2231 static int
2232 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2233 {
2234 int err;
2236 if (drrf->drr_length != -1ULL &&
2237 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2238 return (SET_ERROR(EINVAL));
2240 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2241 return (SET_ERROR(EINVAL));
2243 if (drrf->drr_object > rwa->max_object)
2244 rwa->max_object = drrf->drr_object;
2246 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2247 drrf->drr_offset, drrf->drr_length);
2249 return (err);
2250 }
2252 static int
2253 receive_object_range(struct receive_writer_arg *rwa,
2254 struct drr_object_range *drror)
2255 {
2256 /*
2257 * By default, we assume this block is in our native format
2258 * (ZFS_HOST_BYTEORDER). We then take into account whether
2259 * the send stream is byteswapped (rwa->byteswap). Finally,
2260 * we need to byteswap again if this particular block was
2261 * in non-native format on the send side.
2262 */
2263 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2264 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
2266 /*
2267 * Since dnode block sizes are constant, we should not need to worry
2268 * about making sure that the dnode block size is the same on the
2269 * sending and receiving sides for the time being. For non-raw sends,
2270 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
2271 * record at all). Raw sends require this record type because the
2272 * encryption parameters are used to protect an entire block of bonus
2273 * buffers. If the size of dnode blocks ever becomes variable,
2274 * handling will need to be added to ensure that dnode block sizes
2275 * match on the sending and receiving side.
2276 */
2277 if (drror->drr_numslots != DNODES_PER_BLOCK ||
2278 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
2279 !rwa->raw)
2280 return (SET_ERROR(EINVAL));
2282 if (drror->drr_firstobj > rwa->max_object)
2283 rwa->max_object = drror->drr_firstobj;
2285 /*
2286 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
2287 * so that the block of dnodes is not written out when it's empty,
2288 * and converted to a HOLE BP.
2289 */
2290 rwa->or_crypt_params_present = B_TRUE;
2291 rwa->or_firstobj = drror->drr_firstobj;
2292 rwa->or_numslots = drror->drr_numslots;
2293 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
2294 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
2295 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
2296 rwa->or_byteorder = byteorder;
2298 return (0);
2299 }
2301 /*
2302 * Until we have the ability to redact large ranges of data efficiently, we
2303 * process these records as frees.
2304 */
2305 static int
2306 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
2307 {
2308 struct drr_free drrf = {0};
2309 drrf.drr_length = drrr->drr_length;
2310 drrf.drr_object = drrr->drr_object;
2311 drrf.drr_offset = drrr->drr_offset;
2312 drrf.drr_toguid = drrr->drr_toguid;
2313 return (receive_free(rwa, &drrf));
2314 }
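/*
 * For example (illustrative): a DRR_REDACT record for object 7,
 * offset 0, length 1M is applied exactly as the equivalent DRR_FREE
 * would be, i.e. dmu_free_long_range(rwa->os, 7, 0, 1M).
 */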
2316 /* used to destroy the drc_ds on error */
2317 static void
2318 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2319 {
2320 dsl_dataset_t *ds = drc->drc_ds;
2321 ds_hold_flags_t dsflags;
2323 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2324 /*
2325 * Wait for the txg sync before cleaning up the receive. For
2326 * resumable receives, this ensures that our resume state has
2327 * been written out to disk. For raw receives, this ensures
2328 * that the user accounting code will not attempt to do anything
2329 * after we stopped receiving the dataset.
2330 */
2331 txg_wait_synced(ds->ds_dir->dd_pool, 0);
2332 ds->ds_objset->os_raw_receive = B_FALSE;
2334 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2335 if (drc->drc_resumable && drc->drc_should_save &&
2336 !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
2337 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2338 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2339 } else {
2340 char name[ZFS_MAX_DATASET_NAME_LEN];
2341 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2342 dsl_dataset_name(ds, name);
2343 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2344 (void) dsl_destroy_head(name);
2345 }
2346 }
2348 static void
2349 receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
2350 {
2351 if (drc->drc_byteswap) {
2352 (void) fletcher_4_incremental_byteswap(buf, len,
2353 &drc->drc_cksum);
2354 } else {
2355 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
2356 }
2357 }
2359 /*
2360 * Read the payload into a buffer of size len, and update the current record's
2361 * payload field.
2362 * Allocate drc->drc_next_rrd and read the next record's header into
2363 * drc->drc_next_rrd->header.
2364 * Verify checksum of payload and next record.
2365 */
2366 static int
2367 receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
2368 {
2369 int err;
2371 if (len != 0) {
2372 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2373 err = receive_read(drc, len, buf);
2374 if (err != 0)
2375 return (err);
2376 receive_cksum(drc, len, buf);
2378 /* note: rrd is NULL when reading the begin record's payload */
2379 if (drc->drc_rrd != NULL) {
2380 drc->drc_rrd->payload = buf;
2381 drc->drc_rrd->payload_size = len;
2382 drc->drc_rrd->bytes_read = drc->drc_bytes_read;
2383 }
2384 } else {
2385 ASSERT3P(buf, ==, NULL);
2386 }
2388 drc->drc_prev_cksum = drc->drc_cksum;
2390 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
2391 err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
2392 &drc->drc_next_rrd->header);
2393 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
2395 if (err != 0) {
2396 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2397 drc->drc_next_rrd = NULL;
2398 return (err);
2399 }
2400 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
2401 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2402 drc->drc_next_rrd = NULL;
2403 return (SET_ERROR(EINVAL));
2404 }
2406 /*
2407 * Note: checksum is of everything up to but not including the
2408 * checksum itself.
2409 */
2410 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2411 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2412 receive_cksum(drc,
2413 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2414 &drc->drc_next_rrd->header);
2416 zio_cksum_t cksum_orig =
2417 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2418 zio_cksum_t *cksump =
2419 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2421 if (drc->drc_byteswap)
2422 byteswap_record(&drc->drc_next_rrd->header);
2424 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2425 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
2426 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2427 drc->drc_next_rrd = NULL;
2428 return (SET_ERROR(ECKSUM));
2429 }
2431 receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
2433 return (0);
2434 }
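/*
 * Sketch of the checksum chaining above: for records R1, R2, ... the
 * fletcher-4 state verified against R2's drr_checksum covers every
 * stream byte from the beginning up to, but not including, R2's own
 * checksum field, so each record seals everything sent before it.
 */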
2436 /*
2437 * Issue the prefetch reads for any necessary indirect blocks.
2438 *
2439 * We use the object ignore list to tell us whether or not to issue prefetches
2440 * for a given object. We do this for both correctness (in case the blocksize
2441 * of an object has changed) and performance (if the object doesn't exist, don't
2442 * needlessly try to issue prefetches). We also trim the list as we go through
2443 * the stream to prevent it from growing to an unbounded size.
2444 *
2445 * The object numbers within will always be in sorted order, and any write
2446 * records we see will also be in sorted order, but they're not sorted with
2447 * respect to each other (i.e. we can get several object records before
2448 * receiving each object's write records). As a result, once we've reached a
2449 * given object number, we can safely remove any reference to lower object
2450 * numbers in the ignore list. In practice, we receive up to 32 object records
2451 * before receiving write records, so the list can have up to 32 nodes in it.
2452 */
2453 static void
2454 receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
2455 uint64_t length)
2456 {
2457 if (!objlist_exists(drc->drc_ignore_objlist, object)) {
2458 dmu_prefetch(drc->drc_os, object, 1, offset, length,
2459 ZIO_PRIORITY_SYNC_READ);
2460 }
2461 }
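/*
 * Example (illustrative): if object 10 arrives in the stream with a
 * new 1M block size while the existing on-disk object still uses 128K
 * blocks, receive_read_record() below places object 10 on
 * drc_ignore_objlist and this function skips prefetching its writes.
 */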
2463 /*
2464 * Read records off the stream, issuing any necessary prefetches.
2465 */
2466 static int
2467 receive_read_record(dmu_recv_cookie_t *drc)
2468 {
2469 int err;
2471 switch (drc->drc_rrd->header.drr_type) {
2472 case DRR_OBJECT:
2473 {
2474 struct drr_object *drro =
2475 &drc->drc_rrd->header.drr_u.drr_object;
2476 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
2477 void *buf = NULL;
2478 dmu_object_info_t doi;
2480 if (size != 0)
2481 buf = kmem_zalloc(size, KM_SLEEP);
2483 err = receive_read_payload_and_next_header(drc, size, buf);
2484 if (err != 0) {
2485 kmem_free(buf, size);
2486 return (err);
2487 }
2488 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
2489 /*
2490 * See receive_read_prefetch for an explanation why we're
2491 * storing this object in the ignore_obj_list.
2492 */
2493 if (err == ENOENT || err == EEXIST ||
2494 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2495 objlist_insert(drc->drc_ignore_objlist,
2496 drro->drr_object);
2497 err = 0;
2498 }
2499 return (err);
2500 }
2501 case DRR_FREEOBJECTS:
2502 {
2503 err = receive_read_payload_and_next_header(drc, 0, NULL);
2504 return (err);
2505 }
2506 case DRR_WRITE:
2507 {
2508 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
2509 int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
2510 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2511 err = receive_read_payload_and_next_header(drc, size,
2512 abd_to_buf(abd));
2513 if (err != 0) {
2514 abd_free(abd);
2515 return (err);
2516 }
2517 drc->drc_rrd->abd = abd;
2518 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
2519 drrw->drr_logical_size);
2520 return (err);
2521 }
2522 case DRR_WRITE_EMBEDDED:
2523 {
2524 struct drr_write_embedded *drrwe =
2525 &drc->drc_rrd->header.drr_u.drr_write_embedded;
2526 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2527 void *buf = kmem_zalloc(size, KM_SLEEP);
2529 err = receive_read_payload_and_next_header(drc, size, buf);
2530 if (err != 0) {
2531 kmem_free(buf, size);
2532 return (err);
2533 }
2535 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
2536 drrwe->drr_length);
2537 return (err);
2538 }
2539 case DRR_FREE:
2540 case DRR_REDACT:
2541 {
2542 /*
2543 * It might be beneficial to prefetch indirect blocks here, but
2544 * we don't really have the data to decide for sure.
2545 */
2546 err = receive_read_payload_and_next_header(drc, 0, NULL);
2547 return (err);
2548 }
2549 case DRR_END:
2550 {
2551 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
2552 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
2553 drre->drr_checksum))
2554 return (SET_ERROR(ECKSUM));
2555 return (0);
2556 }
2557 case DRR_SPILL:
2558 {
2559 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
2560 int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
2561 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2562 err = receive_read_payload_and_next_header(drc, size,
2563 abd_to_buf(abd));
2564 if (err != 0)
2565 abd_free(abd);
2566 else
2567 drc->drc_rrd->abd = abd;
2568 return (err);
2569 }
2570 case DRR_OBJECT_RANGE:
2571 {
2572 err = receive_read_payload_and_next_header(drc, 0, NULL);
2573 return (err);
2574 }
2576 default:
2577 return (SET_ERROR(EINVAL));
2578 }
2579 }
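/*
 * A typical sequence seen by this function (illustrative): after the
 * DRR_BEGIN payload is read separately, the stream yields DRR_OBJECT
 * and DRR_WRITE records with their payloads, and ends with DRR_END,
 * whose checksum is matched against drc_prev_cksum above.
 */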
2583 static void
2584 dprintf_drr(struct receive_record_arg *rrd, int err)
2585 {
2586 #ifdef ZFS_DEBUG
2587 switch (rrd->header.drr_type) {
2588 case DRR_OBJECT:
2589 {
2590 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2591 dprintf("drr_type = OBJECT obj = %llu type = %u "
2592 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
2593 "compress = %u dn_slots = %u err = %d\n",
2594 (u_longlong_t)drro->drr_object, drro->drr_type,
2595 drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
2596 drro->drr_checksumtype, drro->drr_compress,
2597 drro->drr_dn_slots, err);
2598 break;
2599 }
2600 case DRR_FREEOBJECTS:
2601 {
2602 struct drr_freeobjects *drrfo =
2603 &rrd->header.drr_u.drr_freeobjects;
2604 dprintf("drr_type = FREEOBJECTS firstobj = %llu "
2605 "numobjs = %llu err = %d\n",
2606 (u_longlong_t)drrfo->drr_firstobj,
2607 (u_longlong_t)drrfo->drr_numobjs, err);
2608 break;
2609 }
2610 case DRR_WRITE:
2611 {
2612 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2613 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
2614 "lsize = %llu cksumtype = %u flags = %u "
2615 "compress = %u psize = %llu err = %d\n",
2616 (u_longlong_t)drrw->drr_object, drrw->drr_type,
2617 (u_longlong_t)drrw->drr_offset,
2618 (u_longlong_t)drrw->drr_logical_size,
2619 drrw->drr_checksumtype, drrw->drr_flags,
2620 drrw->drr_compressiontype,
2621 (u_longlong_t)drrw->drr_compressed_size, err);
2622 break;
2623 }
2624 case DRR_WRITE_BYREF:
2625 {
2626 struct drr_write_byref *drrwbr =
2627 &rrd->header.drr_u.drr_write_byref;
2628 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
2629 "length = %llu toguid = %llx refguid = %llx "
2630 "refobject = %llu refoffset = %llu cksumtype = %u "
2631 "flags = %u err = %d\n",
2632 (u_longlong_t)drrwbr->drr_object,
2633 (u_longlong_t)drrwbr->drr_offset,
2634 (u_longlong_t)drrwbr->drr_length,
2635 (u_longlong_t)drrwbr->drr_toguid,
2636 (u_longlong_t)drrwbr->drr_refguid,
2637 (u_longlong_t)drrwbr->drr_refobject,
2638 (u_longlong_t)drrwbr->drr_refoffset,
2639 drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
2640 break;
2641 }
2642 case DRR_WRITE_EMBEDDED:
2643 {
2644 struct drr_write_embedded *drrwe =
2645 &rrd->header.drr_u.drr_write_embedded;
2646 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
2647 "length = %llu compress = %u etype = %u lsize = %u "
2648 "psize = %u err = %d\n",
2649 (u_longlong_t)drrwe->drr_object,
2650 (u_longlong_t)drrwe->drr_offset,
2651 (u_longlong_t)drrwe->drr_length,
2652 drrwe->drr_compression, drrwe->drr_etype,
2653 drrwe->drr_lsize, drrwe->drr_psize, err);
2654 break;
2655 }
2656 case DRR_FREE:
2657 {
2658 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2659 dprintf("drr_type = FREE obj = %llu offset = %llu "
2660 "length = %lld err = %d\n",
2661 (u_longlong_t)drrf->drr_object,
2662 (u_longlong_t)drrf->drr_offset,
2663 (longlong_t)drrf->drr_length,
2664 err);
2665 break;
2666 }
2667 case DRR_SPILL:
2668 {
2669 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2670 dprintf("drr_type = SPILL obj = %llu length = %llu "
2671 "err = %d\n", (u_longlong_t)drrs->drr_object,
2672 (u_longlong_t)drrs->drr_length, err);
2673 break;
2674 }
2675 case DRR_OBJECT_RANGE:
2676 {
2677 struct drr_object_range *drror =
2678 &rrd->header.drr_u.drr_object_range;
2679 dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
2680 "numslots = %llu flags = %u err = %d\n",
2681 (u_longlong_t)drror->drr_firstobj,
2682 (u_longlong_t)drror->drr_numslots,
2683 drror->drr_flags, err);
2684 break;
2685 }
2686 default:
2687 return;
2688 }
2689 #endif
2690 }
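/*
 * Sample debug line from the DRR_WRITE case above (hypothetical
 * values): "drr_type = WRITE obj = 5 type = 19 offset = 131072
 * lsize = 131072 cksumtype = 7 flags = 0 compress = 0 psize = 131072
 * err = 28"
 */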
2692 /*
2693 * Commit the records to the pool.
2694 */
2695 static int
2696 receive_process_record(struct receive_writer_arg *rwa,
2697 struct receive_record_arg *rrd)
2698 {
2699 int err;
2701 /* Processing in order, therefore bytes_read should be increasing. */
2702 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2703 rwa->bytes_read = rrd->bytes_read;
2705 if (rrd->header.drr_type != DRR_WRITE) {
2706 err = flush_write_batch(rwa);
2707 if (err != 0) {
2708 if (rrd->abd != NULL) {
2709 abd_free(rrd->abd);
2710 rrd->abd = NULL;
2711 rrd->payload = NULL;
2712 } else if (rrd->payload != NULL) {
2713 kmem_free(rrd->payload, rrd->payload_size);
2714 rrd->payload = NULL;
2715 }
2717 return (err);
2718 }
2719 }
2721 switch (rrd->header.drr_type) {
2722 case DRR_OBJECT:
2723 {
2724 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2725 err = receive_object(rwa, drro, rrd->payload);
2726 kmem_free(rrd->payload, rrd->payload_size);
2727 rrd->payload = NULL;
2728 break;
2729 }
2730 case DRR_FREEOBJECTS:
2731 {
2732 struct drr_freeobjects *drrfo =
2733 &rrd->header.drr_u.drr_freeobjects;
2734 err = receive_freeobjects(rwa, drrfo);
2735 break;
2736 }
2737 case DRR_WRITE:
2738 {
2739 err = receive_process_write_record(rwa, rrd);
2740 if (err != EAGAIN) {
2742 * On success, receive_process_write_record() returns
2743 * EAGAIN to indicate that we do not want to free
2744 * the rrd or arc_buf.
2745 */
2746 ASSERT(err != 0);
2747 abd_free(rrd->abd);
2748 rrd->abd = NULL;
2749 }
2750 break;
2751 }
2752 case DRR_WRITE_EMBEDDED:
2753 {
2754 struct drr_write_embedded *drrwe =
2755 &rrd->header.drr_u.drr_write_embedded;
2756 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2757 kmem_free(rrd->payload, rrd->payload_size);
2758 rrd->payload = NULL;
2759 break;
2760 }
2761 case DRR_FREE:
2762 {
2763 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2764 err = receive_free(rwa, drrf);
2765 break;
2766 }
2767 case DRR_SPILL:
2768 {
2769 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2770 err = receive_spill(rwa, drrs, rrd->abd);
2771 if (err != 0)
2772 abd_free(rrd->abd);
2773 rrd->abd = NULL;
2774 rrd->payload = NULL;
2775 break;
2776 }
2777 case DRR_OBJECT_RANGE:
2778 {
2779 struct drr_object_range *drror =
2780 &rrd->header.drr_u.drr_object_range;
2781 err = receive_object_range(rwa, drror);
2782 break;
2783 }
2784 case DRR_REDACT:
2785 {
2786 struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
2787 err = receive_redact(rwa, drrr);
2788 break;
2789 }
2790 default:
2791 err = (SET_ERROR(EINVAL));
2792 }
2794 if (err != 0)
2795 dprintf_drr(rrd, err);
2797 return (err);
2798 }
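/*
 * Note on the EAGAIN contract above: for DRR_WRITE records,
 * receive_process_write_record() queues the rrd on rwa->write_batch
 * and returns EAGAIN so that neither the rrd nor its abd is freed by
 * the caller; every other record type is fully consumed here.
 */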
2800 /*
2801 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2802 * receive_process_record. When we're done, signal the main thread and exit.
2803 */
2804 static __attribute__((noreturn)) void
2805 receive_writer_thread(void *arg)
2806 {
2807 struct receive_writer_arg *rwa = arg;
2808 struct receive_record_arg *rrd;
2809 fstrans_cookie_t cookie = spl_fstrans_mark();
2811 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2812 rrd = bqueue_dequeue(&rwa->q)) {
2813 /*
2814 * If there's an error, the main thread will stop putting things
2815 * on the queue, but we need to clear everything in it before we
2816 * can exit.
2817 */
2818 int err = 0;
2819 if (rwa->err == 0) {
2820 err = receive_process_record(rwa, rrd);
2821 } else if (rrd->abd != NULL) {
2822 abd_free(rrd->abd);
2823 rrd->abd = NULL;
2824 rrd->payload = NULL;
2825 } else if (rrd->payload != NULL) {
2826 kmem_free(rrd->payload, rrd->payload_size);
2827 rrd->payload = NULL;
2828 }
2829 /*
2830 * EAGAIN indicates that this record has been saved (on
2831 * rwa->write_batch), and will be used again, so we don't
2832 * free it.
2833 */
2834 if (err != EAGAIN) {
2835 if (rwa->err == 0)
2836 rwa->err = err;
2837 kmem_free(rrd, sizeof (*rrd));
2838 }
2839 }
2840 kmem_free(rrd, sizeof (*rrd));
2842 int err = flush_write_batch(rwa);
2843 if (rwa->err == 0)
2844 rwa->err = err;
2846 mutex_enter(&rwa->mutex);
2847 rwa->done = B_TRUE;
2848 cv_signal(&rwa->cv);
2849 mutex_exit(&rwa->mutex);
2850 spl_fstrans_unmark(cookie);
2851 thread_exit();
2852 }
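/*
 * Shutdown handshake (sketch): dmu_recv_stream() enqueues a record
 * with eos_marker set; the loop above exits on it, the final write
 * batch is flushed, and rwa->done/rwa->cv let the main thread resume
 * and tear down the queue.
 */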
2854 static int
2855 resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
2856 {
2857 uint64_t val;
2858 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
2859 uint64_t dsobj = dmu_objset_id(drc->drc_os);
2860 uint64_t resume_obj, resume_off;
2862 if (nvlist_lookup_uint64(begin_nvl,
2863 "resume_object", &resume_obj) != 0 ||
2864 nvlist_lookup_uint64(begin_nvl,
2865 "resume_offset", &resume_off) != 0) {
2866 return (SET_ERROR(EINVAL));
2867 }
2868 VERIFY0(zap_lookup(mos, dsobj,
2869 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2870 if (resume_obj != val)
2871 return (SET_ERROR(EINVAL));
2872 VERIFY0(zap_lookup(mos, dsobj,
2873 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2874 if (resume_off != val)
2875 return (SET_ERROR(EINVAL));
2877 return (0);
2878 }
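/*
 * Illustrative resume handshake: a resuming stream (e.g. one produced
 * by "zfs send -t <token>") carries resume_object and resume_offset in
 * its BEGIN nvlist; both must match the DS_FIELD_RESUME_* values
 * previously persisted on the dataset, or the receive is rejected.
 */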
2880 /*
2881 * Read in the stream's records, one by one, and apply them to the pool. There
2882 * are two threads involved; the thread that calls this function will spin up a
2883 * worker thread, read the records off the stream one by one, and issue
2884 * prefetches for any necessary indirect blocks. It will then push the records
2885 * onto an internal blocking queue. The worker thread will pull the records off
2886 * the queue, and actually write the data into the DMU. This way, the worker
2887 * thread doesn't have to wait for reads to complete, since everything it needs
2888 * (the indirect blocks) will be prefetched.
2890 * NB: callers *must* call dmu_recv_end() if this succeeds.
2891 */
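/*
 * Rough data flow between the two threads (sketch):
 *
 *   main thread                              receive_writer_thread
 *   receive_read_record()  --rrd-->  rwa->q  -->  receive_process_record()
 *   (stream reads + prefetches)     (bqueue)      (batched DMU writes)
 */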
2892 int
2893 dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
2894 {
2895 int err = 0;
2896 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
2898 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
2899 uint64_t bytes = 0;
2900 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2901 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2902 sizeof (bytes), 1, &bytes);
2903 drc->drc_bytes_read += bytes;
2904 }
2906 drc->drc_ignore_objlist = objlist_create();
2908 /* these were verified in dmu_recv_begin */
2909 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2910 DMU_SUBSTREAM);
2911 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2913 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2914 ASSERT0(drc->drc_os->os_encrypted &&
2915 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
2917 /* handle DSL encryption key payload */
2918 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
2919 nvlist_t *keynvl = NULL;
2921 ASSERT(drc->drc_os->os_encrypted);
2922 ASSERT(drc->drc_raw);
2924 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
2925 &keynvl);
2926 if (err != 0)
2927 goto out;
2929 /*
2930 * If this is a new dataset we set the key immediately.
2931 * Otherwise we don't want to change the key until we
2932 * are sure the rest of the receive succeeded so we stash
2933 * the keynvl away until then.
2934 */
2935 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
2936 drc->drc_ds->ds_object, drc->drc_fromsnapobj,
2937 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
2938 if (err != 0)
2939 goto out;
2941 /* see comment in dmu_recv_end_sync() */
2942 drc->drc_ivset_guid = 0;
2943 (void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
2944 &drc->drc_ivset_guid);
2946 if (!drc->drc_newfs)
2947 drc->drc_keynvl = fnvlist_dup(keynvl);
2948 }
2950 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2951 err = resume_check(drc, drc->drc_begin_nvl);
2952 if (err != 0)
2953 goto out;
2954 }
2956 /*
2957 * If we failed before this point we will clean up any new resume
2958 * state that was created. Now that we've gotten past the initial
2959 * checks we are ok to retain that resume state.
2960 */
2961 drc->drc_should_save = B_TRUE;
2963 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
2964 MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
2965 offsetof(struct receive_record_arg, node));
2966 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
2967 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
2968 rwa->os = drc->drc_os;
2969 rwa->byteswap = drc->drc_byteswap;
2970 rwa->resumable = drc->drc_resumable;
2971 rwa->raw = drc->drc_raw;
2972 rwa->spill = drc->drc_spill;
2973 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
2974 rwa->os->os_raw_receive = drc->drc_raw;
2975 list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
2976 offsetof(struct receive_record_arg, node.bqn_node));
2978 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
2979 TS_RUN, minclsyspri);
2980 /*
2981 * We're reading rwa->err without locks, which is safe since we are the
2982 * only reader, and the worker thread is the only writer. It's ok if we
2983 * miss a write for an iteration or two of the loop, since the writer
2984 * thread will keep freeing records we send it until we send it an eos
2985 * marker.
2986 *
2987 * We can leave this loop in 3 ways: First, if rwa->err is
2988 * non-zero. In that case, the writer thread will free the rrd we just
2989 * pushed. Second, if we're interrupted; in that case, either it's the
2990 * first loop and drc->drc_rrd was never allocated, or it's later, and
2991 * drc->drc_rrd has been handed off to the writer thread who will free
2992 * it. Finally, if receive_read_record fails or we're at the end of the
2993 * stream, then we free drc->drc_rrd and exit.
2994 */
2995 while (rwa->err == 0) {
2996 if (issig(JUSTLOOKING) && issig(FORREAL)) {
2997 err = SET_ERROR(EINTR);
2998 break;
2999 }
3001 ASSERT3P(drc->drc_rrd, ==, NULL);
3002 drc->drc_rrd = drc->drc_next_rrd;
3003 drc->drc_next_rrd = NULL;
3004 /* Allocates and loads header into drc->drc_next_rrd */
3005 err = receive_read_record(drc);
3007 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
3008 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
3009 drc->drc_rrd = NULL;
3010 break;
3011 }
3013 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3014 sizeof (struct receive_record_arg) +
3015 drc->drc_rrd->payload_size);
3016 drc->drc_rrd = NULL;
3017 }
3019 ASSERT3P(drc->drc_rrd, ==, NULL);
3020 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
3021 drc->drc_rrd->eos_marker = B_TRUE;
3022 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3024 mutex_enter(&rwa->mutex);
3025 while (!rwa->done) {
3027 * We need to use cv_wait_sig() so that any process that may
3028 * be sleeping here can still fork.
3030 (void) cv_wait_sig(&rwa->cv, &rwa->mutex);
3031 }
3032 mutex_exit(&rwa->mutex);
3035 * If we are receiving a full stream as a clone, all object IDs which
3036 * are greater than the maximum ID referenced in the stream are
3037 * by definition unused and must be freed.
3039 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
3040 uint64_t obj = rwa->max_object + 1;
3041 int free_err = 0;
3042 int next_err = 0;
3044 while (next_err == 0) {
3045 free_err = dmu_free_long_object(rwa->os, obj);
3046 if (free_err != 0 && free_err != ENOENT)
3047 break;
3049 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
3050 }
3052 if (err == 0) {
3053 if (free_err != 0 && free_err != ENOENT)
3054 err = free_err;
3055 else if (next_err != ESRCH)
3056 err = next_err;
3057 }
3058 }
3060 cv_destroy(&rwa->cv);
3061 mutex_destroy(&rwa->mutex);
3062 bqueue_destroy(&rwa->q);
3063 list_destroy(&rwa->write_batch);
3064 if (err == 0)
3065 err = rwa->err;
3067 out:
3068 /*
3069 * If we hit an error before we started the receive_writer_thread
3070 * we need to clean up the next_rrd we create by processing the
3071 * DRR_BEGIN record.
3072 */
3073 if (drc->drc_next_rrd != NULL)
3074 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
3077 * The objset will be invalidated by dmu_recv_end() when we do
3078 * dsl_dataset_clone_swap_sync_impl().
3079 */
3080 drc->drc_os = NULL;
3082 kmem_free(rwa, sizeof (*rwa));
3083 nvlist_free(drc->drc_begin_nvl);
3085 if (err != 0) {
3086 /*
3087 * Clean up references. If receive is not resumable,
3088 * destroy what we created, so we don't leave it in
3089 * the inconsistent state.
3090 */
3091 dmu_recv_cleanup_ds(drc);
3092 nvlist_free(drc->drc_keynvl);
3093 }
3095 objlist_destroy(drc->drc_ignore_objlist);
3096 drc->drc_ignore_objlist = NULL;
3097 *voffp = drc->drc_voff;
3098 return (err);
3099 }
3101 static int
3102 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
3103 {
3104 dmu_recv_cookie_t *drc = arg;
3105 dsl_pool_t *dp = dmu_tx_pool(tx);
3106 int error;
3108 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3110 if (!drc->drc_newfs) {
3111 dsl_dataset_t *origin_head;
3113 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
3114 if (error != 0)
3115 return (error);
3116 if (drc->drc_force) {
3118 * We will destroy any snapshots in tofs (i.e. before
3119 * origin_head) that are after the origin (which is
3120 * the snap before drc_ds, because drc_ds can not
3121 * have any snaps of its own).
3122 */
3123 uint64_t obj;
3125 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3126 while (obj !=
3127 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3128 dsl_dataset_t *snap;
3129 error = dsl_dataset_hold_obj(dp, obj, FTAG,
3130 &snap);
3131 if (error != 0)
3132 break;
3133 if (snap->ds_dir != origin_head->ds_dir)
3134 error = SET_ERROR(EINVAL);
3135 if (error == 0) {
3136 error = dsl_destroy_snapshot_check_impl(
3137 snap, tx);
3138 }
3139 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3140 dsl_dataset_rele(snap, FTAG);
3141 if (error != 0)
3142 break;
3143 }
3144 if (error != 0) {
3145 dsl_dataset_rele(origin_head, FTAG);
3146 return (error);
3147 }
3148 }
3149 if (drc->drc_keynvl != NULL) {
3150 error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
3151 drc->drc_keynvl, tx);
3152 if (error != 0) {
3153 dsl_dataset_rele(origin_head, FTAG);
3154 return (error);
3155 }
3156 }
3158 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3159 origin_head, drc->drc_force, drc->drc_owner, tx);
3160 if (error != 0) {
3161 dsl_dataset_rele(origin_head, FTAG);
3162 return (error);
3163 }
3164 error = dsl_dataset_snapshot_check_impl(origin_head,
3165 drc->drc_tosnap, tx, B_TRUE, 1,
3166 drc->drc_cred, drc->drc_proc);
3167 dsl_dataset_rele(origin_head, FTAG);
3168 if (error != 0)
3169 return (error);
3171 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3172 } else {
3173 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
3174 drc->drc_tosnap, tx, B_TRUE, 1,
3175 drc->drc_cred, drc->drc_proc);
3176 }
3177 return (error);
3178 }
3180 static void
3181 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3182 {
3183 dmu_recv_cookie_t *drc = arg;
3184 dsl_pool_t *dp = dmu_tx_pool(tx);
3185 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
3186 uint64_t newsnapobj;
3188 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3189 tx, "snap=%s", drc->drc_tosnap);
3190 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
3192 if (!drc->drc_newfs) {
3193 dsl_dataset_t *origin_head;
3195 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3196 &origin_head));
3198 if (drc->drc_force) {
3200 * Destroy any snapshots of drc_tofs (origin_head)
3201 * after the origin (the snap before drc_ds).
3202 */
3203 uint64_t obj;
3205 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3206 while (obj !=
3207 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3208 dsl_dataset_t *snap;
3209 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3210 &snap));
3211 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3212 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3213 dsl_destroy_snapshot_sync_impl(snap,
3214 B_FALSE, tx);
3215 dsl_dataset_rele(snap, FTAG);
3216 }
3217 }
3218 if (drc->drc_keynvl != NULL) {
3219 dsl_crypto_recv_raw_key_sync(drc->drc_ds,
3220 drc->drc_keynvl, tx);
3221 nvlist_free(drc->drc_keynvl);
3222 drc->drc_keynvl = NULL;
3223 }
3225 VERIFY3P(drc->drc_ds->ds_prev, ==,
3226 origin_head->ds_prev);
3228 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3229 origin_head, tx);
3230 /*
3231 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
3232 * so drc_os is no longer valid.
3233 */
3234 drc->drc_os = NULL;
3236 dsl_dataset_snapshot_sync_impl(origin_head,
3237 drc->drc_tosnap, tx);
3239 /* set snapshot's creation time and guid */
3240 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3241 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3242 drc->drc_drrb->drr_creation_time;
3243 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3244 drc->drc_drrb->drr_toguid;
3245 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3246 ~DS_FLAG_INCONSISTENT;
3248 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3249 dsl_dataset_phys(origin_head)->ds_flags &=
3250 ~DS_FLAG_INCONSISTENT;
3252 newsnapobj =
3253 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3255 dsl_dataset_rele(origin_head, FTAG);
3256 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3258 if (drc->drc_owner != NULL)
3259 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3260 } else {
3261 dsl_dataset_t *ds = drc->drc_ds;
3263 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3265 /* set snapshot's creation time and guid */
3266 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3267 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3268 drc->drc_drrb->drr_creation_time;
3269 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3270 drc->drc_drrb->drr_toguid;
3271 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3272 ~DS_FLAG_INCONSISTENT;
3274 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3275 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3276 if (dsl_dataset_has_resume_receive_state(ds)) {
3277 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3278 DS_FIELD_RESUME_FROMGUID, tx);
3279 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3280 DS_FIELD_RESUME_OBJECT, tx);
3281 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3282 DS_FIELD_RESUME_OFFSET, tx);
3283 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3284 DS_FIELD_RESUME_BYTES, tx);
3285 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3286 DS_FIELD_RESUME_TOGUID, tx);
3287 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3288 DS_FIELD_RESUME_TONAME, tx);
3289 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3290 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
3291 }
3292 newsnapobj =
3293 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3294 }
3296 /*
3297 * If this is a raw receive, the crypt_keydata nvlist will include
3298 * a to_ivset_guid for us to set on the new snapshot. This value
3299 * will override the value generated by the snapshot code. However,
3300 * this value may not be present, because older implementations of
3301 * the raw send code did not include this value, and we are still
3302 * allowed to receive them if the zfs_disable_ivset_guid_check
3303 * tunable is set, in which case we will leave the newly-generated
3304 * value.
3305 */
3306 if (drc->drc_raw && drc->drc_ivset_guid != 0) {
3307 dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
3308 DMU_OT_DSL_DATASET, tx);
3309 VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
3310 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
3311 &drc->drc_ivset_guid, tx));
3312 }
3314 /*
3315 * Release the hold from dmu_recv_begin. This must be done before
3316 * we return to open context, so that when we free the dataset's dnode
3317 * we can evict its bonus buffer. Since the dataset may be destroyed
3318 * at this point (and therefore won't have a valid pointer to the spa)
3319 * we release the key mapping manually here while we do have a valid
3320 * pointer, if it exists.
3321 */
3322 if (!drc->drc_raw && encrypted) {
3323 (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
3324 drc->drc_ds->ds_object, drc->drc_ds);
3325 }
3326 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
3327 }
3330 static int dmu_recv_end_modified_blocks = 3;
3332 static int
3333 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3334 {
3335 #ifdef _KERNEL
3336 /*
3337 * We will be destroying the ds; make sure its origin is unmounted if
3338 * necessary.
3339 */
3340 char name[ZFS_MAX_DATASET_NAME_LEN];
3341 dsl_dataset_name(drc->drc_ds, name);
3342 zfs_destroy_unmount_origin(name);
3343 #endif
3345 return (dsl_sync_task(drc->drc_tofs,
3346 dmu_recv_end_check, dmu_recv_end_sync, drc,
3347 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3348 }
3350 static int
3351 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3352 {
3353 return (dsl_sync_task(drc->drc_tofs,
3354 dmu_recv_end_check, dmu_recv_end_sync, drc,
3355 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3356 }
3358 int
3359 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3360 {
3361 int error;
3363 drc->drc_owner = owner;
3365 if (drc->drc_newfs)
3366 error = dmu_recv_new_end(drc);
3367 else
3368 error = dmu_recv_existing_end(drc);
3370 if (error != 0) {
3371 dmu_recv_cleanup_ds(drc);
3372 nvlist_free(drc->drc_keynvl);
3373 } else {
3374 if (drc->drc_newfs) {
3375 zvol_create_minor(drc->drc_tofs);
3376 }
3377 char *snapname = kmem_asprintf("%s@%s",
3378 drc->drc_tofs, drc->drc_tosnap);
3379 zvol_create_minor(snapname);
3380 kmem_strfree(snapname);
3381 }
3382 return (error);
3383 }
3385 /*
3386 * Return TRUE if this objset is currently being received into.
3387 */
3388 boolean_t
3389 dmu_objset_is_receiving(objset_t *os)
3390 {
3391 return (os->os_dsl_dataset != NULL &&
3392 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3393 }
3395 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
3396 "Maximum receive queue length");
3398 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
3399 "Receive queue fill fraction");
3401 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
3402 "Maximum amount of writes to batch into one transaction");