/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2022 Axcient.
 */
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>
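
/*
 * Tunables for the receive path (descriptive note).  zfs_recv_queue_length
 * bounds the number of bytes queued between the stream-reading thread and
 * the writer thread, and zfs_recv_queue_ff is that queue's fill fraction.
 * zfs_recv_write_batch_size caps how much WRITE-record data is applied in
 * a single transaction.  zfs_recv_best_effort_corrective, when nonzero,
 * makes corrective ("healing") receives continue past blocks they fail to
 * heal instead of aborting.
 */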

static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static int zfs_recv_queue_ff = 20;
static int zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;

static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
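
/*
 * An incremental receive into an existing filesystem is staged in a
 * temporary clone named "<fs>/%recv" (recv_clone_name); dmu_recv_tag is
 * the tag under which the receive owns the target dataset until
 * dmu_recv_end().
 */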

static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
    void *buf);

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a WRITE or SPILL, pointer to the abd containing the
	 * payload.
	 */
	abd_t *abd;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three members are used to signal to the main thread when
	 * we're done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	const char *tofs;
	boolean_t heal;
	boolean_t resumable;
	boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
	boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
	boolean_t full; /* this is a full send stream */
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	list_t write_batch;

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	boolean_t or_crypt_params_present;
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
	zio_t *heal_pio;
};
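
/*
 * Receive is structured as a two-thread pipeline: the main thread reads
 * and validates records from the stream, and a writer thread (fed through
 * the bqueue above) applies them to the objset.  The or_* fields carry the
 * encryption parameters from the most recent DRR_OBJECT_RANGE record to
 * the DRR_OBJECT records that follow it.
 */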

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	proc_t *drba_proc;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;

static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_REDACT:
		DO64(drr_redact.drr_object);
		DO64(drr_redact.drr_offset);
		DO64(drr_redact.drr_length);
		DO64(drr_redact.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
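
/*
 * byteswap_record() is invoked from dmu_recv_begin() (and for each
 * subsequent record) when the DRR_BEGIN magic arrives byte-swapped,
 * i.e. the stream was generated on a host of the opposite endianness.
 * Only the union arm selected by drr_type is swapped.
 */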

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Check that the new stream we're trying to receive is redacted with respect to
 * a subset of the snapshots that the origin was redacted with respect to.  For
 * the reasons behind this, see the man page on redacted zfs sends and receives.
 */
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
    uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
	/*
	 * Short circuit the comparison; if we are redacted with respect to
	 * more snapshots than the origin, we can't be redacted with respect
	 * to a subset.
	 */
	if (num_redact_snaps > origin_num_snaps) {
		return (B_FALSE);
	}

	for (int i = 0; i < num_redact_snaps; i++) {
		if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
		    redact_snaps[i])) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}

static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
	uint64_t *origin_snaps;
	uint64_t origin_num_snaps;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	int err = 0;
	boolean_t ret = B_TRUE;
	uint64_t *redact_snaps;
	uint_t numredactsnaps;

	/*
	 * If this is a full send stream, we're safe no matter what.
	 */
	if (drrb->drr_fromguid == 0)
		return (ret);

	VERIFY(dsl_dataset_get_uint64_array_feature(origin,
	    SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));

	if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
	    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
	    0) {
		/*
		 * If the send stream was sent from the redaction bookmark or
		 * the redacted version of the dataset, then we're safe.  Verify
		 * that this is from a compatible redaction bookmark or
		 * redacted dataset.
		 */
		if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
		    redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		/*
		 * If the stream is redacted, it must be redacted with respect
		 * to a subset of what the origin is redacted with respect to.
		 * See case number 2 in the zfs man page section on redacted zfs
		 * sends.
		 */
		err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);

		if (err != 0 || !compatible_redact_snaps(origin_snaps,
		    origin_num_snaps, redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
	    drrb->drr_toguid)) {
		/*
		 * If the stream isn't redacted but the origin is, this must be
		 * one of the snapshots the origin is redacted with respect to.
		 * See case number 1 in the zfs man page section on redacted zfs
		 * sends.
		 */
		err = EINVAL;
	}

	if (err != 0)
		ret = B_FALSE;
	return (ret);
}
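
/*
 * Example: if the origin was redacted with respect to snapshots {A, B}
 * and the incoming stream was redacted with respect to {A}, the receive
 * is allowed; a stream redacted with respect to {A, C} is rejected
 * because C is not among the origin's redaction snapshots.
 */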

/*
 * If we previously received a stream with --large-block, we don't support
 * receiving an incremental on top of it without --large-block.  This avoids
 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
 * records.
 */
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
	    !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
	return (0);
}

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t obj;
	uint64_t children;
	int error;
	dsl_dataset_t *snap;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &obj);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist if we're not healing it. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &obj);
	if (drba->drba_cookie->drc_heal) {
		if (error != 0)
			return (error);
	} else if (error != ENOENT) {
		return (error == 0 ? SET_ERROR(EEXIST) : error);
	}

	/* Must not have children if receiving a ZVOL. */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred, drba->drba_proc);
	if (error != 0)
		return (error);

	if (drba->drba_cookie->drc_heal) {
		/* Encryption is incompatible with embedded data. */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Healing is not supported when in 'force' mode. */
		if (drba->drba_cookie->drc_force)
			return (SET_ERROR(EINVAL));

		/* Must have keys loaded if doing encrypted non-raw recv. */
		if (encrypted && !raw) {
			if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
			    NULL, NULL) != 0)
				return (SET_ERROR(EACCES));
		}

		error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
		if (error != 0)
			return (error);

		/*
		 * When not doing best effort corrective recv healing can only
		 * be done if the send stream is for the same snapshot as the
		 * one we are trying to heal.
		 */
		if (zfs_recv_best_effort_corrective == 0 &&
		    drba->drba_cookie->drc_drrb->drr_toguid !=
		    dsl_dataset_phys(snap)->ds_guid) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(ENOTSUP));
		}
		dsl_dataset_rele(snap, FTAG);
	} else if (fromguid != 0) {
		/* Sanity check the incremental recv */
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't perform a raw receive on top of a non-raw receive */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap. Raw sends have an
			 * additional constraint that requires that
			 * no "noop" snapshots exist between fromsnap
			 * and tosnap for the IVset checking code to
			 * work properly.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap) ||
			    (raw &&
			    dsl_dataset_phys(ds)->ds_prev_snap_obj !=
			    snap->ds_object)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		if (dsl_dataset_feature_is_active(snap,
		    SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
		    snap)) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_check_large_blocks(snap, featureflags);
		if (error != 0) {
			dsl_dataset_rele(snap, FTAG);
			return (error);
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* If full and not healing then must be forced. */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}
	}

	return (0);
}
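
/*
 * Summary of the paths above: healing receives must target an existing
 * snapshot (matching the stream's toguid unless best-effort corrective
 * receive is enabled); incremental receives must find a snapshot in this
 * dsl_dir whose guid matches fromguid; and a full stream into an existing
 * filesystem is only allowed with 'force', and never onto an encrypted
 * target or raw onto an unencrypted one.
 */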

/*
 * Check that any feature flags used in the data stream we're receiving are
 * supported by the pool we are receiving into.
 *
 * Note that some of the features we explicitly check here have additional
 * (implicit) features they depend on, but those dependencies are enforced
 * through the zfeature_register() calls declaring the features that we
 * explicitly check.
 */
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
	/*
	 * Check if there are any unsupported feature flags.
	 */
	if (!DMU_STREAM_SUPPORTED(featureflags)) {
		return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
	}

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
	 * and large_dnodes in the stream can only be used if those pool
	 * features are enabled because we don't attempt to decompress /
	 * un-embed / un-mooch / split up the blocks / dnodes during the
	 * receive process.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/*
	 * Receiving redacted streams requires that redacted datasets are
	 * enabled.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
		return (SET_ERROR(ENOTSUP));

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
	if (error != 0)
		return (error);

	/* Resumable receives require extensible datasets */
	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		/*
		 * We support unencrypted datasets below encrypted ones now,
		 * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
		 * with a dataset we may encrypt.
		 */
		if (drba->drba_dcp == NULL ||
		    drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
			dsflags |= DS_HOLD_FLAG_DECRYPT;
		}
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/* healing recv must be done "into" an existing snapshot */
		if (drba->drba_cookie->drc_heal == B_TRUE)
			return (SET_ERROR(ENOTSUP));

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin != NULL &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}

			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}

			/*
			 * If the origin is redacted we need to verify that this
			 * send stream can safely be received on top of the
			 * origin.
			 */
			if (dsl_dataset_feature_is_active(origin,
			    SPA_FEATURE_REDACTED_DATASETS)) {
				if (!redact_check(drba, origin)) {
					dsl_dataset_rele_flags(origin, dsflags,
					    FTAG);
					dsl_dataset_rele_flags(ds, dsflags,
					    FTAG);
					return (SET_ERROR(EINVAL));
				}
			}

			error = recv_check_large_blocks(ds, featureflags);
			if (error != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}

			dsl_dataset_rele_flags(origin, dsflags, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
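
/*
 * The dsl_dataset_hold_flags() call above distinguishes the three begin
 * cases: error == 0 means the target exists and the stream will be staged
 * in a %recv clone, ENOENT means a full backup or clone will create a new
 * dataset, and any other error fails the receive.
 */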

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	const char *tofs = drc->drc_tofs;
	uint64_t featureflags = drc->drc_featureflags;
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drrb->drr_fromguid == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* Create temporary clone unless we're doing corrective recv */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}
		if (drc->drc_heal) {
			/* When healing we want to use the provided snapshot */
			VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
			    &dsobj));
		} else {
			dsobj = dsl_dataset_create_sync(ds->ds_dir,
			    recv_clone_name, snap, crflags, drba->drba_cred,
			    dcp, tx);
		}
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drc->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
	    &newds));
	if (dsl_dataset_feature_is_active(newds,
	    SPA_FEATURE_REDACTED_DATASETS)) {
		/*
		 * If the origin dataset is redacted, the child will be redacted
		 * when we create it.  We clear the new dataset's
		 * redaction info; if it should be redacted, we'll fill
		 * in its information later.
		 */
		dsl_dataset_deactivate_feature(newds,
		    SPA_FEATURE_REDACTED_DATASETS, tx);
	}
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drc->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}

		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
		    &numredactsnaps) == 0) {
			VERIFY0(zap_add(mos, dsobj,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
			    sizeof (*redact_snaps), numredactsnaps,
			    redact_snaps, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
		dsl_dataset_activate_redaction(newds, redact_snaps,
		    numredactsnaps, tx);
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
	    !drc->drc_heal) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;
	drba->drba_cookie->drc_os = os;

	spa_history_log_internal_ds(newds, "receive", tx, " ");
}
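
/*
 * After dmu_recv_begin_sync() the (possibly newly created) dataset is
 * owned under dmu_recv_tag and flagged DS_FLAG_INCONSISTENT, so it cannot
 * be used until the receive completes or is aborted.
 */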

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drc->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	dsl_dataset_t *ds;
	const char *tofs = drc->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/*
	 * This is mostly a sanity check since we should have already done these
	 * checks during a previous attempt to receive the data.
	 */
	error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
	    dp->dp_spa);
	if (error != 0)
		return (error);

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
		drc->drc_fromsnapobj = ds->ds_prev->ds_object;

	/*
	 * If we're resuming, and the send is redacted, then the original send
	 * must have been redacted, and must have been redacted with respect to
	 * the same snapshots.
	 */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t num_ds_redact_snaps;
		uint64_t *ds_redact_snaps;

		uint_t num_stream_redact_snaps;
		uint64_t *stream_redact_snaps;

		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
		    &num_stream_redact_snaps) != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		if (!dsl_dataset_get_uint64_array_feature(ds,
		    SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
		    &ds_redact_snaps)) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		for (int i = 0; i < num_ds_redact_snaps; i++) {
			if (!redact_snaps_contains(ds_redact_snaps,
			    num_ds_redact_snaps, stream_redact_snaps[i])) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}
	}

	error = recv_check_large_blocks(ds, drc->drc_featureflags);
	if (error != 0) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (error);
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
	    recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
	    != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
		    &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	ASSERT(DS_IS_INCONSISTENT(ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;
	VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
	drba->drba_cookie->drc_should_save = B_TRUE;

	spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t heal, boolean_t resumable, nvlist_t *localprops,
    nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc,
    zfs_file_t *fp, offset_t *voffp)
{
	dmu_recv_begin_arg_t drba = { 0 };
	int err;

	memset(drc, 0, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_heal = heal;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_proc = curproc;
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drc->drc_fp = fp;
	drc->drc_voff = *voffp;
	drc->drc_featureflags =
	    DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
	void *payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(drc, payloadlen,
	    payload);
	if (err != 0) {
		kmem_free(payload, payloadlen);
		return (err);
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
		    KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0) {
			kmem_free(drc->drc_next_rrd,
			    sizeof (*drc->drc_next_rrd));
			return (err);
		}
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();
	drba.drba_proc = curproc;

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
	} else {
		/*
		 * For non-raw, non-incremental, non-resuming receives the
		 * user can specify encryption parameters on the command line
		 * with "zfs recv -o". For these receives we create a dcp and
		 * pass it to the sync task. Creating the dcp will implicitly
		 * remove the encryption params from the localprops nvlist,
		 * which avoids errors when trying to set these normally
		 * read-only properties. Any other kind of receive that
		 * attempts to set these properties will fail as a result.
		 */
		if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_RAW) == 0 &&
		    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
			err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
			    localprops, hidden_args, &drba.drba_dcp);
		}

		if (err == 0) {
			err = dsl_sync_task(tofs,
			    dmu_recv_begin_check, dmu_recv_begin_sync,
			    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
			dsl_crypto_params_free(drba.drba_dcp, !!err);
		}
	}

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		nvlist_free(drc->drc_begin_nvl);
	}
	return (err);
}

/*
 * Holds data needed for corrective recv callback
 */
typedef struct cr_cb_data {
	uint64_t size;
	zbookmark_phys_t zb;
	spa_t *spa;
} cr_cb_data_t;

static void
corrective_read_done(zio_t *zio)
{
	cr_cb_data_t *data = zio->io_private;
	/* Corruption corrected; update error log if needed */
	if (zio->io_error == 0)
		spa_remove_error(data->spa, &data->zb);
	kmem_free(data, sizeof (cr_cb_data_t));
	abd_free(zio->io_abd);
}

/*
 * zio_rewrite the data pointed to by bp with the data from the rrd's abd.
 */
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
    struct receive_record_arg *rrd, blkptr_t *bp)
{
	int err;
	zio_t *io;
	zbookmark_phys_t zb;
	dnode_t *dn;
	abd_t *abd = rrd->abd;
	zio_cksum_t bp_cksum = bp->blk_cksum;
	enum zio_flag flags = ZIO_FLAG_SPECULATIVE |
	    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL;

	if (rwa->raw)
		flags |= ZIO_FLAG_RAW;

	err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
	if (err != 0)
		return (err);
	SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
	    dbuf_whichblock(dn, 0, drrw->drr_offset));
	dnode_rele(dn, FTAG);

	if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
		/* Decompress the stream data */
		abd_t *dabd = abd_alloc_linear(
		    drrw->drr_logical_size, B_FALSE);
		err = zio_decompress_data(drrw->drr_compressiontype,
		    abd, abd_to_buf(dabd), abd_get_size(abd),
		    abd_get_size(dabd), NULL);
		if (err != 0) {
			abd_free(dabd);
			return (err);
		}
		/* Swap in the newly decompressed data into the abd */
		abd_free(abd);
		abd = dabd;
	}

	if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		/* Recompress the data */
		abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
		    B_FALSE);
		uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
		    abd, abd_to_buf(cabd), abd_get_size(abd),
		    rwa->os->os_complevel);
		abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
		/* Swap in newly compressed data into the abd */
		abd_free(abd);
		abd = cabd;
		flags |= ZIO_FLAG_RAW_COMPRESS;
	}

	/*
	 * The stream is not encrypted but the data on-disk is.
	 * We need to re-encrypt the buf using the same
	 * encryption type, salt, iv, and mac that was used to encrypt
	 * the block previously.
	 */
	if (!rwa->raw && BP_USES_CRYPT(bp)) {
		dsl_dataset_t *ds;
		dsl_crypto_key_t *dck = NULL;
		uint8_t salt[ZIO_DATA_SALT_LEN];
		uint8_t iv[ZIO_DATA_IV_LEN];
		uint8_t mac[ZIO_DATA_MAC_LEN];
		boolean_t no_crypt = B_FALSE;
		dsl_pool_t *dp = dmu_objset_pool(rwa->os);
		abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);

		zio_crypt_decode_params_bp(bp, salt, iv);
		zio_crypt_decode_mac_bp(bp, mac);

		dsl_pool_config_enter(dp, FTAG);
		err = dsl_dataset_hold_flags(dp, rwa->tofs,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (err != 0) {
			dsl_pool_config_exit(dp, FTAG);
			abd_free(eabd);
			return (SET_ERROR(EACCES));
		}

		/* Look up the key from the spa's keystore */
		err = spa_keystore_lookup_key(rwa->os->os_spa,
		    zb.zb_objset, FTAG, &dck);
		if (err != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
			    FTAG);
			dsl_pool_config_exit(dp, FTAG);
			abd_free(eabd);
			return (SET_ERROR(EACCES));
		}

		err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
		    BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
		    mac, abd_get_size(abd), abd, eabd, &no_crypt);

		spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		dsl_pool_config_exit(dp, FTAG);

		if (err != 0) {
			abd_free(eabd);
			return (err);
		}
		/* Swap in the newly encrypted data into the abd */
		abd_free(abd);
		abd = eabd;

		/*
		 * We want to prevent zio_rewrite() from trying to
		 * encrypt the data again
		 */
		flags |= ZIO_FLAG_RAW_ENCRYPT;
	}
	rrd->abd = abd;

	io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
	    BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);

	ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
	    abd_get_size(abd) == BP_GET_PSIZE(bp));

	/* compute new bp checksum value and make sure it matches the old one */
	zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
	if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
		zio_destroy(io);
		if (zfs_recv_best_effort_corrective != 0)
			return (0);
		return (SET_ERROR(ECKSUM));
	}

	/* Correct the corruption in place */
	err = zio_wait(io);
	if (err == 0) {
		cr_cb_data_t *cb_data =
		    kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
		cb_data->spa = rwa->os->os_spa;
		cb_data->size = drrw->drr_logical_size;
		cb_data->zb = zb;
		/* Test if healing worked by re-reading the bp */
		err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
		    abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
		    drrw->drr_logical_size, corrective_read_done,
		    cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
	}
	if (err != 0 && zfs_recv_best_effort_corrective != 0)
		err = 0;

	return (err);
}
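
/*
 * In short, do_corrective_recv() transforms the stream block to match the
 * on-disk block exactly: it decompresses the stream payload if needed,
 * then recompresses and re-encrypts it with the parameters recorded in
 * the bp, and only issues the zio_rewrite() if the recomputed checksum
 * equals the bp's original checksum; a final read verifies the heal.
 */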

static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;
		zfs_file_t *fp = drc->drc_fp;
		int err = zfs_file_read(fp, (char *)buf + done,
		    len - done, &resid);
		if (resid == len - done) {
			/*
			 * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
			 * that the receive was interrupted and can
			 * potentially be resumed.
			 */
			err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
		}
		drc->drc_voff += len - done - resid;
		done = len - resid;
		if (err != 0)
			return (err);
	}

	drc->drc_bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}

static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
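
/*
 * Worked example: with classic 512-byte dnodes, DN_OLD_MAX_BONUSLEN is
 * 320 bytes and a blkptr_t is 128 bytes (SPA_BLKPTRSHIFT == 7), so a
 * zero-length bonus yields 1 + (320 >> 7) = 3 block pointers, while a
 * full 320-byte bonus leaves room for only the one mandatory block
 * pointer.
 */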

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
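
/*
 * Example: a write record for (object 7, offset 0x20000) assigned to txg
 * 34 stores its progress in ds_resume_*[34 & TXG_MASK]; when that txg
 * syncs, the values are persisted in the dataset's ZAP so an interrupted
 * receive can later resume from the last synced record.
 */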

static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
    dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
    const void *new_bonus, boolean_t *samegenp)
{
	zfs_file_info_t zoi;
	int err;

	dmu_buf_t *old_bonus_dbuf;
	err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
	if (err != 0)
		return (err);
	err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
	    &zoi);
	dmu_buf_rele(old_bonus_dbuf, FTAG);
	if (err != 0)
		return (err);
	uint64_t old_gen = zoi.zfi_generation;

	err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
	if (err != 0)
		return (err);
	uint64_t new_gen = zoi.zfi_generation;

	*samegenp = (old_gen == new_gen);
	return (0);
}

static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
    const struct drr_object *drro, const dmu_object_info_t *doi,
    const void *bonus_data,
    uint64_t *object_to_hold, uint32_t *new_blksz)
{
	uint32_t indblksz = drro->drr_indblkshift ?
	    1ULL << drro->drr_indblkshift : 0;
	int nblkptr = deduce_nblkptr(drro->drr_bonustype,
	    drro->drr_bonuslen);
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
	boolean_t do_free_range = B_FALSE;
	int err;

	*object_to_hold = drro->drr_object;

	/* nblkptr should be bounded by the bonus size and type */
	if (rwa->raw && nblkptr != drro->drr_nblkptr)
		return (SET_ERROR(EINVAL));

	/*
	 * After the previous send stream, the sending system may
	 * have freed this object, and then happened to re-allocate
	 * this object number in a later txg. In this case, we are
	 * receiving a different logical file, and the block size may
	 * appear to be different.  i.e. we may have a different
	 * block size for this object than what the send stream says.
	 * In this case we need to remove the object's contents,
	 * so that its structure can be changed and then its contents
	 * entirely replaced by subsequent WRITE records.
	 *
	 * If this is a -L (--large-block) incremental stream, and
	 * the previous stream was not -L, the block size may appear
	 * to increase.  i.e. we may have a smaller block size for
	 * this object than what the send stream says.  In this case
	 * we need to keep the object's contents and block size
	 * intact, so that we don't lose parts of the object's
	 * contents that are not changed by this incremental send
	 * stream.
	 *
	 * We can distinguish between the two above cases by using
	 * the ZPL's generation number (see
	 * receive_object_is_same_generation()).  However, we only
	 * want to rely on the generation number when absolutely
	 * necessary, because with raw receives, the generation is
	 * encrypted.  We also want to minimize dependence on the
	 * ZPL, so that other types of datasets can also be received
	 * (e.g. ZVOLs, although note that ZVOLS currently do not
	 * reallocate their objects or change their structure).
	 * Therefore, we check a number of different cases where we
	 * know it is safe to discard the object's contents, before
	 * using the ZPL's generation number to make the above
	 * distinction.
	 */
	if (drro->drr_blksz != doi->doi_data_block_size) {
		if (rwa->raw) {
			/*
			 * RAW streams always have large blocks, so
			 * we are sure that the data is not needed
			 * due to changing --large-block to be on.
			 * Which is fortunate since the bonus buffer
			 * (which contains the ZPL generation) is
			 * encrypted, and the key might not be
			 * loaded.
			 */
			do_free_range = B_TRUE;
		} else if (rwa->full) {
			/*
			 * This is a full send stream, so it always
			 * replaces what we have.  Even if the
			 * generation numbers happen to match, this
			 * can not actually be the same logical file.
			 * This is relevant when receiving a full
			 * send as a clone.
			 */
			do_free_range = B_TRUE;
		} else if (drro->drr_type !=
		    DMU_OT_PLAIN_FILE_CONTENTS ||
		    doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
			/*
			 * PLAIN_FILE_CONTENTS are the only type of
			 * objects that have ever been stored with
			 * large blocks, so we don't need the special
			 * logic below.  ZAP blocks can shrink (when
			 * there's only one block), so we don't want
			 * to hit the error below about block size
			 * only increasing.
			 */
			do_free_range = B_TRUE;
		} else if (doi->doi_max_offset <=
		    doi->doi_data_block_size) {
			/*
			 * There is only one block.  We can free it,
			 * because its contents will be replaced by a
			 * WRITE record.  This can not be the no-L ->
			 * -L case, because the no-L case would have
			 * resulted in multiple blocks.  If we
			 * supported -L -> no-L, it would not be safe
			 * to free the file's contents.  Fortunately,
			 * that is not allowed (see
			 * recv_check_large_blocks()).
			 */
			do_free_range = B_TRUE;
		} else {
			boolean_t is_same_gen;
			err = receive_object_is_same_generation(rwa->os,
			    drro->drr_object, doi->doi_bonus_type,
			    drro->drr_bonustype, bonus_data, &is_same_gen);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			if (is_same_gen) {
				/*
				 * This is the same logical file, and
				 * the block size must be increasing.
				 * It could only decrease if
				 * --large-block was changed to be
				 * off, which is checked in
				 * recv_check_large_blocks().
				 */
				if (drro->drr_blksz <=
				    doi->doi_data_block_size)
					return (SET_ERROR(EINVAL));
				/*
				 * We keep the existing blocksize and
				 * contents.
				 */
				*new_blksz =
				    doi->doi_data_block_size;
			} else {
				do_free_range = B_TRUE;
			}
		}
	}

	/* nblkptr can only decrease if the object was reallocated */
	if (nblkptr < doi->doi_nblkptr)
		do_free_range = B_TRUE;

	/* number of slots can only change on reallocation */
	if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
		do_free_range = B_TRUE;

	/*
	 * For raw sends we also check a few other fields to
	 * ensure we are preserving the objset structure exactly
	 * as it was on the send side:
	 *     - A changed indirect block size
	 *     - A smaller nlevels
	 */
	if (rwa->raw) {
		if (indblksz != doi->doi_metadata_block_size)
			do_free_range = B_TRUE;
		if (drro->drr_nlevels < doi->doi_indirection)
			do_free_range = B_TRUE;
	}

	if (do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    0, DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The dmu does not currently support decreasing nlevels
	 * or changing the number of dnode slots on an object. For
	 * non-raw sends, this does not matter and the new object
	 * can just use the previous one's nlevels. For raw sends,
	 * however, the structure of the received dnode (including
	 * nlevels and dnode slots) must match that of the send
	 * side. Therefore, instead of using dmu_object_reclaim(),
	 * we must free the object completely and call
	 * dmu_object_claim_dnsize() instead.
	 */
	if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
		err = dmu_free_long_object(rwa->os, drro->drr_object);
		if (err != 0)
			return (SET_ERROR(EINVAL));

		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		*object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * For raw receives, free everything beyond the new incoming
	 * maxblkid. Normally this would be done with a DRR_FREE
	 * record that would come after this DRR_OBJECT record is
	 * processed. However, for raw receives we manually set the
	 * maxblkid from the drr_maxblkid and so we must first free
	 * everything above that blkid to ensure the DMU is always
	 * consistent with itself. We will never free the first block
	 * of the object here because a maxblkid of 0 could indicate
	 * an object with a single block or one with no blocks. This
	 * free may be skipped when dmu_free_long_range() was called
	 * above since it covers the entire object's contents.
	 */
	if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
		    DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}
	return (0);
}
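
/*
 * In short, an existing object's contents are discarded (do_free_range)
 * whenever the stream and on-disk dnodes cannot describe the same logical
 * file: a changed block size that is not the no-L -> -L case, a reduced
 * blkptr count, a changed dnode slot count, or, for raw receives, a
 * changed indirect block size or reduced nlevels.
 */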
1803 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1806 dmu_object_info_t doi;
1809 uint32_t new_blksz = drro->drr_blksz;
1810 uint8_t dn_slots = drro->drr_dn_slots != 0 ?
1811 drro->drr_dn_slots : DNODE_MIN_SLOTS;
1813 if (drro->drr_type == DMU_OT_NONE ||
1814 !DMU_OT_IS_VALID(drro->drr_type) ||
1815 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1816 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1817 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1818 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1819 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1820 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1821 drro->drr_bonuslen >
1822 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
1824 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
1825 return (SET_ERROR(EINVAL));
1830 * We should have received a DRR_OBJECT_RANGE record
1831 * containing this block and stored it in rwa.
1833 if (drro->drr_object < rwa->or_firstobj ||
1834 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
1835 drro->drr_raw_bonuslen < drro->drr_bonuslen ||
1836 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
1837 drro->drr_nlevels > DN_MAX_LEVELS ||
1838 drro->drr_nblkptr > DN_MAX_NBLKPTR ||
1839 DN_SLOTS_TO_BONUSLEN(dn_slots) <
1840 drro->drr_raw_bonuslen)
1841 return (SET_ERROR(EINVAL));
1844 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
1845 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
1847 if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
1848 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
1849 return (SET_ERROR(EINVAL));
1852 if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
1853 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
1854 return (SET_ERROR(EINVAL));
1858 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1860 if (err != 0 && err != ENOENT && err != EEXIST)
1861 return (SET_ERROR(EINVAL));
1863 if (drro->drr_object > rwa->max_object)
1864 rwa->max_object = drro->drr_object;
1867 * If we are losing blkptrs or changing the block size this must
1868 * be a new file instance. We must clear out the previous file
1869 * contents before we can change this type of metadata in the dnode.
1870 * Raw receives will also check that the indirect structure of the
1871 * dnode hasn't changed.
1873 uint64_t object_to_hold;
1875 err = receive_handle_existing_object(rwa, drro, &doi, data,
1876 &object_to_hold, &new_blksz);
1877 } else if (err == EEXIST) {
1879 * The object requested is currently an interior slot of a
1880 * multi-slot dnode. This will be resolved when the next txg
1881 * is synced out, since the send stream will have told us
1882 * to free this slot when we freed the associated dnode
1883 * earlier in the stream.
1885 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1887 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
1888 return (SET_ERROR(EINVAL));
1890 /* object was freed and we are about to allocate a new one */
1891 object_to_hold = DMU_NEW_OBJECT;
1893 /* object is free and we are about to allocate a new one */
1894 object_to_hold = DMU_NEW_OBJECT;
1898 * If this is a multi-slot dnode there is a chance that this
1899 * object will expand into a slot that is already used by
1900 * another object from the previous snapshot. We must free
1901 * these objects before we attempt to allocate the new dnode.
1904 boolean_t need_sync = B_FALSE;
1906 for (uint64_t slot = drro->drr_object + 1;
1907 slot < drro->drr_object + dn_slots;
1909 dmu_object_info_t slot_doi;
1911 err = dmu_object_info(rwa->os, slot, &slot_doi);
1912 if (err == ENOENT || err == EEXIST)
1917 err = dmu_free_long_object(rwa->os, slot);
1925 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1928 tx = dmu_tx_create(rwa->os);
1929 dmu_tx_hold_bonus(tx, object_to_hold);
1930 dmu_tx_hold_write(tx, object_to_hold, 0, 0);
1931 err = dmu_tx_assign(tx, TXG_WAIT);
1937 if (object_to_hold == DMU_NEW_OBJECT) {
1938 /* Currently free, wants to be allocated */
1939 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
1940 drro->drr_type, new_blksz,
1941 drro->drr_bonustype, drro->drr_bonuslen,
1942 dn_slots << DNODE_SHIFT, tx);
1943 } else if (drro->drr_type != doi.doi_type ||
1944 new_blksz != doi.doi_data_block_size ||
1945 drro->drr_bonustype != doi.doi_bonus_type ||
1946 drro->drr_bonuslen != doi.doi_bonus_size) {
1947 /* Currently allocated, but with different properties */
1948 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
1949 drro->drr_type, new_blksz,
1950 drro->drr_bonustype, drro->drr_bonuslen,
1951 dn_slots << DNODE_SHIFT, rwa->spill ?
1952 DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
1953 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
1955 * Currently allocated, the existing version of this object
1956 * may reference a spill block that is no longer allocated
1957 * at the source and needs to be freed.
1959 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
1964 return (SET_ERROR(EINVAL));
1967 if (rwa->or_crypt_params_present) {
1969 * Set the crypt params for the buffer associated with this
1970 * range of dnodes. This causes the blkptr_t to have the
1971 * same crypt params (byteorder, salt, iv, mac) as on the
1974 * Since we are committing this tx now, it is possible for
1975 * the dnode block to end up on-disk with the incorrect MAC,
1976 * if subsequent objects in this block are received in a
1977 * different txg. However, since the dataset is marked as
1978 * inconsistent, no code paths will do a non-raw read (or
1979 * decrypt the block / verify the MAC). The receive code and
1980 * scrub code can safely do raw reads and verify the
1981 * checksum. They don't need to verify the MAC.
1983 dmu_buf_t *db = NULL;
1984 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
1986 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
1987 offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
1990 return (SET_ERROR(EINVAL));
1993 dmu_buf_set_crypt_params(db, rwa->or_byteorder,
1994 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
1996 dmu_buf_rele(db, FTAG);
1998 rwa->or_crypt_params_present = B_FALSE;
2001 dmu_object_set_checksum(rwa->os, drro->drr_object,
2002 drro->drr_checksumtype, tx);
2003 dmu_object_set_compress(rwa->os, drro->drr_object,
2004 drro->drr_compress, tx);
2006 /* handle more restrictive dnode structuring for raw recvs */
2007 if (rwa->raw) {
2009 * Set the indirect block size, block shift, nlevels.
2010 * This will not fail because we ensured all of the
2011 * blocks were freed earlier if this is a new object.
2012 * For non-new objects block size and indirect block
2013 * shift cannot change and nlevels can only increase.
2015 ASSERT3U(new_blksz, ==, drro->drr_blksz);
2016 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
2017 drro->drr_blksz, drro->drr_indblkshift, tx));
2018 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
2019 drro->drr_nlevels, tx));
2022 * Set the maxblkid. This will always succeed because
2023 * we freed all blocks beyond the new maxblkid above.
2025 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
2026 drro->drr_maxblkid, tx));
2032 uint32_t flags = DMU_READ_NO_PREFETCH;
2034 if (rwa->raw)
2035 flags |= DMU_READ_NO_DECRYPT;
2037 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
2038 VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
2040 dmu_buf_will_dirty(db, tx);
2042 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2043 memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
2046 * Raw bonus buffers have their byteorder determined by the
2047 * DRR_OBJECT_RANGE record.
2049 if (rwa->byteswap && !rwa->raw) {
2050 dmu_object_byteswap_t byteswap =
2051 DMU_OT_BYTESWAP(drro->drr_bonustype);
2052 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2053 DRR_OBJECT_PAYLOAD_SIZE(drro));
2055 dmu_buf_rele(db, FTAG);
2056 dnode_rele(dn, FTAG);
2059 dmu_tx_commit(tx);
2061 return (0);
2064 receive_freeobjects(struct receive_writer_arg *rwa,
2065 struct drr_freeobjects *drrfo)
2070 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2071 return (SET_ERROR(EINVAL));
2073 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
2074 obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
2075 obj < DN_MAX_OBJECT && next_err == 0;
2076 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2077 dmu_object_info_t doi;
2080 err = dmu_object_info(rwa->os, obj, &doi);
2081 if (err == ENOENT)
2082 continue;
2083 else if (err != 0)
2084 return (err);
2086 err = dmu_free_long_object(rwa->os, obj);
2087 if (err != 0)
2088 return (err);
2091 if (next_err != ESRCH)
2092 return (next_err);
2093 return (0);
2097 * Note: if this fails, the caller will clean up any records left on the
2098 * rwa->write_batch list.
2101 flush_write_batch_impl(struct receive_writer_arg *rwa)
2106 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
2107 return (SET_ERROR(EINVAL));
2109 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
2110 struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
2112 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2113 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2115 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
2116 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
2118 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2119 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
2120 last_drrw->drr_offset - first_drrw->drr_offset +
2121 last_drrw->drr_logical_size);
2122 err = dmu_tx_assign(tx, TXG_WAIT);
2123 if (err != 0) {
2124 dmu_tx_abort(tx);
2125 dnode_rele(dn, FTAG);
2126 return (err);
2129 struct receive_record_arg *rrd;
2130 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
2131 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2132 abd_t *abd = rrd->abd;
2134 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
2136 if (drrw->drr_logical_size != dn->dn_datablksz) {
2138 * The WRITE record is larger than the object's block
2139 * size. We must be receiving an incremental
2140 * large-block stream into a dataset that previously did
2141 * a non-large-block receive. Lightweight writes must
2142 * be exactly one block, so we need to decompress the
2143 * data (if compressed) and do a normal dmu_write().
2145 ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
2146 if (DRR_WRITE_COMPRESSED(drrw)) {
2147 abd_t *decomp_abd =
2148 abd_alloc_linear(drrw->drr_logical_size,
2149 B_FALSE);
2151 err = zio_decompress_data(
2152 drrw->drr_compressiontype,
2153 abd, abd_to_buf(decomp_abd),
2154 abd_get_size(abd),
2155 abd_get_size(decomp_abd), NULL);
2157 if (err == 0)
2158 dmu_write_by_dnode(dn,
2159 drrw->drr_offset,
2160 drrw->drr_logical_size,
2161 abd_to_buf(decomp_abd), tx);
2163 abd_free(decomp_abd);
2164 } else {
2165 dmu_write_by_dnode(dn,
2166 drrw->drr_offset,
2167 drrw->drr_logical_size,
2168 abd_to_buf(abd), tx);
2171 } else {
2172 zio_prop_t zp;
2174 dmu_write_policy(rwa->os, dn, 0, 0, &zp);
2176 enum zio_flag zio_flags = 0;
2178 if (rwa->raw) {
2179 zp.zp_encrypt = B_TRUE;
2180 zp.zp_compress = drrw->drr_compressiontype;
2181 zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
2182 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
2183 rwa->byteswap;
2184 memcpy(zp.zp_salt, drrw->drr_salt, ZIO_DATA_SALT_LEN);
2186 memcpy(zp.zp_iv, drrw->drr_iv, ZIO_DATA_IV_LEN);
2188 memcpy(zp.zp_mac, drrw->drr_mac, ZIO_DATA_MAC_LEN);
2190 if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
2191 zp.zp_nopwrite = B_FALSE;
2192 zp.zp_copies = MIN(zp.zp_copies,
2193 SPA_DVAS_PER_BP - 1);
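/*
 * The MIN() clamp above reflects an assumption from the raw-send
 * encryption design (not stated in this file): encrypted BPs use the
 * third DVA slot to carry the salt and IV, so at most
 * SPA_DVAS_PER_BP - 1 slots remain for actual copies of the data.
 */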
2195 zio_flags |= ZIO_FLAG_RAW;
2196 } else if (DRR_WRITE_COMPRESSED(drrw)) {
2197 ASSERT3U(drrw->drr_compressed_size, >, 0);
2198 ASSERT3U(drrw->drr_logical_size, >=,
2199 drrw->drr_compressed_size);
2200 zp.zp_compress = drrw->drr_compressiontype;
2201 zio_flags |= ZIO_FLAG_RAW_COMPRESS;
2202 } else if (rwa->byteswap) {
2204 * Note: compressed blocks never need to be
2205 * byteswapped, because WRITE records for
2206 * metadata blocks are never compressed. The
2207 * exception is raw streams, which are written
2208 * in the original byteorder, and the byteorder
2209 * bit is preserved in the BP by setting
2210 * zp_byteorder above.
2212 dmu_object_byteswap_t byteswap =
2213 DMU_OT_BYTESWAP(drrw->drr_type);
2214 dmu_ot_byteswap[byteswap].ob_func(
2215 abd_to_buf(abd),
2216 DRR_WRITE_PAYLOAD_SIZE(drrw));
2220 * Since this data can't be read until the receive
2221 * completes, we can do a "lightweight" write for
2222 * improved performance.
2224 err = dmu_lightweight_write_by_dnode(dn,
2225 drrw->drr_offset, abd, &zp, zio_flags, tx);
2227 if (err != 0) {
2230 * This rrd is left on the list, so the caller will
2231 * free it (and the abd).
2233 break;
2237 * Note: If the receive fails, we want the resume stream to
2238 * start with the same record that we last successfully
2239 * received (as opposed to the next record), so that we can
2240 * verify that we are resuming from the correct location.
2242 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2244 list_remove(&rwa->write_batch, rrd);
2245 kmem_free(rrd, sizeof (*rrd));
2249 dnode_rele(dn, FTAG);
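/*
 * A minimal sketch of the batching contract implemented above (names
 * from this file; control flow simplified, not verbatim source):
 *
 *	err = receive_process_write_record(rwa, rrd);
 *	if (err == EAGAIN)
 *		;	// rrd now lives on rwa->write_batch; don't free it
 *	...
 *	err = flush_write_batch(rwa);	// one tx covers the whole batch
 *
 * flush_write_batch_impl() holds the batch's dnode once, assigns a
 * single tx spanning [first offset, last offset + last size), and then
 * replays each queued WRITE record through
 * dmu_lightweight_write_by_dnode() or dmu_write_by_dnode().
 */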
2254 flush_write_batch(struct receive_writer_arg *rwa)
2256 if (list_is_empty(&rwa->write_batch))
2257 return (0);
2258 int err = rwa->err;
2259 if (err == 0)
2260 err = flush_write_batch_impl(rwa);
2261 if (err != 0) {
2262 struct receive_record_arg *rrd;
2263 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2264 abd_free(rrd->abd);
2265 kmem_free(rrd, sizeof (*rrd));
2268 ASSERT(list_is_empty(&rwa->write_batch));
2269 return (err);
2273 receive_process_write_record(struct receive_writer_arg *rwa,
2274 struct receive_record_arg *rrd)
2278 ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
2279 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2281 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2282 !DMU_OT_IS_VALID(drrw->drr_type))
2283 return (SET_ERROR(EINVAL));
2289 int flags = DB_RF_CANFAIL;
2291 if (rwa->raw)
2292 flags |= DB_RF_NO_DECRYPT;
2294 if (rwa->byteswap) {
2295 dmu_object_byteswap_t byteswap =
2296 DMU_OT_BYTESWAP(drrw->drr_type);
2297 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
2298 DRR_WRITE_PAYLOAD_SIZE(drrw));
2301 err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
2302 drrw->drr_offset, FTAG, &dbp);
2306 /* Try to read the object to see if it needs healing */
2307 err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
2309 * We only try to heal when dbuf_read() returns ECKSUM.
2310 * Other errors (even EIO) get returned to the caller.
2311 * EIO indicates that the device is not present/accessible,
2312 * so writing to it will likely fail.
2313 * If the block is healthy, we don't want to overwrite it
2314 * unnecessarily.
2316 if (err != ECKSUM) {
2317 dmu_buf_rele(dbp, FTAG);
2318 return (err);
2320 dn = dmu_buf_dnode_enter(dbp);
2321 /* Make sure the on-disk block and recv record sizes match */
2322 if (drrw->drr_logical_size !=
2323 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT) {
2324 err = SET_ERROR(ENOTSUP);
2325 dmu_buf_dnode_exit(dbp);
2326 dmu_buf_rele(dbp, FTAG);
2327 return (err);
2329 /* Get the block pointer for the corrupted block */
2330 bp = dmu_buf_get_blkptr(dbp);
2331 err = do_corrective_recv(rwa, drrw, rrd, bp);
2332 dmu_buf_dnode_exit(dbp);
2333 dmu_buf_rele(dbp, FTAG);
2338 * For resuming to work, records must be in increasing order
2339 * by (object, offset).
2341 if (drrw->drr_object < rwa->last_object ||
2342 (drrw->drr_object == rwa->last_object &&
2343 drrw->drr_offset < rwa->last_offset)) {
2344 return (SET_ERROR(EINVAL));
2347 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2348 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2349 uint64_t batch_size =
2350 MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
2351 if (first_rrd != NULL &&
2352 (drrw->drr_object != first_drrw->drr_object ||
2353 drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
2354 err = flush_write_batch(rwa);
2359 rwa->last_object = drrw->drr_object;
2360 rwa->last_offset = drrw->drr_offset;
2362 if (rwa->last_object > rwa->max_object)
2363 rwa->max_object = rwa->last_object;
2365 list_insert_tail(&rwa->write_batch, rrd);
2367 * Return EAGAIN to indicate that we will use this rrd again,
2368 * so the caller should not free it.
2370 return (EAGAIN);
2374 receive_write_embedded(struct receive_writer_arg *rwa,
2375 struct drr_write_embedded *drrwe, void *data)
2380 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2381 return (SET_ERROR(EINVAL));
2383 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2384 return (SET_ERROR(EINVAL));
2386 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2387 return (SET_ERROR(EINVAL));
2388 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2389 return (SET_ERROR(EINVAL));
2390 if (rwa->raw)
2391 return (SET_ERROR(EINVAL));
2393 if (drrwe->drr_object > rwa->max_object)
2394 rwa->max_object = drrwe->drr_object;
2396 tx = dmu_tx_create(rwa->os);
2398 dmu_tx_hold_write(tx, drrwe->drr_object,
2399 drrwe->drr_offset, drrwe->drr_length);
2400 err = dmu_tx_assign(tx, TXG_WAIT);
2401 if (err != 0) {
2402 dmu_tx_abort(tx);
2403 return (err);
2406 dmu_write_embedded(rwa->os, drrwe->drr_object,
2407 drrwe->drr_offset, data, drrwe->drr_etype,
2408 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2409 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2411 /* See comment about resume state in flush_write_batch_impl(). */
2412 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2414 dmu_tx_commit(tx);
2416 return (0);
2418 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2419 abd_t *abd)
2421 dmu_buf_t *db, *db_spill;
2424 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2425 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2426 return (SET_ERROR(EINVAL));
2429 * This is an unmodified spill block which was added to the stream
2430 * to resolve an issue with incorrectly removing spill blocks. It
2431 * should be ignored by current versions of the code which support
2432 * the DRR_FLAG_SPILL_BLOCK flag.
2434 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
2440 if (!DMU_OT_IS_VALID(drrs->drr_type) ||
2441 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
2442 drrs->drr_compressed_size == 0)
2443 return (SET_ERROR(EINVAL));
2446 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2447 return (SET_ERROR(EINVAL));
2449 if (drrs->drr_object > rwa->max_object)
2450 rwa->max_object = drrs->drr_object;
2452 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2453 if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
2454 &db_spill)) != 0) {
2455 dmu_buf_rele(db, FTAG);
2456 return (err);
2459 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2461 dmu_tx_hold_spill(tx, db->db_object);
2463 err = dmu_tx_assign(tx, TXG_WAIT);
2464 if (err != 0) {
2465 dmu_buf_rele(db, FTAG);
2466 dmu_buf_rele(db_spill, FTAG);
2467 dmu_tx_abort(tx);
2468 return (err);
2472 * Spill blocks may both grow and shrink. When a change in size
2473 * occurs, any existing dbuf must be updated to match the logical
2474 * size of the provided arc_buf_t.
2476 if (db_spill->db_size != drrs->drr_length) {
2477 dmu_buf_will_fill(db_spill, tx);
2478 VERIFY0(dbuf_spill_set_blksz(db_spill,
2479 drrs->drr_length, tx));
2484 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
2485 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
2486 rwa->byteswap;
2488 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
2489 drrs->drr_object, byteorder, drrs->drr_salt,
2490 drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
2491 drrs->drr_compressed_size, drrs->drr_length,
2492 drrs->drr_compressiontype, 0);
2494 abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
2495 DMU_OT_IS_METADATA(drrs->drr_type),
2496 drrs->drr_length);
2497 if (rwa->byteswap) {
2498 dmu_object_byteswap_t byteswap =
2499 DMU_OT_BYTESWAP(drrs->drr_type);
2500 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
2501 DRR_SPILL_PAYLOAD_SIZE(drrs));
2505 memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
2507 dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
2509 dmu_buf_rele(db, FTAG);
2510 dmu_buf_rele(db_spill, FTAG);
2512 dmu_tx_commit(tx);
2514 return (err);
2517 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2521 if (drrf->drr_length != -1ULL &&
2522 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2523 return (SET_ERROR(EINVAL));
2525 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2526 return (SET_ERROR(EINVAL));
2528 if (drrf->drr_object > rwa->max_object)
2529 rwa->max_object = drrf->drr_object;
2531 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2532 drrf->drr_offset, drrf->drr_length);
2534 return (err);
2538 receive_object_range(struct receive_writer_arg *rwa,
2539 struct drr_object_range *drror)
2542 * By default, we assume this block is in our native format
2543 * (ZFS_HOST_BYTEORDER). We then take into account whether
2544 * the send stream is byteswapped (rwa->byteswap). Finally,
2545 * we need to byteswap again if this particular block was
2546 * in non-native format on the send side.
2548 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2549 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
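/*
 * Worked example (illustrative): a stream generated on a big-endian
 * sender and received on a little-endian host has rwa->byteswap set.
 * If this particular dnode block was itself written byteswapped on the
 * sender (DRR_IS_RAW_BYTESWAPPED set), the two inversions cancel and
 * byteorder comes out equal to ZFS_HOST_BYTEORDER again.
 */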
2552 * Since dnode block sizes are constant, we should not need to worry
2553 * about making sure that the dnode block size is the same on the
2554 * sending and receiving sides for the time being. For non-raw sends,
2555 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
2556 * record at all). Raw sends require this record type because the
2557 * encryption parameters are used to protect an entire block of bonus
2558 * buffers. If the size of dnode blocks ever becomes variable,
2559 * handling will need to be added to ensure that dnode block sizes
2560 * match on the sending and receiving side.
2562 if (drror->drr_numslots != DNODES_PER_BLOCK ||
2563 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
2564 !rwa->raw)
2565 return (SET_ERROR(EINVAL));
2567 if (drror->drr_firstobj > rwa->max_object)
2568 rwa->max_object = drror->drr_firstobj;
2571 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
2572 * so that the block of dnodes is not written out when it's empty,
2573 * and converted to a HOLE BP.
2575 rwa->or_crypt_params_present = B_TRUE;
2576 rwa->or_firstobj = drror->drr_firstobj;
2577 rwa->or_numslots = drror->drr_numslots;
2578 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
2579 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
2580 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
2581 rwa->or_byteorder = byteorder;
2583 return (0);
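/*
 * Sketch of the deferred handoff (mirrors the DRR_OBJECT path in
 * receive_object(); simplified):
 *
 *	if (rwa->or_crypt_params_present) {
 *		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
 *		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
 *		rwa->or_crypt_params_present = B_FALSE;
 *	}
 */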
2587 * Until we have the ability to redact large ranges of data efficiently, we
2588 * process these records as frees.
2591 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
2593 struct drr_free drrf = {0};
2594 drrf.drr_length = drrr->drr_length;
2595 drrf.drr_object = drrr->drr_object;
2596 drrf.drr_offset = drrr->drr_offset;
2597 drrf.drr_toguid = drrr->drr_toguid;
2598 return (receive_free(rwa, &drrf));
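/*
 * Example (values illustrative): a DRR_REDACT record covering object 5,
 * offset 0x400000, length 0x100000 is applied exactly as the equivalent
 * DRR_FREE record would be, punching a hole over that range.
 */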
2601 /* used to destroy the drc_ds on error */
2603 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2605 dsl_dataset_t *ds = drc->drc_ds;
2606 ds_hold_flags_t dsflags;
2608 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2610 * Wait for the txg sync before cleaning up the receive. For
2611 * resumable receives, this ensures that our resume state has
2612 * been written out to disk. For raw receives, this ensures
2613 * that the user accounting code will not attempt to do anything
2614 * after we stopped receiving the dataset.
2616 txg_wait_synced(ds->ds_dir->dd_pool, 0);
2617 ds->ds_objset->os_raw_receive = B_FALSE;
2619 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2620 if (drc->drc_resumable && drc->drc_should_save &&
2621 !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
2622 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2623 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2624 } else {
2625 char name[ZFS_MAX_DATASET_NAME_LEN];
2626 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2627 dsl_dataset_name(ds, name);
2628 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2630 (void) dsl_destroy_head(name);
2635 receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
2637 if (drc->drc_byteswap) {
2638 (void) fletcher_4_incremental_byteswap(buf, len,
2639 &drc->drc_cksum);
2640 } else {
2641 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
2646 * Read the payload into a buffer of size len, and update the current record's
2647 * payload field.
2648 * Allocate drc->drc_next_rrd and read the next record's header into
2649 * drc->drc_next_rrd->header.
2650 * Verify checksum of payload and next record.
2653 receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
2658 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2659 err = receive_read(drc, len, buf);
2660 if (err != 0)
2661 return (err);
2662 receive_cksum(drc, len, buf);
2664 /* note: rrd is NULL when reading the begin record's payload */
2665 if (drc->drc_rrd != NULL) {
2666 drc->drc_rrd->payload = buf;
2667 drc->drc_rrd->payload_size = len;
2668 drc->drc_rrd->bytes_read = drc->drc_bytes_read;
2670 } else {
2671 ASSERT3P(buf, ==, NULL);
2674 drc->drc_prev_cksum = drc->drc_cksum;
2676 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
2677 err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
2678 &drc->drc_next_rrd->header);
2679 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
2681 if (err != 0) {
2682 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2683 drc->drc_next_rrd = NULL;
2684 return (err);
2686 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
2687 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2688 drc->drc_next_rrd = NULL;
2689 return (SET_ERROR(EINVAL));
2693 * Note: checksum is of everything up to but not including the
2694 * checksum itself.
2696 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2697 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2698 receive_cksum(drc,
2699 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2700 &drc->drc_next_rrd->header);
2702 zio_cksum_t cksum_orig =
2703 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2704 zio_cksum_t *cksump =
2705 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2707 if (drc->drc_byteswap)
2708 byteswap_record(&drc->drc_next_rrd->header);
2710 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2711 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
2712 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2713 drc->drc_next_rrd = NULL;
2714 return (SET_ERROR(ECKSUM));
2717 receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
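/*
 * Layout sketch of the per-record checksum framing verified above
 * (field names from dmu_replay_record_t; widths not to scale):
 *
 *	|<------- covered by the compare ------->|<-- drr_checksum -->|
 *	| drr_type | drr_payloadlen | drr_u ...  |    zio_cksum_t     |
 *
 * The stored checksum (cksum_orig) is then folded back into the running
 * checksum so that the next record chains onto it.
 */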
2723 * Issue the prefetch reads for any necessary indirect blocks.
2725 * We use the object ignore list to tell us whether or not to issue prefetches
2726 * for a given object. We do this for both correctness (in case the blocksize
2727 * of an object has changed) and performance (if the object doesn't exist, don't
2728 * needlessly try to issue prefetches). We also trim the list as we go through
2729 * the stream to prevent it from growing to an unbounded size.
2731 * The object numbers within the list will always be in sorted order, and any
2732 * write records we see will also be in sorted order, but they're not sorted
2732 * records we see will also be in sorted order, but they're not sorted with
2733 * respect to each other (i.e. we can get several object records before
2734 * receiving each object's write records). As a result, once we've reached a
2735 * given object number, we can safely remove any reference to lower object
2736 * numbers in the ignore list. In practice, we receive up to 32 object records
2737 * before receiving write records, so the list can have up to 32 nodes in it.
2740 receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
2741 uint64_t length)
2743 if (!objlist_exists(drc->drc_ignore_objlist, object)) {
2744 dmu_prefetch(drc->drc_os, object, 1, offset, length,
2745 ZIO_PRIORITY_SYNC_READ);
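/*
 * Usage sketch (names from this file; simplified): the DRR_OBJECT path
 * in receive_read_record() below inserts objects whose prefetches would
 * be useless or wrong, and this function silently skips them:
 *
 *	objlist_insert(drc->drc_ignore_objlist, drro->drr_object);
 *	...
 *	receive_read_prefetch(drc, obj, off, len);	// no-op if listed
 */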
2750 * Read records off the stream, issuing any necessary prefetches.
2753 receive_read_record(dmu_recv_cookie_t *drc)
2757 switch (drc->drc_rrd->header.drr_type) {
2758 case DRR_OBJECT:
2760 struct drr_object *drro =
2761 &drc->drc_rrd->header.drr_u.drr_object;
2762 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
2764 dmu_object_info_t doi;
2767 buf = kmem_zalloc(size, KM_SLEEP);
2769 err = receive_read_payload_and_next_header(drc, size, buf);
2770 if (err != 0) {
2771 kmem_free(buf, size);
2772 return (err);
2774 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
2776 * See receive_read_prefetch() for an explanation of why we're
2777 * storing this object in drc_ignore_objlist.
2779 if (err == ENOENT || err == EEXIST ||
2780 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2781 objlist_insert(drc->drc_ignore_objlist,
2782 drro->drr_object);
2787 case DRR_FREEOBJECTS:
2789 err = receive_read_payload_and_next_header(drc, 0, NULL);
2792 case DRR_WRITE:
2794 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
2795 int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
2796 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2797 err = receive_read_payload_and_next_header(drc, size,
2798 abd_to_buf(abd));
2799 if (err != 0) {
2800 abd_free(abd);
2801 return (err);
2803 drc->drc_rrd->abd = abd;
2804 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
2805 drrw->drr_logical_size);
2808 case DRR_WRITE_EMBEDDED:
2810 struct drr_write_embedded *drrwe =
2811 &drc->drc_rrd->header.drr_u.drr_write_embedded;
2812 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2813 void *buf = kmem_zalloc(size, KM_SLEEP);
2815 err = receive_read_payload_and_next_header(drc, size, buf);
2816 if (err != 0) {
2817 kmem_free(buf, size);
2818 return (err);
2821 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
2822 drrwe->drr_length);
2825 case DRR_FREE:
2826 case DRR_REDACT:
2829 * It might be beneficial to prefetch indirect blocks here, but
2830 * we don't really have the data to decide for sure.
2832 err = receive_read_payload_and_next_header(drc, 0, NULL);
2835 case DRR_END:
2837 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
2838 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
2839 drre->drr_checksum))
2840 return (SET_ERROR(ECKSUM));
2843 case DRR_SPILL:
2845 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
2846 int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
2847 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2848 err = receive_read_payload_and_next_header(drc, size,
2849 abd_to_buf(abd));
2850 if (err != 0) {
2851 abd_free(abd);
2852 return (err);
2853 drc->drc_rrd->abd = abd;
2856 case DRR_OBJECT_RANGE:
2858 err = receive_read_payload_and_next_header(drc, 0, NULL);
2859 return (err);
2862 default:
2863 return (SET_ERROR(EINVAL));
2870 dprintf_drr(struct receive_record_arg *rrd, int err)
2873 switch (rrd->header.drr_type) {
2874 case DRR_OBJECT:
2876 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2877 dprintf("drr_type = OBJECT obj = %llu type = %u "
2878 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
2879 "compress = %u dn_slots = %u err = %d\n",
2880 (u_longlong_t)drro->drr_object, drro->drr_type,
2881 drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
2882 drro->drr_checksumtype, drro->drr_compress,
2883 drro->drr_dn_slots, err);
2886 case DRR_FREEOBJECTS:
2888 struct drr_freeobjects *drrfo =
2889 &rrd->header.drr_u.drr_freeobjects;
2890 dprintf("drr_type = FREEOBJECTS firstobj = %llu "
2891 "numobjs = %llu err = %d\n",
2892 (u_longlong_t)drrfo->drr_firstobj,
2893 (u_longlong_t)drrfo->drr_numobjs, err);
2896 case DRR_WRITE:
2898 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2899 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
2900 "lsize = %llu cksumtype = %u flags = %u "
2901 "compress = %u psize = %llu err = %d\n",
2902 (u_longlong_t)drrw->drr_object, drrw->drr_type,
2903 (u_longlong_t)drrw->drr_offset,
2904 (u_longlong_t)drrw->drr_logical_size,
2905 drrw->drr_checksumtype, drrw->drr_flags,
2906 drrw->drr_compressiontype,
2907 (u_longlong_t)drrw->drr_compressed_size, err);
2910 case DRR_WRITE_BYREF:
2912 struct drr_write_byref *drrwbr =
2913 &rrd->header.drr_u.drr_write_byref;
2914 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
2915 "length = %llu toguid = %llx refguid = %llx "
2916 "refobject = %llu refoffset = %llu cksumtype = %u "
2917 "flags = %u err = %d\n",
2918 (u_longlong_t)drrwbr->drr_object,
2919 (u_longlong_t)drrwbr->drr_offset,
2920 (u_longlong_t)drrwbr->drr_length,
2921 (u_longlong_t)drrwbr->drr_toguid,
2922 (u_longlong_t)drrwbr->drr_refguid,
2923 (u_longlong_t)drrwbr->drr_refobject,
2924 (u_longlong_t)drrwbr->drr_refoffset,
2925 drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
2928 case DRR_WRITE_EMBEDDED:
2930 struct drr_write_embedded *drrwe =
2931 &rrd->header.drr_u.drr_write_embedded;
2932 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
2933 "length = %llu compress = %u etype = %u lsize = %u "
2934 "psize = %u err = %d\n",
2935 (u_longlong_t)drrwe->drr_object,
2936 (u_longlong_t)drrwe->drr_offset,
2937 (u_longlong_t)drrwe->drr_length,
2938 drrwe->drr_compression, drrwe->drr_etype,
2939 drrwe->drr_lsize, drrwe->drr_psize, err);
2942 case DRR_FREE:
2944 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2945 dprintf("drr_type = FREE obj = %llu offset = %llu "
2946 "length = %lld err = %d\n",
2947 (u_longlong_t)drrf->drr_object,
2948 (u_longlong_t)drrf->drr_offset,
2949 (longlong_t)drrf->drr_length,
2950 err);
2953 case DRR_SPILL:
2955 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2956 dprintf("drr_type = SPILL obj = %llu length = %llu "
2957 "err = %d\n", (u_longlong_t)drrs->drr_object,
2958 (u_longlong_t)drrs->drr_length, err);
2961 case DRR_OBJECT_RANGE:
2963 struct drr_object_range *drror =
2964 &rrd->header.drr_u.drr_object_range;
2965 dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
2966 "numslots = %llu flags = %u err = %d\n",
2967 (u_longlong_t)drror->drr_firstobj,
2968 (u_longlong_t)drror->drr_numslots,
2969 drror->drr_flags, err);
2979 * Commit the records to the pool.
2982 receive_process_record(struct receive_writer_arg *rwa,
2983 struct receive_record_arg *rrd)
2987 /* Processing in order, therefore bytes_read should be increasing. */
2988 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2989 rwa->bytes_read = rrd->bytes_read;
2991 /* We can only heal write records; other ones get ignored */
2992 if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
2993 if (rrd->abd != NULL) {
2994 abd_free(rrd->abd);
2995 rrd->abd = NULL;
2996 } else if (rrd->payload != NULL) {
2997 kmem_free(rrd->payload, rrd->payload_size);
2998 rrd->payload = NULL;
3003 if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3004 err = flush_write_batch(rwa);
3006 if (rrd->abd != NULL) {
3007 abd_free(rrd->abd);
3008 rrd->abd = NULL;
3009 rrd->payload = NULL;
3010 } else if (rrd->payload != NULL) {
3011 kmem_free(rrd->payload, rrd->payload_size);
3012 rrd->payload = NULL;
3019 switch (rrd->header.drr_type) {
3020 case DRR_OBJECT:
3022 struct drr_object *drro = &rrd->header.drr_u.drr_object;
3023 err = receive_object(rwa, drro, rrd->payload);
3024 kmem_free(rrd->payload, rrd->payload_size);
3025 rrd->payload = NULL;
3028 case DRR_FREEOBJECTS:
3030 struct drr_freeobjects *drrfo =
3031 &rrd->header.drr_u.drr_freeobjects;
3032 err = receive_freeobjects(rwa, drrfo);
3035 case DRR_WRITE:
3037 err = receive_process_write_record(rwa, rrd);
3039 if (rwa->heal) {
3040 /* If healing, always free the abd after processing. */
3042 abd_free(rrd->abd);
3043 rrd->abd = NULL;
3044 } else if (err != EAGAIN) {
3046 * On success, a non-healing
3047 * receive_process_write_record() returns
3048 * EAGAIN to indicate that we do not want to free
3049 * the rrd or arc_buf.
3057 case DRR_WRITE_EMBEDDED:
3059 struct drr_write_embedded *drrwe =
3060 &rrd->header.drr_u.drr_write_embedded;
3061 err = receive_write_embedded(rwa, drrwe, rrd->payload);
3062 kmem_free(rrd->payload, rrd->payload_size);
3063 rrd->payload = NULL;
3066 case DRR_FREE:
3068 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
3069 err = receive_free(rwa, drrf);
3072 case DRR_SPILL:
3074 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
3075 err = receive_spill(rwa, drrs, rrd->abd);
3076 if (err != 0)
3077 abd_free(rrd->abd);
3078 rrd->abd = NULL;
3079 rrd->payload = NULL;
3082 case DRR_OBJECT_RANGE:
3084 struct drr_object_range *drror =
3085 &rrd->header.drr_u.drr_object_range;
3086 err = receive_object_range(rwa, drror);
3089 case DRR_REDACT:
3091 struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
3092 err = receive_redact(rwa, drrr);
3095 default:
3096 err = SET_ERROR(EINVAL);
3100 dprintf_drr(rrd, err);
3102 return (err);
3106 * dmu_recv_stream's worker thread; pull records off the queue, and then call
3107 * receive_process_record(). When we're done, signal the main thread and exit.
3109 static __attribute__((noreturn)) void
3110 receive_writer_thread(void *arg)
3112 struct receive_writer_arg *rwa = arg;
3113 struct receive_record_arg *rrd;
3114 fstrans_cookie_t cookie = spl_fstrans_mark();
3116 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
3117 rrd = bqueue_dequeue(&rwa->q)) {
3119 * If there's an error, the main thread will stop putting things
3120 * on the queue, but we need to clear everything in it before we
3121 * can exit.
3123 int err = 0;
3124 if (rwa->err == 0) {
3125 err = receive_process_record(rwa, rrd);
3126 } else if (rrd->abd != NULL) {
3129 rrd->payload = NULL;
3130 } else if (rrd->payload != NULL) {
3131 kmem_free(rrd->payload, rrd->payload_size);
3132 rrd->payload = NULL;
3135 * EAGAIN indicates that this record has been saved (on
3136 * rwa->write_batch), and will be used again, so we don't free it.
3138 * When healing data, we always need to free the record.
3140 if (err != EAGAIN || rwa->heal) {
3141 if (rwa->err == 0)
3142 rwa->err = err;
3143 kmem_free(rrd, sizeof (*rrd));
3146 kmem_free(rrd, sizeof (*rrd));
3148 if (rwa->heal) {
3149 zio_wait(rwa->heal_pio);
3150 } else {
3151 int err = flush_write_batch(rwa);
3152 if (rwa->err == 0)
3153 rwa->err = err;
3155 mutex_enter(&rwa->mutex);
3156 rwa->done = B_TRUE;
3157 cv_signal(&rwa->cv);
3158 mutex_exit(&rwa->mutex);
3159 spl_fstrans_unmark(cookie);
3160 thread_exit();
3164 resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
3167 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
3168 uint64_t dsobj = dmu_objset_id(drc->drc_os);
3169 uint64_t resume_obj, resume_off;
3171 if (nvlist_lookup_uint64(begin_nvl,
3172 "resume_object", &resume_obj) != 0 ||
3173 nvlist_lookup_uint64(begin_nvl,
3174 "resume_offset", &resume_off) != 0) {
3175 return (SET_ERROR(EINVAL));
3177 VERIFY0(zap_lookup(mos, dsobj,
3178 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
3179 if (resume_obj != val)
3180 return (SET_ERROR(EINVAL));
3181 VERIFY0(zap_lookup(mos, dsobj,
3182 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
3183 if (resume_off != val)
3184 return (SET_ERROR(EINVAL));
3186 return (0);
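/*
 * Illustrative resume payload (field names as looked up above; values
 * hypothetical):
 *
 *	resume_object = 0x1cd
 *	resume_offset = 0x1400000
 *
 * Both values must match the DS_FIELD_RESUME_OBJECT/OFFSET ZAP entries
 * recorded by the previous partial receive, or the stream is rejected
 * with EINVAL.
 */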
3190 * Read in the stream's records, one by one, and apply them to the pool. There
3191 * are two threads involved; the thread that calls this function will spin up a
3192 * worker thread, read the records off the stream one by one, and issue
3193 * prefetches for any necessary indirect blocks. It will then push the records
3194 * onto an internal blocking queue. The worker thread will pull the records off
3195 * the queue, and actually write the data into the DMU. This way, the worker
3196 * thread doesn't have to wait for reads to complete, since everything it needs
3197 * (the indirect blocks) will be prefetched.
3199 * NB: callers *must* call dmu_recv_end() if this succeeds.
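/*
 * Pipeline sketch (simplified; reader thread on the left, writer on the
 * right):
 *
 *	receive_read_record()            receive_writer_thread()
 *	  issue prefetches                 rrd = bqueue_dequeue(&rwa->q)
 *	  bqueue_enqueue(&rwa->q, rrd) --> receive_process_record(rwa, rrd)
 *	  ...                              batch/apply writes in DMU txs
 *	  enqueue eos_marker           --> signal rwa->cv and exit
 */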
3202 dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
3205 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
3207 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
3209 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
3210 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
3211 sizeof (bytes), 1, &bytes);
3212 drc->drc_bytes_read += bytes;
3215 drc->drc_ignore_objlist = objlist_create();
3217 /* these were verified in dmu_recv_begin */
3218 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
3219 DMU_SUBSTREAM);
3220 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
3222 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
3223 ASSERT0(drc->drc_os->os_encrypted &&
3224 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
3226 /* handle DSL encryption key payload */
3227 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
3228 nvlist_t *keynvl = NULL;
3230 ASSERT(drc->drc_os->os_encrypted);
3231 ASSERT(drc->drc_raw);
3233 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
3234 &keynvl);
3235 if (err != 0)
3236 goto out;
3238 if (!drc->drc_heal) {
3240 * If this is a new dataset we set the key immediately.
3241 * Otherwise we don't want to change the key until we
3242 * are sure the rest of the receive succeeded so we
3243 * stash the keynvl away until then.
3245 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
3246 drc->drc_ds->ds_object, drc->drc_fromsnapobj,
3247 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
3248 if (err != 0)
3249 goto out;
3252 /* see comment in dmu_recv_end_sync() */
3253 drc->drc_ivset_guid = 0;
3254 (void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
3255 &drc->drc_ivset_guid);
3257 if (!drc->drc_newfs)
3258 drc->drc_keynvl = fnvlist_dup(keynvl);
3261 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
3262 err = resume_check(drc, drc->drc_begin_nvl);
3263 if (err != 0)
3264 goto out;
3268 * If we failed before this point we will clean up any new resume
3269 * state that was created. Now that we've gotten past the initial
3270 * checks we are ok to retain that resume state.
3272 drc->drc_should_save = B_TRUE;
3274 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
3275 MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
3276 offsetof(struct receive_record_arg, node));
3277 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
3278 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
3279 rwa->os = drc->drc_os;
3280 rwa->byteswap = drc->drc_byteswap;
3281 rwa->heal = drc->drc_heal;
3282 rwa->tofs = drc->drc_tofs;
3283 rwa->resumable = drc->drc_resumable;
3284 rwa->raw = drc->drc_raw;
3285 rwa->spill = drc->drc_spill;
3286 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
3287 rwa->os->os_raw_receive = drc->drc_raw;
3288 if (drc->drc_heal) {
3289 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
3290 ZIO_FLAG_GODFATHER);
3292 list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
3293 offsetof(struct receive_record_arg, node.bqn_node));
3295 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
3296 TS_RUN, minclsyspri);
3298 * We're reading rwa->err without locks, which is safe since we are the
3299 * only reader, and the worker thread is the only writer. It's ok if we
3300 * miss a write for an iteration or two of the loop, since the writer
3301 * thread will keep freeing records we send it until we send it an eos
3302 * marker.
3304 * We can leave this loop in 3 ways: First, if rwa->err is
3305 * non-zero. In that case, the writer thread will free the rrd we just
3306 * pushed. Second, if we're interrupted; in that case, either it's the
3307 * first loop and drc->drc_rrd was never allocated, or it's later, and
3308 * drc->drc_rrd has been handed off to the writer thread who will free
3309 * it. Finally, if receive_read_record fails or we're at the end of the
3310 * stream, then we free drc->drc_rrd and exit.
3312 while (rwa->err == 0) {
3313 if (issig(JUSTLOOKING) && issig(FORREAL)) {
3314 err = SET_ERROR(EINTR);
3315 break;
3318 ASSERT3P(drc->drc_rrd, ==, NULL);
3319 drc->drc_rrd = drc->drc_next_rrd;
3320 drc->drc_next_rrd = NULL;
3321 /* Allocates and loads header into drc->drc_next_rrd */
3322 err = receive_read_record(drc);
3324 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
3325 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
3326 drc->drc_rrd = NULL;
3327 break;
3330 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3331 sizeof (struct receive_record_arg) +
3332 drc->drc_rrd->payload_size);
3333 drc->drc_rrd = NULL;
3336 ASSERT3P(drc->drc_rrd, ==, NULL);
3337 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
3338 drc->drc_rrd->eos_marker = B_TRUE;
3339 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3341 mutex_enter(&rwa->mutex);
3342 while (!rwa->done) {
3344 * We need to use cv_wait_sig() so that any process that may
3345 * be sleeping here can still fork.
3347 (void) cv_wait_sig(&rwa->cv, &rwa->mutex);
3349 mutex_exit(&rwa->mutex);
3352 * If we are receiving a full stream as a clone, all object IDs which
3353 * are greater than the maximum ID referenced in the stream are
3354 * by definition unused and must be freed.
3356 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
3357 uint64_t obj = rwa->max_object + 1;
3361 while (next_err == 0) {
3362 free_err = dmu_free_long_object(rwa->os, obj);
3363 if (free_err != 0 && free_err != ENOENT)
3364 break;
3366 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
3370 if (free_err != 0 && free_err != ENOENT)
3371 err = free_err;
3372 else if (next_err != ESRCH)
3373 err = next_err;
3377 cv_destroy(&rwa->cv);
3378 mutex_destroy(&rwa->mutex);
3379 bqueue_destroy(&rwa->q);
3380 list_destroy(&rwa->write_batch);
3386 * If we hit an error before we started the receive_writer_thread
3387 * we need to clean up the next_rrd we create by processing the
3388 * DRR_BEGIN record.
3390 if (drc->drc_next_rrd != NULL)
3391 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
3394 * The objset will be invalidated by dmu_recv_end() when we do
3395 * dsl_dataset_clone_swap_sync_impl().
3397 drc->drc_os = NULL;
3399 kmem_free(rwa, sizeof (*rwa));
3400 nvlist_free(drc->drc_begin_nvl);
3402 if (err != 0) {
3404 * Clean up references. If receive is not resumable,
3405 * destroy what we created, so we don't leave it in
3406 * the inconsistent state.
3408 dmu_recv_cleanup_ds(drc);
3409 nvlist_free(drc->drc_keynvl);
3412 objlist_destroy(drc->drc_ignore_objlist);
3413 drc->drc_ignore_objlist = NULL;
3414 *voffp = drc->drc_voff;
3416 return (err);
3419 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
3421 dmu_recv_cookie_t *drc = arg;
3422 dsl_pool_t *dp = dmu_tx_pool(tx);
3425 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3427 if (drc->drc_heal) {
3428 error = 0;
3429 } else if (!drc->drc_newfs) {
3430 dsl_dataset_t *origin_head;
3432 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
3433 if (error != 0)
3434 return (error);
3435 if (drc->drc_force) {
3437 * We will destroy any snapshots in tofs (i.e. before
3438 * origin_head) that are after the origin (which is
3439 * the snap before drc_ds, because drc_ds can not
3440 * have any snaps of its own).
3444 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3445 while (obj !=
3446 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3447 dsl_dataset_t *snap;
3448 error = dsl_dataset_hold_obj(dp, obj, FTAG,
3449 &snap);
3450 if (error != 0)
3451 break;
3452 if (snap->ds_dir != origin_head->ds_dir)
3453 error = SET_ERROR(EINVAL);
3454 if (error == 0)
3455 error = dsl_destroy_snapshot_check_impl(
3456 snap, B_FALSE);
3458 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3459 dsl_dataset_rele(snap, FTAG);
3463 if (error != 0) {
3464 dsl_dataset_rele(origin_head, FTAG);
3465 return (error);
3468 if (drc->drc_keynvl != NULL) {
3469 error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
3470 drc->drc_keynvl, tx);
3471 if (error != 0) {
3472 dsl_dataset_rele(origin_head, FTAG);
3473 return (error);
3477 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3478 origin_head, drc->drc_force, drc->drc_owner, tx);
3479 if (error != 0) {
3480 dsl_dataset_rele(origin_head, FTAG);
3481 return (error);
3483 error = dsl_dataset_snapshot_check_impl(origin_head,
3484 drc->drc_tosnap, tx, B_TRUE, 1,
3485 drc->drc_cred, drc->drc_proc);
3486 dsl_dataset_rele(origin_head, FTAG);
3487 if (error != 0)
3488 return (error);
3490 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3491 } else {
3492 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
3493 drc->drc_tosnap, tx, B_TRUE, 1,
3494 drc->drc_cred, drc->drc_proc);
3500 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3502 dmu_recv_cookie_t *drc = arg;
3503 dsl_pool_t *dp = dmu_tx_pool(tx);
3504 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
3505 uint64_t newsnapobj = 0;
3507 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3508 tx, "snap=%s", drc->drc_tosnap);
3509 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
3511 if (drc->drc_heal) {
3512 if (drc->drc_keynvl != NULL) {
3513 nvlist_free(drc->drc_keynvl);
3514 drc->drc_keynvl = NULL;
3516 } else if (!drc->drc_newfs) {
3517 dsl_dataset_t *origin_head;
3519 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3522 if (drc->drc_force) {
3524 * Destroy any snapshots of drc_tofs (origin_head)
3525 * after the origin (the snap before drc_ds).
3529 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3530 while (obj !=
3531 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3532 dsl_dataset_t *snap;
3533 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3534 &snap));
3535 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3536 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3537 dsl_destroy_snapshot_sync_impl(snap,
3538 B_FALSE, tx);
3539 dsl_dataset_rele(snap, FTAG);
3542 if (drc->drc_keynvl != NULL) {
3543 dsl_crypto_recv_raw_key_sync(drc->drc_ds,
3544 drc->drc_keynvl, tx);
3545 nvlist_free(drc->drc_keynvl);
3546 drc->drc_keynvl = NULL;
3549 VERIFY3P(drc->drc_ds->ds_prev, ==,
3550 origin_head->ds_prev);
3552 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3553 origin_head, tx);
3555 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
3556 * so drc_os is no longer valid.
3558 drc->drc_os = NULL;
3560 dsl_dataset_snapshot_sync_impl(origin_head,
3561 drc->drc_tosnap, tx);
3563 /* set snapshot's creation time and guid */
3564 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3565 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3566 drc->drc_drrb->drr_creation_time;
3567 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3568 drc->drc_drrb->drr_toguid;
3569 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3570 ~DS_FLAG_INCONSISTENT;
3572 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3573 dsl_dataset_phys(origin_head)->ds_flags &=
3574 ~DS_FLAG_INCONSISTENT;
3576 newsnapobj =
3577 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3579 dsl_dataset_rele(origin_head, FTAG);
3580 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3582 if (drc->drc_owner != NULL)
3583 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3584 } else {
3585 dsl_dataset_t *ds = drc->drc_ds;
3587 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3589 /* set snapshot's creation time and guid */
3590 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3591 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3592 drc->drc_drrb->drr_creation_time;
3593 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3594 drc->drc_drrb->drr_toguid;
3595 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3596 ~DS_FLAG_INCONSISTENT;
3598 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3599 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3600 if (dsl_dataset_has_resume_receive_state(ds)) {
3601 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3602 DS_FIELD_RESUME_FROMGUID, tx);
3603 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3604 DS_FIELD_RESUME_OBJECT, tx);
3605 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3606 DS_FIELD_RESUME_OFFSET, tx);
3607 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3608 DS_FIELD_RESUME_BYTES, tx);
3609 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3610 DS_FIELD_RESUME_TOGUID, tx);
3611 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3612 DS_FIELD_RESUME_TONAME, tx);
3613 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3614 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
3616 newsnapobj =
3617 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3621 * If this is a raw receive, the crypt_keydata nvlist will include
3622 * a to_ivset_guid for us to set on the new snapshot. This value
3623 * will override the value generated by the snapshot code. However,
3624 * this value may not be present, because older implementations of
3625 * the raw send code did not include this value, and we are still
3626 * allowed to receive them if the zfs_disable_ivset_guid_check
3627 * tunable is set, in which case we will leave the newly-generated
3628 * value.
3630 if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
3631 dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
3632 DMU_OT_DSL_DATASET, tx);
3633 VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
3634 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
3635 &drc->drc_ivset_guid, tx));
3639 * Release the hold from dmu_recv_begin. This must be done before
3640 * we return to open context, so that when we free the dataset's dnode
3641 * we can evict its bonus buffer. Since the dataset may be destroyed
3642 * at this point (and therefore won't have a valid pointer to the spa)
3643 * we release the key mapping manually here while we do have a valid
3644 * pointer, if it exists.
3646 if (!drc->drc_raw && encrypted) {
3647 (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
3648 drc->drc_ds->ds_object, drc->drc_ds);
3650 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
3654 static int dmu_recv_end_modified_blocks = 3;
3657 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3661 * We will be destroying the ds; make sure its origin is unmounted if
3662 * necessary.
3664 char name[ZFS_MAX_DATASET_NAME_LEN];
3665 dsl_dataset_name(drc->drc_ds, name);
3666 zfs_destroy_unmount_origin(name);
3669 return (dsl_sync_task(drc->drc_tofs,
3670 dmu_recv_end_check, dmu_recv_end_sync, drc,
3671 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3675 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3677 return (dsl_sync_task(drc->drc_tofs,
3678 dmu_recv_end_check, dmu_recv_end_sync, drc,
3679 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3683 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3687 drc->drc_owner = owner;
3689 if (drc->drc_newfs)
3690 error = dmu_recv_new_end(drc);
3691 else
3692 error = dmu_recv_existing_end(drc);
3694 if (error != 0) {
3695 dmu_recv_cleanup_ds(drc);
3696 nvlist_free(drc->drc_keynvl);
3697 } else if (!drc->drc_heal) {
3698 if (drc->drc_newfs) {
3699 zvol_create_minor(drc->drc_tofs);
3700 } else {
3701 char *snapname = kmem_asprintf("%s@%s",
3702 drc->drc_tofs, drc->drc_tosnap);
3703 zvol_create_minor(snapname);
3704 kmem_strfree(snapname);
3708 return (error);
3710 * Return TRUE if this objset is currently being received into.
3713 dmu_objset_is_receiving(objset_t *os)
3715 return (os->os_dsl_dataset != NULL &&
3716 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3719 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
3720 "Maximum receive queue length");
3722 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
3723 "Receive queue fill fraction");
3725 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
3726 "Maximum amount of writes to batch into one transaction");
3728 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
3729 "Ignore errors during corrective receive");