4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright 2014 HybridCluster. All rights reserved.
31 #include <sys/dmu_impl.h>
32 #include <sys/dmu_tx.h>
34 #include <sys/dnode.h>
35 #include <sys/zfs_context.h>
36 #include <sys/dmu_objset.h>
37 #include <sys/dmu_traverse.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/dsl_prop.h>
41 #include <sys/dsl_pool.h>
42 #include <sys/dsl_synctask.h>
43 #include <sys/zfs_ioctl.h>
45 #include <sys/zio_checksum.h>
46 #include <sys/zfs_znode.h>
47 #include <zfs_fletcher.h>
50 #include <sys/zfs_onexit.h>
51 #include <sys/dmu_send.h>
52 #include <sys/dsl_destroy.h>
53 #include <sys/blkptr.h>
54 #include <sys/dsl_bookmark.h>
55 #include <sys/zfeature.h>
56 #include <sys/bqueue.h>
60 #define dump_write dmu_dump_write
63 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
64 int zfs_send_corrupt_data = B_FALSE;
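/*
 * Maximum number of bytes that may be buffered (in a bqueue_t) between the
 * prefetching traversal/read thread and the thread that processes records.
 */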
65 int zfs_send_queue_length = 16 * 1024 * 1024;
66 int zfs_recv_queue_length = 16 * 1024 * 1024;
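/*
 * Ownership tag used while a receive is in progress, and the name of the
 * temporary "%recv" clone that holds a partially received stream.
 */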
68 static char *dmu_recv_tag = "dmu_recv_tag";
69 const char *recv_clone_name = "%recv";
71 #define BP_SPAN(datablkszsec, indblkshift, level) \
72 (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
73 (level) * (indblkshift - SPA_BLKPTRSHIFT)))
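/*
 * Example: with 128K data blocks (datablkszsec == 256) and 128K indirect
 * blocks (indblkshift == 17), a level-1 block pointer spans
 * 256 << (9 + 1 * (17 - 7)) == 128M of object data (1024 128K blocks).
 */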
75 static void byteswap_record(dmu_replay_record_t *drr);
77 struct send_thread_arg {
79 dsl_dataset_t *ds; /* Dataset to traverse */
80 uint64_t fromtxg; /* Traverse from this txg */
81 int flags; /* flags to pass to traverse_dataset */
84 zbookmark_phys_t resume;
87 struct send_block_record {
88 boolean_t eos_marker; /* Marks the end of the stream */
92 uint16_t datablkszsec;
97 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
99 dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
106 auio.uio_iov = &aiov;
108 auio.uio_resid = len;
109 auio.uio_segflg = UIO_SYSSPACE;
110 auio.uio_rw = UIO_WRITE;
111 auio.uio_offset = (off_t)-1;
112 auio.uio_td = dsp->dsa_td;
114 if (dsp->dsa_fp->f_type == DTYPE_VNODE)
116 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
119 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
120 dsp->dsa_err = EOPNOTSUPP;
122 mutex_enter(&ds->ds_sendstream_lock);
123 *dsp->dsa_off += len;
124 mutex_exit(&ds->ds_sendstream_lock);
126 return (dsp->dsa_err);
130 * For all record types except BEGIN, fill in the checksum (overlaid in
131 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
132 * up to the start of the checksum itself.
135 dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
137 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
138 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
139 fletcher_4_incremental_native(dsp->dsa_drr,
140 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
142 if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
143 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
144 drr_checksum.drr_checksum));
145 dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
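/*
 * Fold the checksum field itself (just filled in for non-BEGIN records)
 * into the running checksum so that subsequent records cover it as well.
 */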
147 fletcher_4_incremental_native(&dsp->dsa_drr->
148 drr_u.drr_checksum.drr_checksum,
149 sizeof (zio_cksum_t), &dsp->dsa_zc);
150 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
151 return (SET_ERROR(EINTR));
152 if (payload_len != 0) {
153 fletcher_4_incremental_native(payload, payload_len,
155 if (dump_bytes(dsp, payload, payload_len) != 0)
156 return (SET_ERROR(EINTR));
162 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
165 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
168 * When we receive a free record, dbuf_free_range() assumes
169 * that the receiving system doesn't have any dbufs in the range
170 * being freed. This is always true because there is a one-record
171 * constraint: we only send one WRITE record for any given
172 * object,offset. We know that the one-record constraint is
173 * true because we always send data in increasing order by
176 * If the increasing-order constraint ever changes, we should find
177 * another way to assert that the one-record constraint is still
180 ASSERT(object > dsp->dsa_last_data_object ||
181 (object == dsp->dsa_last_data_object &&
182 offset > dsp->dsa_last_data_offset));
185 * If we are doing a non-incremental send, then there can't
186 * be any data in the dataset we're receiving into. Therefore
187 * a free record would simply be a no-op. Save space by not
188 * sending it to begin with.
190 if (!dsp->dsa_incremental)
193 if (length != -1ULL && offset + length < offset)
197 * If there is a pending op, but it's not PENDING_FREE, push it out,
198 * since free block aggregation can only be done for blocks of the
199 * same type (i.e., DRR_FREE records can only be aggregated with
200 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
201 * aggregated with other DRR_FREEOBJECTS records).
203 if (dsp->dsa_pending_op != PENDING_NONE &&
204 dsp->dsa_pending_op != PENDING_FREE) {
205 if (dump_record(dsp, NULL, 0) != 0)
206 return (SET_ERROR(EINTR));
207 dsp->dsa_pending_op = PENDING_NONE;
210 if (dsp->dsa_pending_op == PENDING_FREE) {
212 * There should never be a PENDING_FREE if length is -1
213 * (because dump_dnode is the only place where this
214 * function is called with a -1, and only after flushing
215 * any pending record).
217 ASSERT(length != -1ULL);
219 * Check to see whether this free block can be aggregated
222 if (drrf->drr_object == object && drrf->drr_offset +
223 drrf->drr_length == offset) {
224 drrf->drr_length += length;
227 /* not a continuation. Push out pending record */
228 if (dump_record(dsp, NULL, 0) != 0)
229 return (SET_ERROR(EINTR));
230 dsp->dsa_pending_op = PENDING_NONE;
233 /* create a FREE record and make it pending */
234 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
235 dsp->dsa_drr->drr_type = DRR_FREE;
236 drrf->drr_object = object;
237 drrf->drr_offset = offset;
238 drrf->drr_length = length;
239 drrf->drr_toguid = dsp->dsa_toguid;
240 if (length == -1ULL) {
241 if (dump_record(dsp, NULL, 0) != 0)
242 return (SET_ERROR(EINTR));
244 dsp->dsa_pending_op = PENDING_FREE;
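/*
 * Emit a DRR_WRITE record for a single block of object data, flushing any
 * pending FREE/FREEOBJECTS aggregation first.
 */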
251 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
252 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
254 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
257 * We send data in increasing object, offset order.
258 * See comment in dump_free() for details.
260 ASSERT(object > dsp->dsa_last_data_object ||
261 (object == dsp->dsa_last_data_object &&
262 offset > dsp->dsa_last_data_offset));
263 dsp->dsa_last_data_object = object;
264 dsp->dsa_last_data_offset = offset + blksz - 1;
267 * If there is any kind of pending aggregation (currently either
268 * a grouping of free objects or free blocks), push it out to
269 * the stream, since aggregation can't be done across operations
270 * of different types.
272 if (dsp->dsa_pending_op != PENDING_NONE) {
273 if (dump_record(dsp, NULL, 0) != 0)
274 return (SET_ERROR(EINTR));
275 dsp->dsa_pending_op = PENDING_NONE;
277 /* write a WRITE record */
278 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
279 dsp->dsa_drr->drr_type = DRR_WRITE;
280 drrw->drr_object = object;
281 drrw->drr_type = type;
282 drrw->drr_offset = offset;
283 drrw->drr_length = blksz;
284 drrw->drr_toguid = dsp->dsa_toguid;
285 if (bp == NULL || BP_IS_EMBEDDED(bp)) {
287 * There's no pre-computed checksum for partial-block
288 * writes or embedded BP's, so (like
289 * fletcher4-checksummed blocks) userland will have to
290 * compute a dedup-capable checksum itself.
292 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
294 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
295 if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
296 ZCHECKSUM_FLAG_DEDUP)
297 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
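/*
 * Copy the block's checksum and size/compression info into the dedup key
 * so that a dedup-aware consumer of the stream can match this block
 * against identical blocks seen earlier and emit WRITE_BYREF records.
 */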
298 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
299 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
300 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
301 drrw->drr_key.ddk_cksum = bp->blk_cksum;
304 if (dump_record(dsp, data, blksz) != 0)
305 return (SET_ERROR(EINTR));
310 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
311 int blksz, const blkptr_t *bp)
313 char buf[BPE_PAYLOAD_SIZE];
314 struct drr_write_embedded *drrw =
315 &(dsp->dsa_drr->drr_u.drr_write_embedded);
317 if (dsp->dsa_pending_op != PENDING_NONE) {
318 if (dump_record(dsp, NULL, 0) != 0)
320 dsp->dsa_pending_op = PENDING_NONE;
323 ASSERT(BP_IS_EMBEDDED(bp));
325 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
326 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
327 drrw->drr_object = object;
328 drrw->drr_offset = offset;
329 drrw->drr_length = blksz;
330 drrw->drr_toguid = dsp->dsa_toguid;
331 drrw->drr_compression = BP_GET_COMPRESS(bp);
332 drrw->drr_etype = BPE_GET_ETYPE(bp);
333 drrw->drr_lsize = BPE_GET_LSIZE(bp);
334 drrw->drr_psize = BPE_GET_PSIZE(bp);
336 decode_embedded_bp_compressed(bp, buf);
338 if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
344 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
346 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
348 if (dsp->dsa_pending_op != PENDING_NONE) {
349 if (dump_record(dsp, NULL, 0) != 0)
350 return (SET_ERROR(EINTR));
351 dsp->dsa_pending_op = PENDING_NONE;
354 /* write a SPILL record */
355 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
356 dsp->dsa_drr->drr_type = DRR_SPILL;
357 drrs->drr_object = object;
358 drrs->drr_length = blksz;
359 drrs->drr_toguid = dsp->dsa_toguid;
361 if (dump_record(dsp, data, blksz) != 0)
362 return (SET_ERROR(EINTR));
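/*
 * Emit (or aggregate into a pending) DRR_FREEOBJECTS record covering
 * numobjs objects starting at firstobj.
 */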
367 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
369 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
371 /* See comment in dump_free(). */
372 if (!dsp->dsa_incremental)
376 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
377 * push it out, since free block aggregation can only be done for
378 * blocks of the same type (i.e., DRR_FREE records can only be
379 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
380 * can only be aggregated with other DRR_FREEOBJECTS records).
382 if (dsp->dsa_pending_op != PENDING_NONE &&
383 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
384 if (dump_record(dsp, NULL, 0) != 0)
385 return (SET_ERROR(EINTR));
386 dsp->dsa_pending_op = PENDING_NONE;
388 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
390 * See whether this free object array can be aggregated
393 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
394 drrfo->drr_numobjs += numobjs;
397 /* can't be aggregated. Push out pending record */
398 if (dump_record(dsp, NULL, 0) != 0)
399 return (SET_ERROR(EINTR));
400 dsp->dsa_pending_op = PENDING_NONE;
404 /* write a FREEOBJECTS record */
405 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
406 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
407 drrfo->drr_firstobj = firstobj;
408 drrfo->drr_numobjs = numobjs;
409 drrfo->drr_toguid = dsp->dsa_toguid;
411 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
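/*
 * Emit a DRR_OBJECT record describing this dnode, then free any data
 * beyond the object's last block on the receiving side.
 */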
417 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
419 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
421 if (object < dsp->dsa_resume_object) {
423 * Note: when resuming, we will visit all the dnodes in
424 * the block of dnodes that we are resuming from. In
425 * this case it's unnecessary to send the dnodes prior to
426 * the one we are resuming from. We should be at most one
427 * block's worth of dnodes behind the resume point.
429 ASSERT3U(dsp->dsa_resume_object - object, <,
430 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
434 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
435 return (dump_freeobjects(dsp, object, 1));
437 if (dsp->dsa_pending_op != PENDING_NONE) {
438 if (dump_record(dsp, NULL, 0) != 0)
439 return (SET_ERROR(EINTR));
440 dsp->dsa_pending_op = PENDING_NONE;
443 /* write an OBJECT record */
444 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
445 dsp->dsa_drr->drr_type = DRR_OBJECT;
446 drro->drr_object = object;
447 drro->drr_type = dnp->dn_type;
448 drro->drr_bonustype = dnp->dn_bonustype;
449 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
450 drro->drr_bonuslen = dnp->dn_bonuslen;
451 drro->drr_checksumtype = dnp->dn_checksum;
452 drro->drr_compress = dnp->dn_compress;
453 drro->drr_toguid = dsp->dsa_toguid;
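/*
 * If the stream cannot carry large blocks, advertise at most the legacy
 * 128K block size; do_dump() splits the actual data into 128K writes.
 */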
455 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
456 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
457 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
459 if (dump_record(dsp, DN_BONUS(dnp),
460 P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
461 return (SET_ERROR(EINTR));
464 /* Free anything past the end of the file. */
465 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
466 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
467 return (SET_ERROR(EINTR));
468 if (dsp->dsa_err != 0)
469 return (SET_ERROR(EINTR));
474 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
476 if (!BP_IS_EMBEDDED(bp))
480 * Compression function must be legacy, or explicitly enabled.
482 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
483 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
487 * Embed type must be explicitly enabled.
489 switch (BPE_GET_ETYPE(bp)) {
490 case BP_EMBEDDED_TYPE_DATA:
491 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
501 * This is the callback function for traverse_dataset; it runs in the worker
502 * thread and enqueues block records for dmu_send_impl to consume.
506 send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
507 const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
509 struct send_thread_arg *sta = arg;
510 struct send_block_record *record;
511 uint64_t record_size;
514 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
515 zb->zb_object >= sta->resume.zb_object);
518 return (SET_ERROR(EINTR));
521 ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
523 } else if (zb->zb_level < 0) {
527 record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
528 record->eos_marker = B_FALSE;
531 record->indblkshift = dnp->dn_indblkshift;
532 record->datablkszsec = dnp->dn_datablkszsec;
533 record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
534 bqueue_enqueue(&sta->q, record, record_size);
540 * This function kicks off the dataset traversal. It also handles setting the
541 * thread's error code if something goes wrong, and pushes the End of
542 * Stream record when the traverse_dataset call has finished. If there is no
543 * dataset to traverse, the thread immediately pushes the End of Stream marker.
546 send_traverse_thread(void *arg)
548 struct send_thread_arg *st_arg = arg;
550 struct send_block_record *data;
552 if (st_arg->ds != NULL) {
553 err = traverse_dataset_resume(st_arg->ds,
554 st_arg->fromtxg, &st_arg->resume,
555 st_arg->flags, send_cb, st_arg);
558 st_arg->error_code = err;
560 data = kmem_zalloc(sizeof (*data), KM_SLEEP);
561 data->eos_marker = B_TRUE;
562 bqueue_enqueue(&st_arg->q, data, 1);
567 * This function actually handles figuring out what kind of record needs to be
568 * dumped, reading the data (which has hopefully been prefetched), and calling
569 * the appropriate helper function.
572 do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
574 dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
575 const blkptr_t *bp = &data->bp;
576 const zbookmark_phys_t *zb = &data->zb;
577 uint8_t indblkshift = data->indblkshift;
578 uint16_t dblkszsec = data->datablkszsec;
579 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
580 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
583 ASSERT3U(zb->zb_level, >=, 0);
585 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
586 zb->zb_object >= dsa->dsa_resume_object);
588 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
589 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
591 } else if (BP_IS_HOLE(bp) &&
592 zb->zb_object == DMU_META_DNODE_OBJECT) {
593 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
594 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
595 err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
596 } else if (BP_IS_HOLE(bp)) {
597 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
598 uint64_t offset = zb->zb_blkid * span;
599 err = dump_free(dsa, zb->zb_object, offset, span);
600 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
602 } else if (type == DMU_OT_DNODE) {
603 int blksz = BP_GET_LSIZE(bp);
604 arc_flags_t aflags = ARC_FLAG_WAIT;
607 ASSERT0(zb->zb_level);
609 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
610 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
612 return (SET_ERROR(EIO));
614 dnode_phys_t *blk = abuf->b_data;
615 uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
616 for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
617 err = dump_dnode(dsa, dnobj + i, blk + i);
621 (void) arc_buf_remove_ref(abuf, &abuf);
622 } else if (type == DMU_OT_SA) {
623 arc_flags_t aflags = ARC_FLAG_WAIT;
625 int blksz = BP_GET_LSIZE(bp);
627 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
628 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
630 return (SET_ERROR(EIO));
632 err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
633 (void) arc_buf_remove_ref(abuf, &abuf);
634 } else if (backup_do_embed(dsa, bp)) {
635 /* it's an embedded level-0 block of a regular object */
636 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
637 ASSERT0(zb->zb_level);
638 err = dump_write_embedded(dsa, zb->zb_object,
639 zb->zb_blkid * blksz, blksz, bp);
641 /* it's a level-0 block of a regular object */
642 arc_flags_t aflags = ARC_FLAG_WAIT;
644 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
647 ASSERT0(zb->zb_level);
648 ASSERT(zb->zb_object > dsa->dsa_resume_object ||
649 (zb->zb_object == dsa->dsa_resume_object &&
650 zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
652 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
653 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
655 if (zfs_send_corrupt_data) {
656 /* Send a block filled with 0x"zfs badd bloc" */
657 abuf = arc_buf_alloc(spa, blksz, &abuf,
660 for (ptr = abuf->b_data;
661 (char *)ptr < (char *)abuf->b_data + blksz;
663 *ptr = 0x2f5baddb10cULL;
665 return (SET_ERROR(EIO));
669 offset = zb->zb_blkid * blksz;
671 if (!(dsa->dsa_featureflags &
672 DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
673 blksz > SPA_OLD_MAXBLOCKSIZE) {
674 char *buf = abuf->b_data;
675 while (blksz > 0 && err == 0) {
676 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
677 err = dump_write(dsa, type, zb->zb_object,
678 offset, n, NULL, buf);
684 err = dump_write(dsa, type, zb->zb_object,
685 offset, blksz, bp, abuf->b_data);
687 (void) arc_buf_remove_ref(abuf, &abuf);
690 ASSERT(err == 0 || err == EINTR);
695 * Pop the new data off the queue, and free the old data.
697 static struct send_block_record *
698 get_next_record(bqueue_t *bq, struct send_block_record *data)
700 struct send_block_record *tmp = bqueue_dequeue(bq);
701 kmem_free(data, sizeof (*data));
706 * Actually do the bulk of the work in a zfs send.
708 * Note: Releases dp using the specified tag.
711 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
712 zfs_bookmark_phys_t *ancestor_zb,
713 boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
714 uint64_t resumeobj, uint64_t resumeoff,
716 vnode_t *vp, offset_t *off)
718 struct file *fp, offset_t *off)
722 dmu_replay_record_t *drr;
725 uint64_t fromtxg = 0;
726 uint64_t featureflags = 0;
727 struct send_thread_arg to_arg = { 0 };
729 err = dmu_objset_from_ds(to_ds, &os);
731 dsl_pool_rele(dp, tag);
735 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
736 drr->drr_type = DRR_BEGIN;
737 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
738 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
742 if (dmu_objset_type(os) == DMU_OST_ZFS) {
744 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
745 kmem_free(drr, sizeof (dmu_replay_record_t));
746 dsl_pool_rele(dp, tag);
747 return (SET_ERROR(EINVAL));
749 if (version >= ZPL_VERSION_SA) {
750 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
755 if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
756 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
758 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
759 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
760 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
761 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
764 if (resumeobj != 0 || resumeoff != 0) {
765 featureflags |= DMU_BACKUP_FEATURE_RESUMING;
768 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
771 drr->drr_u.drr_begin.drr_creation_time =
772 dsl_dataset_phys(to_ds)->ds_creation_time;
773 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
775 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
776 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
777 if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
778 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
780 if (ancestor_zb != NULL) {
781 drr->drr_u.drr_begin.drr_fromguid =
782 ancestor_zb->zbm_guid;
783 fromtxg = ancestor_zb->zbm_creation_txg;
785 dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
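/*
 * When sending a filesystem or volume head rather than a snapshot, there
 * is no snapshot name; use a placeholder in the stream's toname.
 */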
786 if (!to_ds->ds_is_snapshot) {
787 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
788 sizeof (drr->drr_u.drr_begin.drr_toname));
791 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
794 dsp->dsa_outfd = outfd;
795 dsp->dsa_proc = curproc;
796 dsp->dsa_td = curthread;
800 dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
801 dsp->dsa_pending_op = PENDING_NONE;
802 dsp->dsa_incremental = (ancestor_zb != NULL);
803 dsp->dsa_featureflags = featureflags;
804 dsp->dsa_resume_object = resumeobj;
805 dsp->dsa_resume_offset = resumeoff;
807 mutex_enter(&to_ds->ds_sendstream_lock);
808 list_insert_head(&to_ds->ds_sendstreams, dsp);
809 mutex_exit(&to_ds->ds_sendstream_lock);
811 dsl_dataset_long_hold(to_ds, FTAG);
812 dsl_pool_rele(dp, tag);
814 void *payload = NULL;
815 size_t payload_len = 0;
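/*
 * For a resumed send, record the resume point as the traversal bookmark
 * and attach it as an nvlist payload on the BEGIN record so the receiver
 * can verify where the stream picks up.
 */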
816 if (resumeobj != 0 || resumeoff != 0) {
817 dmu_object_info_t to_doi;
818 err = dmu_object_info(os, resumeobj, &to_doi);
821 SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
822 resumeoff / to_doi.doi_data_block_size);
824 nvlist_t *nvl = fnvlist_alloc();
825 fnvlist_add_uint64(nvl, "resume_object", resumeobj);
826 fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
827 payload = fnvlist_pack(nvl, &payload_len);
828 drr->drr_payloadlen = payload_len;
832 err = dump_record(dsp, payload, payload_len);
833 fnvlist_pack_free(payload, payload_len);
839 err = bqueue_init(&to_arg.q, zfs_send_queue_length,
840 offsetof(struct send_block_record, ln));
841 to_arg.error_code = 0;
842 to_arg.cancel = B_FALSE;
844 to_arg.fromtxg = fromtxg;
845 to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
846 (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
847 TS_RUN, minclsyspri);
849 struct send_block_record *to_data;
850 to_data = bqueue_dequeue(&to_arg.q);
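/*
 * Consume block records from the traversal thread until we reach the
 * end-of-stream marker, hit an error, or are interrupted by a signal.
 */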
852 while (!to_data->eos_marker && err == 0) {
853 err = do_dump(dsp, to_data);
854 to_data = get_next_record(&to_arg.q, to_data);
855 if (issig(JUSTLOOKING) && issig(FORREAL))
860 to_arg.cancel = B_TRUE;
861 while (!to_data->eos_marker) {
862 to_data = get_next_record(&to_arg.q, to_data);
865 kmem_free(to_data, sizeof (*to_data));
867 bqueue_destroy(&to_arg.q);
869 if (err == 0 && to_arg.error_code != 0)
870 err = to_arg.error_code;
875 if (dsp->dsa_pending_op != PENDING_NONE)
876 if (dump_record(dsp, NULL, 0) != 0)
877 err = SET_ERROR(EINTR);
880 if (err == EINTR && dsp->dsa_err != 0)
885 bzero(drr, sizeof (dmu_replay_record_t));
886 drr->drr_type = DRR_END;
887 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
888 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
890 if (dump_record(dsp, NULL, 0) != 0)
894 mutex_enter(&to_ds->ds_sendstream_lock);
895 list_remove(&to_ds->ds_sendstreams, dsp);
896 mutex_exit(&to_ds->ds_sendstream_lock);
898 kmem_free(drr, sizeof (dmu_replay_record_t));
899 kmem_free(dsp, sizeof (dmu_sendarg_t));
901 dsl_dataset_long_rele(to_ds, FTAG);
907 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
908 boolean_t embedok, boolean_t large_block_ok,
910 int outfd, vnode_t *vp, offset_t *off)
912 int outfd, struct file *fp, offset_t *off)
917 dsl_dataset_t *fromds = NULL;
920 err = dsl_pool_hold(pool, FTAG, &dp);
924 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
926 dsl_pool_rele(dp, FTAG);
931 zfs_bookmark_phys_t zb;
934 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
936 dsl_dataset_rele(ds, FTAG);
937 dsl_pool_rele(dp, FTAG);
940 if (!dsl_dataset_is_before(ds, fromds, 0))
941 err = SET_ERROR(EXDEV);
942 zb.zbm_creation_time =
943 dsl_dataset_phys(fromds)->ds_creation_time;
944 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
945 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
946 is_clone = (fromds->ds_dir != ds->ds_dir);
947 dsl_dataset_rele(fromds, FTAG);
948 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
949 embedok, large_block_ok, outfd, 0, 0, fp, off);
951 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
952 embedok, large_block_ok, outfd, 0, 0, fp, off);
954 dsl_dataset_rele(ds, FTAG);
959 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
960 boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
962 vnode_t *vp, offset_t *off)
964 struct file *fp, offset_t *off)
970 boolean_t owned = B_FALSE;
972 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
973 return (SET_ERROR(EINVAL));
975 err = dsl_pool_hold(tosnap, FTAG, &dp);
979 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
981 * We are sending a filesystem or volume. Ensure
982 * that it doesn't change by owning the dataset.
984 err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
987 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
990 dsl_pool_rele(dp, FTAG);
994 if (fromsnap != NULL) {
995 zfs_bookmark_phys_t zb;
996 boolean_t is_clone = B_FALSE;
997 int fsnamelen = strchr(tosnap, '@') - tosnap;
1000 * If the fromsnap is in a different filesystem, then
1001 * mark the send stream as a clone.
1003 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
1004 (fromsnap[fsnamelen] != '@' &&
1005 fromsnap[fsnamelen] != '#')) {
1009 if (strchr(fromsnap, '@')) {
1010 dsl_dataset_t *fromds;
1011 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
1013 if (!dsl_dataset_is_before(ds, fromds, 0))
1014 err = SET_ERROR(EXDEV);
1015 zb.zbm_creation_time =
1016 dsl_dataset_phys(fromds)->ds_creation_time;
1017 zb.zbm_creation_txg =
1018 dsl_dataset_phys(fromds)->ds_creation_txg;
1019 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1020 is_clone = (ds->ds_dir != fromds->ds_dir);
1021 dsl_dataset_rele(fromds, FTAG);
1024 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
1027 dsl_dataset_rele(ds, FTAG);
1028 dsl_pool_rele(dp, FTAG);
1031 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1032 embedok, large_block_ok,
1033 outfd, resumeobj, resumeoff, fp, off);
1035 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1036 embedok, large_block_ok,
1037 outfd, resumeobj, resumeoff, fp, off);
1040 dsl_dataset_disown(ds, FTAG);
1042 dsl_dataset_rele(ds, FTAG);
1047 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
1052 * Assume that space (both on-disk and in-stream) is dominated by
1053 * data. We will adjust for indirect blocks and the copies property,
1054 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
1058 * Subtract out approximate space used by indirect blocks.
1059 * Assume most space is used by data blocks (non-indirect, non-dnode).
1060 * Assume all blocks are recordsize. Assume ditto blocks and
1061 * internal fragmentation counter out compression.
1063 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
1064 * block, which we observe in practice.
1066 uint64_t recordsize;
1067 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
1070 size -= size / recordsize * sizeof (blkptr_t);
1072 /* Add in the space for the record associated with each block. */
1073 size += size / recordsize * sizeof (dmu_replay_record_t);
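/*
 * Example of the adjustment above: a 1G stream with 128K records loses
 * roughly 8192 * sizeof (blkptr_t) to indirect-block overhead and gains
 * roughly 8192 * sizeof (dmu_replay_record_t) for per-block stream headers.
 */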
1081 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
1083 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1087 ASSERT(dsl_pool_config_held(dp));
1089 /* tosnap must be a snapshot */
1090 if (!ds->ds_is_snapshot)
1091 return (SET_ERROR(EINVAL));
1093 /* fromsnap, if provided, must be a snapshot */
1094 if (fromds != NULL && !fromds->ds_is_snapshot)
1095 return (SET_ERROR(EINVAL));
1098 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1099 * or the origin's fs.
1101 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
1102 return (SET_ERROR(EXDEV));
1104 /* Get uncompressed size estimate of changed data. */
1105 if (fromds == NULL) {
1106 size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
1108 uint64_t used, comp;
1109 err = dsl_dataset_space_written(fromds, ds,
1110 &used, &comp, &size);
1115 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
1120 * Simple callback used to traverse the blocks of a snapshot and sum their
1125 dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1126 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1128 uint64_t *spaceptr = arg;
1129 if (bp != NULL && !BP_IS_HOLE(bp)) {
1130 *spaceptr += BP_GET_UCSIZE(bp);
1136 * Given a destination snapshot and a TXG, calculate the approximate size of a
1137 * send stream sent from that TXG. from_txg may be zero, indicating that the
1138 * whole snapshot will be sent.
1141 dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
1144 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1148 ASSERT(dsl_pool_config_held(dp));
1150 /* tosnap must be a snapshot */
1151 if (!dsl_dataset_is_snapshot(ds))
1152 return (SET_ERROR(EINVAL));
1154 /* verify that from_txg is before the provided snapshot was taken */
1155 if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
1156 return (SET_ERROR(EXDEV));
1160 * traverse the blocks of the snapshot with birth times after
1161 * from_txg, summing their uncompressed size
1163 err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
1164 dmu_calculate_send_traversal, &size);
1168 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
1172 typedef struct dmu_recv_begin_arg {
1173 const char *drba_origin;
1174 dmu_recv_cookie_t *drba_cookie;
1176 uint64_t drba_snapobj;
1177 } dmu_recv_begin_arg_t;
1180 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
1185 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1187 /* temporary clone name must not exist */
1188 error = zap_lookup(dp->dp_meta_objset,
1189 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
1191 if (error != ENOENT)
1192 return (error == 0 ? EBUSY : error);
1194 /* new snapshot name must not exist */
1195 error = zap_lookup(dp->dp_meta_objset,
1196 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1197 drba->drba_cookie->drc_tosnap, 8, 1, &val);
1198 if (error != ENOENT)
1199 return (error == 0 ? EEXIST : error);
1202 * Check snapshot limit before receiving. We'll recheck again at the
1203 * end, but might as well abort before receiving if we're already over
1206 * Note that we do not check the file system limit with
1207 * dsl_dir_fscount_check because the temporary %clones don't count
1208 * against that limit.
1210 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
1211 NULL, drba->drba_cred);
1215 if (fromguid != 0) {
1216 dsl_dataset_t *snap;
1217 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1219 /* Find snapshot in this dir that matches fromguid. */
1221 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1224 return (SET_ERROR(ENODEV));
1225 if (snap->ds_dir != ds->ds_dir) {
1226 dsl_dataset_rele(snap, FTAG);
1227 return (SET_ERROR(ENODEV));
1229 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
1231 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1232 dsl_dataset_rele(snap, FTAG);
1235 return (SET_ERROR(ENODEV));
1237 if (drba->drba_cookie->drc_force) {
1238 drba->drba_snapobj = obj;
1241 * If we are not forcing, there must be no
1242 * changes since fromsnap.
1244 if (dsl_dataset_modified_since_snap(ds, snap)) {
1245 dsl_dataset_rele(snap, FTAG);
1246 return (SET_ERROR(ETXTBSY));
1248 drba->drba_snapobj = ds->ds_prev->ds_object;
1251 dsl_dataset_rele(snap, FTAG);
1253 /* if full, then must be forced */
1254 if (!drba->drba_cookie->drc_force)
1255 return (SET_ERROR(EEXIST));
1256 /* start from $ORIGIN@$ORIGIN, if supported */
1257 drba->drba_snapobj = dp->dp_origin_snap != NULL ?
1258 dp->dp_origin_snap->ds_object : 0;
1266 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1268 dmu_recv_begin_arg_t *drba = arg;
1269 dsl_pool_t *dp = dmu_tx_pool(tx);
1270 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1271 uint64_t fromguid = drrb->drr_fromguid;
1272 int flags = drrb->drr_flags;
1274 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1276 const char *tofs = drba->drba_cookie->drc_tofs;
1278 /* already checked */
1279 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1280 ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
1282 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1283 DMU_COMPOUNDSTREAM ||
1284 drrb->drr_type >= DMU_OST_NUMTYPES ||
1285 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1286 return (SET_ERROR(EINVAL));
1288 /* Verify pool version supports SA if SA_SPILL feature set */
1289 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1290 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1291 return (SET_ERROR(ENOTSUP));
1293 if (drba->drba_cookie->drc_resumable &&
1294 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
1295 return (SET_ERROR(ENOTSUP));
1298 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1299 * record to a plain WRITE record, so the pool must have the
1300 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1301 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1303 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1304 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1305 return (SET_ERROR(ENOTSUP));
1306 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1307 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1308 return (SET_ERROR(ENOTSUP));
1311 * The receiving code doesn't know how to translate large blocks
1312 * to smaller ones, so the pool must have the LARGE_BLOCKS
1313 * feature enabled if the stream has LARGE_BLOCKS.
1315 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1316 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1317 return (SET_ERROR(ENOTSUP));
1319 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1321 /* target fs already exists; recv into temp clone */
1323 /* Can't recv a clone into an existing fs */
1324 if (flags & DRR_FLAG_CLONE) {
1325 dsl_dataset_rele(ds, FTAG);
1326 return (SET_ERROR(EINVAL));
1329 error = recv_begin_check_existing_impl(drba, ds, fromguid);
1330 dsl_dataset_rele(ds, FTAG);
1331 } else if (error == ENOENT) {
1332 /* target fs does not exist; must be a full backup or clone */
1333 char buf[MAXNAMELEN];
1336 * If it's a non-clone incremental, we are missing the
1337 * target fs, so fail the recv.
1339 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
1341 return (SET_ERROR(ENOENT));
1343 /* Open the parent of tofs */
1344 ASSERT3U(strlen(tofs), <, MAXNAMELEN);
1345 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1346 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1351 * Check filesystem and snapshot limits before receiving. We'll
1352 * recheck snapshot limits again at the end (we create the
1353 * filesystems and increment those counts during begin_sync).
1355 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1356 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1358 dsl_dataset_rele(ds, FTAG);
1362 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1363 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1365 dsl_dataset_rele(ds, FTAG);
1369 if (drba->drba_origin != NULL) {
1370 dsl_dataset_t *origin;
1371 error = dsl_dataset_hold(dp, drba->drba_origin,
1374 dsl_dataset_rele(ds, FTAG);
1377 if (!origin->ds_is_snapshot) {
1378 dsl_dataset_rele(origin, FTAG);
1379 dsl_dataset_rele(ds, FTAG);
1380 return (SET_ERROR(EINVAL));
1382 if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
1383 dsl_dataset_rele(origin, FTAG);
1384 dsl_dataset_rele(ds, FTAG);
1385 return (SET_ERROR(ENODEV));
1387 dsl_dataset_rele(origin, FTAG);
1389 dsl_dataset_rele(ds, FTAG);
1396 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1398 dmu_recv_begin_arg_t *drba = arg;
1399 dsl_pool_t *dp = dmu_tx_pool(tx);
1400 objset_t *mos = dp->dp_meta_objset;
1401 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1402 const char *tofs = drba->drba_cookie->drc_tofs;
1403 dsl_dataset_t *ds, *newds;
1406 uint64_t crflags = 0;
1408 if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1409 crflags |= DS_FLAG_CI_DATASET;
1411 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1413 /* create temporary clone */
1414 dsl_dataset_t *snap = NULL;
1415 if (drba->drba_snapobj != 0) {
1416 VERIFY0(dsl_dataset_hold_obj(dp,
1417 drba->drba_snapobj, FTAG, &snap));
1419 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1420 snap, crflags, drba->drba_cred, tx);
1421 if (drba->drba_snapobj != 0)
1422 dsl_dataset_rele(snap, FTAG);
1423 dsl_dataset_rele(ds, FTAG);
1427 dsl_dataset_t *origin = NULL;
1429 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1431 if (drba->drba_origin != NULL) {
1432 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1436 /* Create new dataset. */
1437 dsobj = dsl_dataset_create_sync(dd,
1438 strrchr(tofs, '/') + 1,
1439 origin, crflags, drba->drba_cred, tx);
1441 dsl_dataset_rele(origin, FTAG);
1442 dsl_dir_rele(dd, FTAG);
1443 drba->drba_cookie->drc_newfs = B_TRUE;
1445 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1447 if (drba->drba_cookie->drc_resumable) {
1448 dsl_dataset_zapify(newds, tx);
1449 if (drrb->drr_fromguid != 0) {
1450 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1451 8, 1, &drrb->drr_fromguid, tx));
1453 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1454 8, 1, &drrb->drr_toguid, tx));
1455 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1456 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1459 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1461 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1463 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1465 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1466 DMU_BACKUP_FEATURE_EMBED_DATA) {
1467 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1472 dmu_buf_will_dirty(newds->ds_dbuf, tx);
1473 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1476 * If we actually created a non-clone, we need to create the
1477 * objset in our new dataset.
1479 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1480 (void) dmu_objset_create_impl(dp->dp_spa,
1481 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1484 drba->drba_cookie->drc_ds = newds;
1486 spa_history_log_internal_ds(newds, "receive", tx, "");
1490 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1492 dmu_recv_begin_arg_t *drba = arg;
1493 dsl_pool_t *dp = dmu_tx_pool(tx);
1494 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1496 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1498 const char *tofs = drba->drba_cookie->drc_tofs;
1500 /* already checked */
1501 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1502 ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
1504 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1505 DMU_COMPOUNDSTREAM ||
1506 drrb->drr_type >= DMU_OST_NUMTYPES)
1507 return (SET_ERROR(EINVAL));
1509 /* Verify pool version supports SA if SA_SPILL feature set */
1510 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1511 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1512 return (SET_ERROR(ENOTSUP));
1515 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1516 * record to a plain WRITE record, so the pool must have the
1517 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1518 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1520 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1521 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1522 return (SET_ERROR(ENOTSUP));
1523 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1524 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1525 return (SET_ERROR(ENOTSUP));
1527 char recvname[ZFS_MAXNAMELEN];
1529 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1530 tofs, recv_clone_name);
1532 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1533 /* %recv does not exist; continue in tofs */
1534 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1539 /* check that ds is marked inconsistent */
1540 if (!DS_IS_INCONSISTENT(ds)) {
1541 dsl_dataset_rele(ds, FTAG);
1542 return (SET_ERROR(EINVAL));
1545 /* check that there is resuming data, and that the toguid matches */
1546 if (!dsl_dataset_is_zapified(ds)) {
1547 dsl_dataset_rele(ds, FTAG);
1548 return (SET_ERROR(EINVAL));
1551 error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1552 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1553 if (error != 0 || drrb->drr_toguid != val) {
1554 dsl_dataset_rele(ds, FTAG);
1555 return (SET_ERROR(EINVAL));
1559 * Check if the receive is still running. If so, it will be owned.
1560 * Note that nothing else can own the dataset (e.g. after the receive
1561 * fails) because it will be marked inconsistent.
1563 if (dsl_dataset_has_owner(ds)) {
1564 dsl_dataset_rele(ds, FTAG);
1565 return (SET_ERROR(EBUSY));
1568 /* There should not be any snapshots of this fs yet. */
1569 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
1570 dsl_dataset_rele(ds, FTAG);
1571 return (SET_ERROR(EINVAL));
1575 * Note: resume point will be checked when we process the first WRITE
1579 /* check that the origin matches */
1581 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
1582 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
1583 if (drrb->drr_fromguid != val) {
1584 dsl_dataset_rele(ds, FTAG);
1585 return (SET_ERROR(EINVAL));
1588 dsl_dataset_rele(ds, FTAG);
1593 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
1595 dmu_recv_begin_arg_t *drba = arg;
1596 dsl_pool_t *dp = dmu_tx_pool(tx);
1597 const char *tofs = drba->drba_cookie->drc_tofs;
1600 char recvname[ZFS_MAXNAMELEN];
1602 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1603 tofs, recv_clone_name);
1605 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1606 /* %recv does not exist; continue in tofs */
1607 VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
1608 drba->drba_cookie->drc_newfs = B_TRUE;
1611 /* clear the inconsistent flag so that we can own it */
1612 ASSERT(DS_IS_INCONSISTENT(ds));
1613 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1614 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
1615 dsobj = ds->ds_object;
1616 dsl_dataset_rele(ds, FTAG);
1618 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));
1620 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1621 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1623 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
1625 drba->drba_cookie->drc_ds = ds;
1627 spa_history_log_internal_ds(ds, "resume receive", tx, "");
1631 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1632 * succeeds; otherwise we will leak the holds on the datasets.
1635 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
1636 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
1638 dmu_recv_begin_arg_t drba = { 0 };
1640 bzero(drc, sizeof (dmu_recv_cookie_t));
1641 drc->drc_drr_begin = drr_begin;
1642 drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1643 drc->drc_tosnap = tosnap;
1644 drc->drc_tofs = tofs;
1645 drc->drc_force = force;
1646 drc->drc_resumable = resumable;
1647 drc->drc_cred = CRED();
1649 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
1650 drc->drc_byteswap = B_TRUE;
1651 fletcher_4_incremental_byteswap(drr_begin,
1652 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1653 byteswap_record(drr_begin);
1654 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1655 fletcher_4_incremental_native(drr_begin,
1656 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1658 return (SET_ERROR(EINVAL));
1661 drba.drba_origin = origin;
1662 drba.drba_cookie = drc;
1663 drba.drba_cred = CRED();
1665 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1666 DMU_BACKUP_FEATURE_RESUMING) {
1667 return (dsl_sync_task(tofs,
1668 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
1669 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1671 return (dsl_sync_task(tofs,
1672 dmu_recv_begin_check, dmu_recv_begin_sync,
1673 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1677 struct receive_record_arg {
1678 dmu_replay_record_t header;
1679 void *payload; /* Pointer to a buffer containing the payload */
1681 * If the record is a write, pointer to the arc_buf_t containing the
1684 arc_buf_t *write_buf;
1686 uint64_t bytes_read; /* bytes read from stream when record created */
1687 boolean_t eos_marker; /* Marks the end of the stream */
1691 struct receive_writer_arg {
1697 * These three args are used to signal to the main thread that we're
1705 /* A map from guid to dataset to help handle dedup'd streams. */
1706 avl_tree_t *guid_to_ds_map;
1707 boolean_t resumable;
1708 uint64_t last_object, last_offset;
1709 uint64_t bytes_read; /* bytes read when current record created */
1712 struct receive_arg {
1716 uint64_t voff; /* The current offset in the stream */
1717 uint64_t bytes_read;
1719 * A record that has had its payload read in, but hasn't yet been handed
1720 * off to the worker thread.
1722 struct receive_record_arg *rrd;
1723 /* A record that has had its header read in, but not its payload. */
1724 struct receive_record_arg *next_rrd;
1726 zio_cksum_t prev_cksum;
1729 /* Sorted list of objects not to issue prefetches for. */
1730 list_t ignore_obj_list;
1733 struct receive_ign_obj_node {
1738 typedef struct guid_map_entry {
1740 dsl_dataset_t *gme_ds;
1745 guid_compare(const void *arg1, const void *arg2)
1747 const guid_map_entry_t *gmep1 = arg1;
1748 const guid_map_entry_t *gmep2 = arg2;
1750 if (gmep1->guid < gmep2->guid)
1752 else if (gmep1->guid > gmep2->guid)
1758 free_guid_map_onexit(void *arg)
1760 avl_tree_t *ca = arg;
1761 void *cookie = NULL;
1762 guid_map_entry_t *gmep;
1764 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1765 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1766 dsl_dataset_rele(gmep->gme_ds, gmep);
1767 kmem_free(gmep, sizeof (guid_map_entry_t));
1770 kmem_free(ca, sizeof (avl_tree_t));
1774 restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid)
1780 aiov.iov_base = buf;
1782 auio.uio_iov = &aiov;
1783 auio.uio_iovcnt = 1;
1784 auio.uio_resid = len;
1785 auio.uio_segflg = UIO_SYSSPACE;
1786 auio.uio_rw = UIO_READ;
1787 auio.uio_offset = off;
1788 auio.uio_td = ra->td;
1790 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1792 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1795 *resid = auio.uio_resid;
1800 receive_read(struct receive_arg *ra, int len, void *buf)
1804 /* some things will require 8-byte alignment, so everything must */
1807 while (done < len) {
1810 ra->err = restore_bytes(ra, buf + done,
1811 len - done, ra->voff, &resid);
1813 if (resid == len - done) {
1815 * Note: ECKSUM indicates that the receive
1816 * was interrupted and can potentially be resumed.
1818 ra->err = SET_ERROR(ECKSUM);
1820 ra->voff += len - done - resid;
1826 ra->bytes_read += len;
1828 ASSERT3U(done, ==, len);
1833 byteswap_record(dmu_replay_record_t *drr)
1835 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1836 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1837 drr->drr_type = BSWAP_32(drr->drr_type);
1838 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1840 switch (drr->drr_type) {
1842 DO64(drr_begin.drr_magic);
1843 DO64(drr_begin.drr_versioninfo);
1844 DO64(drr_begin.drr_creation_time);
1845 DO32(drr_begin.drr_type);
1846 DO32(drr_begin.drr_flags);
1847 DO64(drr_begin.drr_toguid);
1848 DO64(drr_begin.drr_fromguid);
1851 DO64(drr_object.drr_object);
1852 DO32(drr_object.drr_type);
1853 DO32(drr_object.drr_bonustype);
1854 DO32(drr_object.drr_blksz);
1855 DO32(drr_object.drr_bonuslen);
1856 DO64(drr_object.drr_toguid);
1858 case DRR_FREEOBJECTS:
1859 DO64(drr_freeobjects.drr_firstobj);
1860 DO64(drr_freeobjects.drr_numobjs);
1861 DO64(drr_freeobjects.drr_toguid);
1864 DO64(drr_write.drr_object);
1865 DO32(drr_write.drr_type);
1866 DO64(drr_write.drr_offset);
1867 DO64(drr_write.drr_length);
1868 DO64(drr_write.drr_toguid);
1869 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
1870 DO64(drr_write.drr_key.ddk_prop);
1872 case DRR_WRITE_BYREF:
1873 DO64(drr_write_byref.drr_object);
1874 DO64(drr_write_byref.drr_offset);
1875 DO64(drr_write_byref.drr_length);
1876 DO64(drr_write_byref.drr_toguid);
1877 DO64(drr_write_byref.drr_refguid);
1878 DO64(drr_write_byref.drr_refobject);
1879 DO64(drr_write_byref.drr_refoffset);
1880 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
1882 DO64(drr_write_byref.drr_key.ddk_prop);
1884 case DRR_WRITE_EMBEDDED:
1885 DO64(drr_write_embedded.drr_object);
1886 DO64(drr_write_embedded.drr_offset);
1887 DO64(drr_write_embedded.drr_length);
1888 DO64(drr_write_embedded.drr_toguid);
1889 DO32(drr_write_embedded.drr_lsize);
1890 DO32(drr_write_embedded.drr_psize);
1893 DO64(drr_free.drr_object);
1894 DO64(drr_free.drr_offset);
1895 DO64(drr_free.drr_length);
1896 DO64(drr_free.drr_toguid);
1899 DO64(drr_spill.drr_object);
1900 DO64(drr_spill.drr_length);
1901 DO64(drr_spill.drr_toguid);
1904 DO64(drr_end.drr_toguid);
1905 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
1909 if (drr->drr_type != DRR_BEGIN) {
1910 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
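/*
 * Work out how many block pointers a dnode with this bonus type and size
 * will have: an SA (system attribute) bonus buffer implies a single blkptr,
 * otherwise the bonus buffer and blkptr array share DN_MAX_BONUSLEN bytes.
 */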
1917 static inline uint8_t
1918 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1920 if (bonus_type == DMU_OT_SA) {
1924 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1929 save_resume_state(struct receive_writer_arg *rwa,
1930 uint64_t object, uint64_t offset, dmu_tx_t *tx)
1932 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1934 if (!rwa->resumable)
1938 * We use ds_resume_bytes[] != 0 to indicate that we need to
1939 * update this on disk, so it must not be 0.
1941 ASSERT(rwa->bytes_read != 0);
1944 * We only resume from write records, which have a valid
1945 * (non-meta-dnode) object number.
1947 ASSERT(object != 0);
1950 * For resuming to work correctly, we must receive records in order,
1951 * sorted by object,offset. This is checked by the callers, but
1952 * assert it here for good measure.
1954 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1955 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1956 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1957 ASSERT3U(rwa->bytes_read, >=,
1958 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1960 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1961 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1962 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
1966 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1969 dmu_object_info_t doi;
1974 if (drro->drr_type == DMU_OT_NONE ||
1975 !DMU_OT_IS_VALID(drro->drr_type) ||
1976 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1977 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1978 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1979 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1980 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1981 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1982 drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1983 return (SET_ERROR(EINVAL));
1986 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1988 if (err != 0 && err != ENOENT)
1989 return (SET_ERROR(EINVAL));
1990 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1993 * If we are losing blkptrs or changing the block size this must
1994 * be a new file instance. We must clear out the previous file
1995 * contents before we can change this type of metadata in the dnode.
2000 nblkptr = deduce_nblkptr(drro->drr_bonustype,
2001 drro->drr_bonuslen);
2003 if (drro->drr_blksz != doi.doi_data_block_size ||
2004 nblkptr < doi.doi_nblkptr) {
2005 err = dmu_free_long_range(rwa->os, drro->drr_object,
2008 return (SET_ERROR(EINVAL));
2012 tx = dmu_tx_create(rwa->os);
2013 dmu_tx_hold_bonus(tx, object);
2014 err = dmu_tx_assign(tx, TXG_WAIT);
2020 if (object == DMU_NEW_OBJECT) {
2021 /* currently free, want to be allocated */
2022 err = dmu_object_claim(rwa->os, drro->drr_object,
2023 drro->drr_type, drro->drr_blksz,
2024 drro->drr_bonustype, drro->drr_bonuslen, tx);
2025 } else if (drro->drr_type != doi.doi_type ||
2026 drro->drr_blksz != doi.doi_data_block_size ||
2027 drro->drr_bonustype != doi.doi_bonus_type ||
2028 drro->drr_bonuslen != doi.doi_bonus_size) {
2029 /* currently allocated, but with different properties */
2030 err = dmu_object_reclaim(rwa->os, drro->drr_object,
2031 drro->drr_type, drro->drr_blksz,
2032 drro->drr_bonustype, drro->drr_bonuslen, tx);
2036 return (SET_ERROR(EINVAL));
2039 dmu_object_set_checksum(rwa->os, drro->drr_object,
2040 drro->drr_checksumtype, tx);
2041 dmu_object_set_compress(rwa->os, drro->drr_object,
2042 drro->drr_compress, tx);
2047 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
2048 dmu_buf_will_dirty(db, tx);
2050 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2051 bcopy(data, db->db_data, drro->drr_bonuslen);
2052 if (rwa->byteswap) {
2053 dmu_object_byteswap_t byteswap =
2054 DMU_OT_BYTESWAP(drro->drr_bonustype);
2055 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2056 drro->drr_bonuslen);
2058 dmu_buf_rele(db, FTAG);
2067 receive_freeobjects(struct receive_writer_arg *rwa,
2068 struct drr_freeobjects *drrfo)
2072 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2073 return (SET_ERROR(EINVAL));
2075 for (obj = drrfo->drr_firstobj;
2076 obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
2077 (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2080 if (dmu_object_info(rwa->os, obj, NULL) != 0)
2083 err = dmu_free_long_object(rwa->os, obj);
2092 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2098 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
2099 !DMU_OT_IS_VALID(drrw->drr_type))
2100 return (SET_ERROR(EINVAL));
2103 * For resuming to work, records must be in increasing order
2104 * by (object, offset).
2106 if (drrw->drr_object < rwa->last_object ||
2107 (drrw->drr_object == rwa->last_object &&
2108 drrw->drr_offset < rwa->last_offset)) {
2109 return (SET_ERROR(EINVAL));
2111 rwa->last_object = drrw->drr_object;
2112 rwa->last_offset = drrw->drr_offset;
2114 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2115 return (SET_ERROR(EINVAL));
2117 tx = dmu_tx_create(rwa->os);
2119 dmu_tx_hold_write(tx, drrw->drr_object,
2120 drrw->drr_offset, drrw->drr_length);
2121 err = dmu_tx_assign(tx, TXG_WAIT);
2126 if (rwa->byteswap) {
2127 dmu_object_byteswap_t byteswap =
2128 DMU_OT_BYTESWAP(drrw->drr_type);
2129 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2134 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
2135 return (SET_ERROR(EINVAL));
2136 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2139 * Note: If the receive fails, we want the resume stream to start
2140 * with the same record that we last successfully received (as opposed
2141 * to the next record), so that we can verify that we are
2142 * resuming from the correct location.
2144 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2146 dmu_buf_rele(bonus, FTAG);
2152 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
2153 * streams to refer to a copy of the data that is already on the
2154 * system because it came in earlier in the stream. This function
2155 * finds the earlier copy of the data, and uses that copy instead of
2156 * data from the stream to fulfill this write.
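* Concretely, drr_refguid names the dataset holding the earlier copy
* (looked up in rwa->guid_to_ds_map when it differs from the target's
* GUID), and (drr_refobject, drr_refoffset) name the block itself; that
* block is re-read with dmu_buf_hold() and rewritten at
* (drr_object, drr_offset) in the target objset.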
2159 receive_write_byref(struct receive_writer_arg *rwa,
2160 struct drr_write_byref *drrwbr)
2164 guid_map_entry_t gmesrch;
2165 guid_map_entry_t *gmep;
2167 objset_t *ref_os = NULL;
2170 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2171 return (SET_ERROR(EINVAL));
2174 * If the GUID of the referenced dataset is different from the
2175 * GUID of the target dataset, find the referenced dataset.
2177 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2178 gmesrch.guid = drrwbr->drr_refguid;
2179 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2181 return (SET_ERROR(EINVAL));
2183 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2184 return (SET_ERROR(EINVAL));
2189 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2190 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2194 tx = dmu_tx_create(rwa->os);
2196 dmu_tx_hold_write(tx, drrwbr->drr_object,
2197 drrwbr->drr_offset, drrwbr->drr_length);
2198 err = dmu_tx_assign(tx, TXG_WAIT);
2203 dmu_write(rwa->os, drrwbr->drr_object,
2204 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2205 dmu_buf_rele(dbp, FTAG);
2207 /* See comment in receive_write. */
2208 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2214 receive_write_embedded(struct receive_writer_arg *rwa,
2215 struct drr_write_embedded *drrwe, void *data)
2220 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2223 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2226 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2228 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2231 tx = dmu_tx_create(rwa->os);
2233 dmu_tx_hold_write(tx, drrwe->drr_object,
2234 drrwe->drr_offset, drrwe->drr_length);
2235 err = dmu_tx_assign(tx, TXG_WAIT);
2241 dmu_write_embedded(rwa->os, drrwe->drr_object,
2242 drrwe->drr_offset, data, drrwe->drr_etype,
2243 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2244 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2246 /* See comment in receive_write. */
2247 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
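/*
 * Note on the embedded-write path above: dmu_write_embedded() stores the
 * (possibly compressed) payload directly in the block pointer rather than
 * in an allocated block, which is why drr_psize is bounded by
 * BPE_PAYLOAD_SIZE in the checks at the top of this function.
 */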
2253 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2257 dmu_buf_t *db, *db_spill;
2260 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2261 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2262 return (SET_ERROR(EINVAL));
2264 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2265 return (SET_ERROR(EINVAL));
2267 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2268 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2269 dmu_buf_rele(db, FTAG);
2273 tx = dmu_tx_create(rwa->os);
2275 dmu_tx_hold_spill(tx, db->db_object);
2277 err = dmu_tx_assign(tx, TXG_WAIT);
2279 dmu_buf_rele(db, FTAG);
2280 dmu_buf_rele(db_spill, FTAG);
2284 dmu_buf_will_dirty(db_spill, tx);
2286 if (db_spill->db_size < drrs->drr_length)
2287 VERIFY0(dbuf_spill_set_blksz(db_spill,
2288 drrs->drr_length, tx));
2289 bcopy(data, db_spill->db_data, drrs->drr_length);
2291 dmu_buf_rele(db, FTAG);
2292 dmu_buf_rele(db_spill, FTAG);
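/*
 * Note on the spill path above: the spill block is reached through the
 * object's bonus buffer (dmu_spill_hold_by_bonus), grown with
 * dbuf_spill_set_blksz() if the incoming record is larger than the
 * current spill size, and then overwritten with the record's payload.
 */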
2300 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2304 if (drrf->drr_length != -1ULL &&
2305 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2306 return (SET_ERROR(EINVAL));
2308 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2309 return (SET_ERROR(EINVAL));
2311 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2312 drrf->drr_offset, drrf->drr_length);
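/*
 * Note: a drr_length of -1ULL follows the DMU_OBJECT_END convention,
 * meaning "free from drr_offset to the end of the object", which is why
 * that value is exempt from the offset+length overflow check above.
 */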
2317 /* used to destroy the drc_ds on error */
2319 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2321 if (drc->drc_resumable) {
2322 /* wait for our resume state to be written to disk */
2323 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2324 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2326 char name[MAXNAMELEN];
2327 dsl_dataset_name(drc->drc_ds, name);
2328 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2329 (void) dsl_destroy_head(name);
2334 receive_cksum(struct receive_arg *ra, int len, void *buf)
2337 fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2339 fletcher_4_incremental_native(buf, len, &ra->cksum);
2344 * Read the payload into a buffer of size len, and update the current record's payload field.
2346 * Allocate ra->next_rrd and read the next record's header into
2347 * ra->next_rrd->header.
2348 * Verify checksum of payload and next record.
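* In effect the reader stays one record ahead: the payload just read and
* the header that follows it are folded into ra->cksum and compared
* against the checksum embedded in that next header, so corruption is
* detected before the current record is handed to the writer thread.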
2351 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2356 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2357 err = receive_read(ra, len, buf);
2360 receive_cksum(ra, len, buf);
2362 /* note: rrd is NULL when reading the begin record's payload */
2363 if (ra->rrd != NULL) {
2364 ra->rrd->payload = buf;
2365 ra->rrd->payload_size = len;
2366 ra->rrd->bytes_read = ra->bytes_read;
2370 ra->prev_cksum = ra->cksum;
2372 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2373 err = receive_read(ra, sizeof (ra->next_rrd->header),
2374 &ra->next_rrd->header);
2375 ra->next_rrd->bytes_read = ra->bytes_read;
2377 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2378 ra->next_rrd = NULL;
2381 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2382 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2383 ra->next_rrd = NULL;
2384 return (SET_ERROR(EINVAL));
2388 * Note: checksum is of everything up to but not including the checksum itself.
2391 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2392 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2394 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2395 &ra->next_rrd->header);
2397 zio_cksum_t cksum_orig =
2398 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2399 zio_cksum_t *cksump =
2400 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2403 byteswap_record(&ra->next_rrd->header);
2405 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2406 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2407 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2408 ra->next_rrd = NULL;
2409 return (SET_ERROR(ECKSUM));
2412 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2418 * Issue the prefetch reads for any necessary indirect blocks.
2420 * We use the object ignore list to tell us whether or not to issue prefetches
2421 * for a given object. We do this for both correctness (in case the blocksize
2422 * of an object has changed) and performance (if the object doesn't exist, don't
2423 * needlessly try to issue prefetches). We also trim the list as we go through
2424 * the stream to prevent it from growing to an unbounded size.
2426 * The object numbers within will always be in sorted order, and any write
2427 * records we see will also be in sorted order, but they're not sorted with
2428 * respect to each other (i.e. we can get several object records before
2429 * receiving each object's write records). As a result, once we've reached a
2430 * given object number, we can safely remove any reference to lower object
2431 * numbers in the ignore list. In practice, we receive up to 32 object records
2432 * before receiving write records, so the list can have up to 32 nodes in it.
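* For example, after object records for objects 5, 9, and 12, the first
* write record for object 9 lets us discard the node for object 5, since
* no later write record can refer to an object below 9.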
2436 receive_read_prefetch(struct receive_arg *ra,
2437 uint64_t object, uint64_t offset, uint64_t length)
2439 struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
2440 while (node != NULL && node->object < object) {
2441 VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
2442 kmem_free(node, sizeof (*node));
2443 node = list_head(&ra->ignore_obj_list);
2445 if (node == NULL || node->object > object) {
2446 dmu_prefetch(ra->os, object, 1, offset, length,
2447 ZIO_PRIORITY_SYNC_READ);
2452 * Read records off the stream, issuing any necessary prefetches.
2455 receive_read_record(struct receive_arg *ra)
2459 switch (ra->rrd->header.drr_type) {
2462 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2463 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2464 void *buf = kmem_zalloc(size, KM_SLEEP);
2465 dmu_object_info_t doi;
2466 err = receive_read_payload_and_next_header(ra, size, buf);
2468 kmem_free(buf, size);
2471 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2473 * See receive_read_prefetch for an explanation why we're
2474 * storing this object in the ignore_obj_list.
2476 if (err == ENOENT ||
2477 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2478 struct receive_ign_obj_node *node =
2479 kmem_zalloc(sizeof (*node),
2481 node->object = drro->drr_object;
2483 struct receive_ign_obj_node *last_object =
2484 list_tail(&ra->ignore_obj_list);
2485 uint64_t last_objnum = (last_object != NULL ?
2486 last_object->object : 0);
2487 ASSERT3U(node->object, >, last_objnum);
2489 list_insert_tail(&ra->ignore_obj_list, node);
2494 case DRR_FREEOBJECTS:
2496 err = receive_read_payload_and_next_header(ra, 0, NULL);
2501 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2502 arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2505 err = receive_read_payload_and_next_header(ra,
2506 drrw->drr_length, abuf->b_data);
2508 dmu_return_arcbuf(abuf);
2511 ra->rrd->write_buf = abuf;
2512 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2516 case DRR_WRITE_BYREF:
2518 struct drr_write_byref *drrwb =
2519 &ra->rrd->header.drr_u.drr_write_byref;
2520 err = receive_read_payload_and_next_header(ra, 0, NULL);
2521 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2525 case DRR_WRITE_EMBEDDED:
2527 struct drr_write_embedded *drrwe =
2528 &ra->rrd->header.drr_u.drr_write_embedded;
2529 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2530 void *buf = kmem_zalloc(size, KM_SLEEP);
2532 err = receive_read_payload_and_next_header(ra, size, buf);
2534 kmem_free(buf, size);
2538 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2545 * It might be beneficial to prefetch indirect blocks here, but
2546 * we don't really have the data to decide for sure.
2548 err = receive_read_payload_and_next_header(ra, 0, NULL);
2553 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2554 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2555 return (SET_ERROR(ECKSUM));
2560 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2561 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2562 err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2565 kmem_free(buf, drrs->drr_length);
2569 return (SET_ERROR(EINVAL));
2574 * Commit the records to the pool.
2577 receive_process_record(struct receive_writer_arg *rwa,
2578 struct receive_record_arg *rrd)
2582 /* Processing in order, therefore bytes_read should be increasing. */
2583 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2584 rwa->bytes_read = rrd->bytes_read;
2586 switch (rrd->header.drr_type) {
2589 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2590 err = receive_object(rwa, drro, rrd->payload);
2591 kmem_free(rrd->payload, rrd->payload_size);
2592 rrd->payload = NULL;
2595 case DRR_FREEOBJECTS:
2597 struct drr_freeobjects *drrfo =
2598 &rrd->header.drr_u.drr_freeobjects;
2599 return (receive_freeobjects(rwa, drrfo));
2603 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2604 err = receive_write(rwa, drrw, rrd->write_buf);
2605 /* if receive_write() is successful, it consumes the arc_buf */
2607 dmu_return_arcbuf(rrd->write_buf);
2608 rrd->write_buf = NULL;
2609 rrd->payload = NULL;
2612 case DRR_WRITE_BYREF:
2614 struct drr_write_byref *drrwbr =
2615 &rrd->header.drr_u.drr_write_byref;
2616 return (receive_write_byref(rwa, drrwbr));
2618 case DRR_WRITE_EMBEDDED:
2620 struct drr_write_embedded *drrwe =
2621 &rrd->header.drr_u.drr_write_embedded;
2622 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2623 kmem_free(rrd->payload, rrd->payload_size);
2624 rrd->payload = NULL;
2629 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2630 return (receive_free(rwa, drrf));
2634 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2635 err = receive_spill(rwa, drrs, rrd->payload);
2636 kmem_free(rrd->payload, rrd->payload_size);
2637 rrd->payload = NULL;
2641 return (SET_ERROR(EINVAL));
2646 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2647 * receive_process_record(). When we're done, signal the main thread and exit.
2650 receive_writer_thread(void *arg)
2652 struct receive_writer_arg *rwa = arg;
2653 struct receive_record_arg *rrd;
2654 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2655 rrd = bqueue_dequeue(&rwa->q)) {
2657 * If there's an error, the main thread will stop putting things
2658 * on the queue, but we need to clear everything in it before we can exit.
2661 if (rwa->err == 0) {
2662 rwa->err = receive_process_record(rwa, rrd);
2663 } else if (rrd->write_buf != NULL) {
2664 dmu_return_arcbuf(rrd->write_buf);
2665 rrd->write_buf = NULL;
2666 rrd->payload = NULL;
2667 } else if (rrd->payload != NULL) {
2668 kmem_free(rrd->payload, rrd->payload_size);
2669 rrd->payload = NULL;
2671 kmem_free(rrd, sizeof (*rrd));
2673 kmem_free(rrd, sizeof (*rrd));
2674 mutex_enter(&rwa->mutex);
2676 cv_signal(&rwa->cv);
2677 mutex_exit(&rwa->mutex);
2682 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2685 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2686 uint64_t dsobj = dmu_objset_id(ra->os);
2687 uint64_t resume_obj, resume_off;
2689 if (nvlist_lookup_uint64(begin_nvl,
2690 "resume_object", &resume_obj) != 0 ||
2691 nvlist_lookup_uint64(begin_nvl,
2692 "resume_offset", &resume_off) != 0) {
2693 return (SET_ERROR(EINVAL));
2695 VERIFY0(zap_lookup(mos, dsobj,
2696 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2697 if (resume_obj != val)
2698 return (SET_ERROR(EINVAL));
2699 VERIFY0(zap_lookup(mos, dsobj,
2700 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2701 if (resume_off != val)
2702 return (SET_ERROR(EINVAL));
2709 * Read in the stream's records, one by one, and apply them to the pool. There
2710 * are two threads involved; the thread that calls this function will spin up a
2711 * worker thread, read the records off the stream one by one, and issue
2712 * prefetches for any necessary indirect blocks. It will then push the records
2713 * onto an internal blocking queue. The worker thread will pull the records off
2714 * the queue, and actually write the data into the DMU. This way, the worker
2715 * thread doesn't have to wait for reads to complete, since everything it needs
2716 * (the indirect blocks) will be prefetched.
2718 * NB: callers *must* call dmu_recv_end() if this succeeds.
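* Roughly:
*
*	reader (this thread)			writer thread
*	receive_read_record()		-->	bqueue_dequeue(&rwa.q)
*	bqueue_enqueue(&rwa.q, rrd)		receive_process_record()
*
* and the stream is terminated by an eos_marker record, after which the
* writer signals rwa.cv and exits.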
2721 dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2722 int cleanup_fd, uint64_t *action_handlep)
2725 struct receive_arg ra = { 0 };
2726 struct receive_writer_arg rwa = { 0 };
2728 nvlist_t *begin_nvl = NULL;
2730 ra.byteswap = drc->drc_byteswap;
2731 ra.cksum = drc->drc_cksum;
2736 if (dsl_dataset_is_zapified(drc->drc_ds)) {
2737 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2738 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2739 sizeof (ra.bytes_read), 1, &ra.bytes_read);
2742 list_create(&ra.ignore_obj_list, sizeof (struct receive_ign_obj_node),
2743 offsetof(struct receive_ign_obj_node, node));
2745 /* these were verified in dmu_recv_begin */
2746 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2748 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2751 * Open the objset we are modifying.
2753 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2755 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2757 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2759 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2760 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2763 if (cleanup_fd == -1) {
2764 ra.err = SET_ERROR(EBADF);
2767 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2773 if (*action_handlep == 0) {
2774 rwa.guid_to_ds_map =
2775 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2776 avl_create(rwa.guid_to_ds_map, guid_compare,
2777 sizeof (guid_map_entry_t),
2778 offsetof(guid_map_entry_t, avlnode));
2779 err = zfs_onexit_add_cb(minor,
2780 free_guid_map_onexit, rwa.guid_to_ds_map,
2785 err = zfs_onexit_cb_data(minor, *action_handlep,
2786 (void **)&rwa.guid_to_ds_map);
2791 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
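/*
 * The map outlives a single receive via the cleanup_fd/action-handle
 * mechanism: the first stream creates the AVL tree and registers
 * free_guid_map_onexit(), and later streams that present the same
 * action handle retrieve it with zfs_onexit_cb_data() so their
 * WRITE_BYREF records can reference snapshots created earlier.
 */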
2794 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2795 void *payload = NULL;
2796 if (payloadlen != 0)
2797 payload = kmem_alloc(payloadlen, KM_SLEEP);
2799 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2801 if (payloadlen != 0)
2802 kmem_free(payload, payloadlen);
2805 if (payloadlen != 0) {
2806 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2807 kmem_free(payload, payloadlen);
2812 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2813 err = resume_check(&ra, begin_nvl);
2818 (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2819 offsetof(struct receive_record_arg, node));
2820 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2821 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2823 rwa.byteswap = drc->drc_byteswap;
2824 rwa.resumable = drc->drc_resumable;
2826 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
2827 TS_RUN, minclsyspri);
2829 * We're reading rwa.err without locks, which is safe since we are the
2830 * only reader, and the worker thread is the only writer. It's ok if we
2831 * miss a write for an iteration or two of the loop, since the writer
2832 * thread will keep freeing records we send it until we send it an eos marker.
2835 * We can leave this loop in 3 ways: First, if rwa.err is
2836 * non-zero. In that case, the writer thread will free the rrd we just
2837 * pushed. Second, if we're interrupted; in that case, either it's the
2838 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2839 * has been handed off to the writer thread who will free it. Finally,
2840 * if receive_read_record fails or we're at the end of the stream, then
2841 * we free ra.rrd and exit.
2843 while (rwa.err == 0) {
2844 if (issig(JUSTLOOKING) && issig(FORREAL)) {
2845 err = SET_ERROR(EINTR);
2849 ASSERT3P(ra.rrd, ==, NULL);
2850 ra.rrd = ra.next_rrd;
2852 /* Allocates and loads header into ra.next_rrd */
2853 err = receive_read_record(&ra);
2855 if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2856 kmem_free(ra.rrd, sizeof (*ra.rrd));
2861 bqueue_enqueue(&rwa.q, ra.rrd,
2862 sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2865 if (ra.next_rrd == NULL)
2866 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
2867 ra.next_rrd->eos_marker = B_TRUE;
2868 bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
2870 mutex_enter(&rwa.mutex);
2872 cv_wait(&rwa.cv, &rwa.mutex);
2874 mutex_exit(&rwa.mutex);
2876 cv_destroy(&rwa.cv);
2877 mutex_destroy(&rwa.mutex);
2878 bqueue_destroy(&rwa.q);
2883 nvlist_free(begin_nvl);
2884 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2885 zfs_onexit_fd_rele(cleanup_fd);
2889 * Clean up references. If receive is not resumable,
2890 * destroy what we created, so we don't leave it in
2891 * the inconsistent state.
2893 dmu_recv_cleanup_ds(drc);
2897 for (struct receive_ign_obj_node *n =
2898 list_remove_head(&ra.ignore_obj_list); n != NULL;
2899 n = list_remove_head(&ra.ignore_obj_list)) {
2900 kmem_free(n, sizeof (*n));
2902 list_destroy(&ra.ignore_obj_list);
2907 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2909 dmu_recv_cookie_t *drc = arg;
2910 dsl_pool_t *dp = dmu_tx_pool(tx);
2913 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2915 if (!drc->drc_newfs) {
2916 dsl_dataset_t *origin_head;
2918 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2921 if (drc->drc_force) {
2923 * We will destroy any snapshots in tofs (i.e. before
2924 * origin_head) that are after the origin (which is
2925 * the snap before drc_ds, because drc_ds cannot
2926 * have any snaps of its own).
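* For example, if tofs has snapshots @a, @b, and @c, and @a is the origin
* of the received clone, then @b and @c are checked for destruction here
* so that the clone swap below can succeed.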
2930 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2932 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2933 dsl_dataset_t *snap;
2934 error = dsl_dataset_hold_obj(dp, obj, FTAG,
2938 if (snap->ds_dir != origin_head->ds_dir)
2939 error = SET_ERROR(EINVAL);
2941 error = dsl_destroy_snapshot_check_impl(
2944 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2945 dsl_dataset_rele(snap, FTAG);
2950 dsl_dataset_rele(origin_head, FTAG);
2954 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2955 origin_head, drc->drc_force, drc->drc_owner, tx);
2957 dsl_dataset_rele(origin_head, FTAG);
2960 error = dsl_dataset_snapshot_check_impl(origin_head,
2961 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2962 dsl_dataset_rele(origin_head, FTAG);
2966 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2968 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2969 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2975 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2977 dmu_recv_cookie_t *drc = arg;
2978 dsl_pool_t *dp = dmu_tx_pool(tx);
2980 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2981 tx, "snap=%s", drc->drc_tosnap);
2983 if (!drc->drc_newfs) {
2984 dsl_dataset_t *origin_head;
2986 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2989 if (drc->drc_force) {
2991 * Destroy any snapshots of drc_tofs (origin_head)
2992 * after the origin (the snap before drc_ds).
2996 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2998 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2999 dsl_dataset_t *snap;
3000 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3002 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3003 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3004 dsl_destroy_snapshot_sync_impl(snap,
3006 dsl_dataset_rele(snap, FTAG);
3009 VERIFY3P(drc->drc_ds->ds_prev, ==,
3010 origin_head->ds_prev);
3012 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3014 dsl_dataset_snapshot_sync_impl(origin_head,
3015 drc->drc_tosnap, tx);
3017 /* set snapshot's creation time and guid */
3018 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3019 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3020 drc->drc_drrb->drr_creation_time;
3021 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3022 drc->drc_drrb->drr_toguid;
3023 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3024 ~DS_FLAG_INCONSISTENT;
3026 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3027 dsl_dataset_phys(origin_head)->ds_flags &=
3028 ~DS_FLAG_INCONSISTENT;
3030 dsl_dataset_rele(origin_head, FTAG);
3031 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3033 if (drc->drc_owner != NULL)
3034 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3036 dsl_dataset_t *ds = drc->drc_ds;
3038 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3040 /* set snapshot's creation time and guid */
3041 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3042 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3043 drc->drc_drrb->drr_creation_time;
3044 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3045 drc->drc_drrb->drr_toguid;
3046 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3047 ~DS_FLAG_INCONSISTENT;
3049 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3050 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3051 if (dsl_dataset_has_resume_receive_state(ds)) {
3052 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3053 DS_FIELD_RESUME_FROMGUID, tx);
3054 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3055 DS_FIELD_RESUME_OBJECT, tx);
3056 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3057 DS_FIELD_RESUME_OFFSET, tx);
3058 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3059 DS_FIELD_RESUME_BYTES, tx);
3060 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3061 DS_FIELD_RESUME_TOGUID, tx);
3062 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3063 DS_FIELD_RESUME_TONAME, tx);
3066 drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3068 * Release the hold from dmu_recv_begin. This must be done before
3069 * we return to open context, so that when we free the dataset's dnode,
3070 * we can evict its bonus buffer.
3072 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3077 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3080 dsl_dataset_t *snapds;
3081 guid_map_entry_t *gmep;
3084 ASSERT(guid_map != NULL);
3086 err = dsl_pool_hold(name, FTAG, &dp);
3089 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
3090 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
3092 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3093 gmep->gme_ds = snapds;
3094 avl_add(guid_map, gmep);
3095 dsl_dataset_long_hold(snapds, gmep);
3097 kmem_free(gmep, sizeof (*gmep));
3099 dsl_pool_rele(dp, FTAG);
3103 static int dmu_recv_end_modified_blocks = 3;
3106 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3109 char name[MAXNAMELEN];
3113 * We will be destroying the ds; make sure its origin is unmounted if necessary.
3116 dsl_dataset_name(drc->drc_ds, name);
3117 zfs_destroy_unmount_origin(name);
3120 error = dsl_sync_task(drc->drc_tofs,
3121 dmu_recv_end_check, dmu_recv_end_sync, drc,
3122 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3125 dmu_recv_cleanup_ds(drc);
3130 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3134 error = dsl_sync_task(drc->drc_tofs,
3135 dmu_recv_end_check, dmu_recv_end_sync, drc,
3136 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3139 dmu_recv_cleanup_ds(drc);
3140 } else if (drc->drc_guid_to_ds_map != NULL) {
3141 (void) add_ds_to_guidmap(drc->drc_tofs,
3142 drc->drc_guid_to_ds_map,
3143 drc->drc_newsnapobj);
3149 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3151 drc->drc_owner = owner;
3154 return (dmu_recv_new_end(drc));
3156 return (dmu_recv_existing_end(drc));
3160 * Return TRUE if this objset is currently being received into.
3163 dmu_objset_is_receiving(objset_t *os)
3165 return (os->os_dsl_dataset != NULL &&
3166 os->os_dsl_dataset->ds_owner == dmu_recv_tag);