/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
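/*
 * When enabled, a send that encounters an unreadable block proceeds and
 * emits a block filled with this pattern instead of failing with EIO;
 * see the corrupt-data path in backup_cb() below.
 */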
static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
    dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
    struct uio auio;
    struct iovec aiov;

    fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
    aiov.iov_base = buf;
    aiov.iov_len = len;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_resid = len;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_WRITE;
    auio.uio_offset = (off_t)-1;
    auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
    if (dsp->dsa_fp->f_type == DTYPE_VNODE)
        bwillwrite();
    dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
        dsp->dsa_td);
#else
    fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
    dsp->dsa_err = EOPNOTSUPP;
#endif
    mutex_enter(&ds->ds_sendstream_lock);
    *dsp->dsa_off += len;
    mutex_exit(&ds->ds_sendstream_lock);

    return (dsp->dsa_err);
}
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
    struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

    /*
     * When we receive a free record, dbuf_free_range() assumes
     * that the receiving system doesn't have any dbufs in the range
     * being freed. This is always true because there is a one-record
     * constraint: we only send one WRITE record for any given
     * object+offset. We know that the one-record constraint is
     * true because we always send data in increasing order by
     * object,offset.
     *
     * If the increasing-order constraint ever changes, we should find
     * another way to assert that the one-record constraint is still
     * satisfied.
     */
    ASSERT(object > dsp->dsa_last_data_object ||
        (object == dsp->dsa_last_data_object &&
        offset > dsp->dsa_last_data_offset));

    /*
     * If we are doing a non-incremental send, then there can't
     * be any data in the dataset we're receiving into. Therefore
     * a free record would simply be a no-op. Save space by not
     * sending it to begin with.
     */
    if (!dsp->dsa_incremental)
        return (0);

    if (length != -1ULL && offset + length < offset)
        length = -1ULL;

    /*
     * If there is a pending op, but it's not PENDING_FREE, push it out,
     * since free block aggregation can only be done for blocks of the
     * same type (i.e., DRR_FREE records can only be aggregated with
     * other DRR_FREE records; DRR_FREEOBJECTS records can only be
     * aggregated with other DRR_FREEOBJECTS records).
     */
    if (dsp->dsa_pending_op != PENDING_NONE &&
        dsp->dsa_pending_op != PENDING_FREE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
        dsp->dsa_pending_op = PENDING_NONE;
    }

    if (dsp->dsa_pending_op == PENDING_FREE) {
        /*
         * There should never be a PENDING_FREE if length is -1
         * (because dump_dnode is the only place where this
         * function is called with a -1, and only after flushing
         * any pending record).
         */
        ASSERT(length != -1ULL);
        /*
         * Check to see whether this free block can be aggregated
         * with the pending one.
         */
        if (drrf->drr_object == object && drrf->drr_offset +
            drrf->drr_length == offset) {
            drrf->drr_length += length;
            return (0);
        } else {
            /* not a continuation. Push out pending record */
            if (dump_bytes(dsp, dsp->dsa_drr,
                sizeof (dmu_replay_record_t)) != 0)
                return (SET_ERROR(EINTR));
            dsp->dsa_pending_op = PENDING_NONE;
        }
    }
    /* create a FREE record and make it pending */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_FREE;
    drrf->drr_object = object;
    drrf->drr_offset = offset;
    drrf->drr_length = length;
    drrf->drr_toguid = dsp->dsa_toguid;
    if (length == -1ULL) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
    } else {
        dsp->dsa_pending_op = PENDING_FREE;
    }

    return (0);
}
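/*
 * For example, consecutive frees such as (object 5, offset 0,
 * length 128K) followed by (object 5, offset 128K, length 128K) are
 * coalesced into a single pending DRR_FREE covering 256K, which only
 * reaches the stream once a non-adjacent record arrives.
 */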
static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
    struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

    /*
     * We send data in increasing object, offset order.
     * See comment in dump_free() for details.
     */
    ASSERT(object > dsp->dsa_last_data_object ||
        (object == dsp->dsa_last_data_object &&
        offset > dsp->dsa_last_data_offset));
    dsp->dsa_last_data_object = object;
    dsp->dsa_last_data_offset = offset + blksz - 1;

    /*
     * If there is any kind of pending aggregation (currently either
     * a grouping of free objects or free blocks), push it out to
     * the stream, since aggregation can't be done across operations
     * of different types.
     */
    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
        dsp->dsa_pending_op = PENDING_NONE;
    }
    /* write a DATA record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_WRITE;
    drrw->drr_object = object;
    drrw->drr_type = type;
    drrw->drr_offset = offset;
    drrw->drr_length = blksz;
    drrw->drr_toguid = dsp->dsa_toguid;
    drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
    if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
        drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
    DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
    DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
    DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
    drrw->drr_key.ddk_cksum = bp->blk_cksum;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
        return (SET_ERROR(EINTR));
    if (dump_bytes(dsp, data, blksz) != 0)
        return (SET_ERROR(EINTR));
    return (0);
}
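/*
 * On the wire, a DRR_WRITE is thus a fixed-size dmu_replay_record_t
 * header followed immediately by blksz bytes of block payload.
 */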
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
    struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
        dsp->dsa_pending_op = PENDING_NONE;
    }

    /* write a SPILL record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_SPILL;
    drrs->drr_object = object;
    drrs->drr_length = blksz;
    drrs->drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
        return (SET_ERROR(EINTR));
    if (dump_bytes(dsp, data, blksz))
        return (SET_ERROR(EINTR));
    return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
    struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

    /* See comment in dump_free(). */
    if (!dsp->dsa_incremental)
        return (0);

    /*
     * If there is a pending op, but it's not PENDING_FREEOBJECTS,
     * push it out, since free block aggregation can only be done for
     * blocks of the same type (i.e., DRR_FREE records can only be
     * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
     * can only be aggregated with other DRR_FREEOBJECTS records).
     */
    if (dsp->dsa_pending_op != PENDING_NONE &&
        dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
        dsp->dsa_pending_op = PENDING_NONE;
    }
    if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
        /*
         * See whether this free object array can be aggregated
         * with the pending one.
         */
        if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
            drrfo->drr_numobjs += numobjs;
            return (0);
        } else {
            /* can't be aggregated. Push out pending record */
            if (dump_bytes(dsp, dsp->dsa_drr,
                sizeof (dmu_replay_record_t)) != 0)
                return (SET_ERROR(EINTR));
            dsp->dsa_pending_op = PENDING_NONE;
        }
    }

    /* write a FREEOBJECTS record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
    drrfo->drr_firstobj = firstobj;
    drrfo->drr_numobjs = numobjs;
    drrfo->drr_toguid = dsp->dsa_toguid;

    dsp->dsa_pending_op = PENDING_FREEOBJECTS;

    return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
    struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

    if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
        return (dump_freeobjects(dsp, object, 1));

    if (dsp->dsa_pending_op != PENDING_NONE) {
        if (dump_bytes(dsp, dsp->dsa_drr,
            sizeof (dmu_replay_record_t)) != 0)
            return (SET_ERROR(EINTR));
        dsp->dsa_pending_op = PENDING_NONE;
    }

    /* write an OBJECT record */
    bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
    dsp->dsa_drr->drr_type = DRR_OBJECT;
    drro->drr_object = object;
    drro->drr_type = dnp->dn_type;
    drro->drr_bonustype = dnp->dn_bonustype;
    drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
    drro->drr_bonuslen = dnp->dn_bonuslen;
    drro->drr_checksumtype = dnp->dn_checksum;
    drro->drr_compress = dnp->dn_compress;
    drro->drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
        return (SET_ERROR(EINTR));

    if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
        return (SET_ERROR(EINTR));

    /* Free anything past the end of the file. */
    if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
        (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
        return (SET_ERROR(EINTR));
    if (dsp->dsa_err != 0)
        return (SET_ERROR(EINTR));
    return (0);
}
#define BP_SPAN(dnp, level) \
    (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
    (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
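/*
 * Worked example (illustrative values): for 128K data blocks,
 * dn_datablkszsec = 256 (256 * 512 = 128K). With 16K indirect blocks
 * (dn_indblkshift = 14), each indirect holds 2^(14 - 7) = 128 block
 * pointers, so BP_SPAN is 256 << 9 = 128K at level 0 and
 * 256 << (9 + 7) = 16M at level 1 -- each level multiplies the span
 * by the number of pointers per indirect block.
 */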
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
    dmu_sendarg_t *dsp = arg;
    dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
    int err = 0;

    if (issig(JUSTLOOKING) && issig(FORREAL))
        return (SET_ERROR(EINTR));

    if (zb->zb_object != DMU_META_DNODE_OBJECT &&
        DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
        return (0);
    } else if (BP_IS_HOLE(bp) &&
        zb->zb_object == DMU_META_DNODE_OBJECT) {
        uint64_t span = BP_SPAN(dnp, zb->zb_level);
        uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
        err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
    } else if (BP_IS_HOLE(bp)) {
        uint64_t span = BP_SPAN(dnp, zb->zb_level);
        err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
    } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
        return (0);
    } else if (type == DMU_OT_DNODE) {
        dnode_phys_t *blk;
        int i;
        int blksz = BP_GET_LSIZE(bp);
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0)
            return (SET_ERROR(EIO));

        blk = abuf->b_data;
        for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
            uint64_t dnobj = (zb->zb_blkid <<
                (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
            err = dump_dnode(dsp, dnobj, blk+i);
            if (err != 0)
                break;
        }
        (void) arc_buf_remove_ref(abuf, &abuf);
    } else if (type == DMU_OT_SA) {
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;
        int blksz = BP_GET_LSIZE(bp);

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0)
            return (SET_ERROR(EIO));

        err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
        (void) arc_buf_remove_ref(abuf, &abuf);
    } else { /* it's a level-0 block of a regular object */
        uint32_t aflags = ARC_WAIT;
        arc_buf_t *abuf;
        int blksz = BP_GET_LSIZE(bp);

        if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
            &aflags, zb) != 0) {
            if (zfs_send_corrupt_data) {
                uint64_t *ptr;
                /* Send a block filled with 0x"zfs badd bloc" */
                abuf = arc_buf_alloc(spa, blksz, &abuf,
                    ARC_BUFC_DATA);
                for (ptr = abuf->b_data;
                    (char *)ptr < (char *)abuf->b_data + blksz;
                    ptr++)
                    *ptr = 0x2f5baddb10c;
            } else {
                return (SET_ERROR(EIO));
            }
        }

        err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
            blksz, bp, abuf->b_data);
        (void) arc_buf_remove_ref(abuf, &abuf);
    }

    ASSERT(err == 0 || err == EINTR);
    return (err);
}
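/*
 * Note: backup_cb() is driven by traverse_dataset() (see
 * dmu_send_impl() below), which visits blocks in increasing
 * object/offset order -- the ordering that dump_free() and
 * dump_data() assert.
 */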
/*
 * Releases dp, ds, and fromds, using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
#ifdef illumos
    dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
#else
    dsl_dataset_t *fromds, int outfd, struct file *fp, offset_t *off)
#endif
{
    objset_t *os;
    dmu_replay_record_t *drr;
    dmu_sendarg_t *dsp;
    int err;
    uint64_t fromtxg = 0;

    if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
        dsl_dataset_rele(fromds, tag);
        dsl_dataset_rele(ds, tag);
        dsl_pool_rele(dp, tag);
        return (SET_ERROR(EXDEV));
    }

    err = dmu_objset_from_ds(ds, &os);
    if (err != 0) {
        if (fromds != NULL)
            dsl_dataset_rele(fromds, tag);
        dsl_dataset_rele(ds, tag);
        dsl_pool_rele(dp, tag);
        return (err);
    }

    drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
    drr->drr_type = DRR_BEGIN;
    drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
    DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
        DMU_SUBSTREAM);

#ifdef _KERNEL
    if (dmu_objset_type(os) == DMU_OST_ZFS) {
        uint64_t version;
        if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
            kmem_free(drr, sizeof (dmu_replay_record_t));
            if (fromds != NULL)
                dsl_dataset_rele(fromds, tag);
            dsl_dataset_rele(ds, tag);
            dsl_pool_rele(dp, tag);
            return (SET_ERROR(EINVAL));
        }
        if (version >= ZPL_VERSION_SA) {
            DMU_SET_FEATUREFLAGS(
                drr->drr_u.drr_begin.drr_versioninfo,
                DMU_BACKUP_FEATURE_SA_SPILL);
        }
    }
#endif

    drr->drr_u.drr_begin.drr_creation_time =
        ds->ds_phys->ds_creation_time;
    drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
    if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
        drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
    drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
    if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
        drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

    if (fromds != NULL)
        drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
    dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

    if (fromds != NULL) {
        fromtxg = fromds->ds_phys->ds_creation_txg;
        dsl_dataset_rele(fromds, tag);
        fromds = NULL;
    }

    dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

    dsp->dsa_drr = drr;
    dsp->dsa_outfd = outfd;
    dsp->dsa_proc = curproc;
    dsp->dsa_td = curthread;
    dsp->dsa_fp = fp;
    dsp->dsa_os = os;
    dsp->dsa_off = off;
    dsp->dsa_toguid = ds->ds_phys->ds_guid;
    ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
    dsp->dsa_pending_op = PENDING_NONE;
    dsp->dsa_incremental = (fromtxg != 0);

    mutex_enter(&ds->ds_sendstream_lock);
    list_insert_head(&ds->ds_sendstreams, dsp);
    mutex_exit(&ds->ds_sendstream_lock);

    dsl_dataset_long_hold(ds, FTAG);
    dsl_pool_rele(dp, tag);

    if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
        err = dsp->dsa_err;
        goto out;
    }

    err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
        backup_cb, dsp);

    if (dsp->dsa_pending_op != PENDING_NONE)
        if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
            err = SET_ERROR(EINTR);

    if (err != 0) {
        if (err == EINTR && dsp->dsa_err != 0)
            err = dsp->dsa_err;
        goto out;
    }

    bzero(drr, sizeof (dmu_replay_record_t));
    drr->drr_type = DRR_END;
    drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
    drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

    if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
        err = dsp->dsa_err;
        goto out;
    }

out:
    mutex_enter(&ds->ds_sendstream_lock);
    list_remove(&ds->ds_sendstreams, dsp);
    mutex_exit(&ds->ds_sendstream_lock);

    kmem_free(drr, sizeof (dmu_replay_record_t));
    kmem_free(dsp, sizeof (dmu_sendarg_t));

    dsl_dataset_long_rele(ds, FTAG);
    dsl_dataset_rele(ds, tag);

    return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
    dsl_pool_t *dp;
    dsl_dataset_t *ds;
    dsl_dataset_t *fromds = NULL;
    int err;

    err = dsl_pool_hold(pool, FTAG, &dp);
    if (err != 0)
        return (err);

    err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
    if (err != 0) {
        dsl_pool_rele(dp, FTAG);
        return (err);
    }

    if (fromsnap != 0) {
        err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
        if (err != 0) {
            dsl_dataset_rele(ds, FTAG);
            dsl_pool_rele(dp, FTAG);
            return (err);
        }
    }

    return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
}
int
dmu_send(const char *tosnap, const char *fromsnap,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
    dsl_pool_t *dp;
    dsl_dataset_t *ds;
    dsl_dataset_t *fromds = NULL;
    int err;

    if (strchr(tosnap, '@') == NULL)
        return (SET_ERROR(EINVAL));
    if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
        return (SET_ERROR(EINVAL));

    err = dsl_pool_hold(tosnap, FTAG, &dp);
    if (err != 0)
        return (err);

    err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
    if (err != 0) {
        dsl_pool_rele(dp, FTAG);
        return (err);
    }

    if (fromsnap != NULL) {
        err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
        if (err != 0) {
            dsl_dataset_rele(ds, FTAG);
            dsl_pool_rele(dp, FTAG);
            return (err);
        }
    }

    return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
}
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
    dsl_pool_t *dp = ds->ds_dir->dd_pool;
    int err;
    uint64_t size;

    ASSERT(dsl_pool_config_held(dp));

    /* tosnap must be a snapshot */
    if (!dsl_dataset_is_snapshot(ds))
        return (SET_ERROR(EINVAL));

    /*
     * fromsnap must be an earlier snapshot from the same fs as tosnap,
     * or the origin's fs.
     */
    if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
        return (SET_ERROR(EXDEV));

    /* Get uncompressed size estimate of changed data. */
    if (fromds == NULL) {
        size = ds->ds_phys->ds_uncompressed_bytes;
    } else {
        uint64_t used, comp;
        err = dsl_dataset_space_written(fromds, ds,
            &used, &comp, &size);
        if (err != 0)
            return (err);
    }

    /*
     * Assume that space (both on-disk and in-stream) is dominated by
     * data. We will adjust for indirect blocks and the copies property,
     * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
     */

    /*
     * Subtract out approximate space used by indirect blocks.
     * Assume most space is used by data blocks (non-indirect, non-dnode).
     * Assume all blocks are recordsize. Assume ditto blocks and
     * internal fragmentation cancel out compression.
     *
     * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
     * block, which we observe in practice.
     */
    uint64_t recordsize;
    err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
    if (err != 0)
        return (err);
    size -= size / recordsize * sizeof (blkptr_t);

    /* Add in the space for the record associated with each block. */
    size += size / recordsize * sizeof (dmu_replay_record_t);

    *sizep = size;

    return (0);
}
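/*
 * Worked example (illustrative numbers): with recordsize = 128K and
 * size = 1 GB of changed data, we subtract 8192 blocks * 128 bytes
 * (sizeof (blkptr_t)) = 1 MB for indirect blocks, then add 8192 *
 * sizeof (dmu_replay_record_t) (roughly 300 bytes) for the per-block
 * stream headers -- small corrections relative to the data itself.
 */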
typedef struct dmu_recv_begin_arg {
    const char *drba_origin;
    dmu_recv_cookie_t *drba_cookie;
    cred_t *drba_cred;
    uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
    uint64_t val;
    int error;
    dsl_pool_t *dp = ds->ds_dir->dd_pool;

    /* temporary clone name must not exist */
    error = zap_lookup(dp->dp_meta_objset,
        ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
        8, 1, &val);
    if (error != ENOENT)
        return (error == 0 ? EBUSY : error);

    /* new snapshot name must not exist */
    error = zap_lookup(dp->dp_meta_objset,
        ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
        8, 1, &val);
    if (error != ENOENT)
        return (error == 0 ? EEXIST : error);

    if (fromguid != 0) {
        dsl_dataset_t *snap;
        uint64_t obj = ds->ds_phys->ds_prev_snap_obj;

        /* Find snapshot in this dir that matches fromguid. */
        while (obj != 0) {
            error = dsl_dataset_hold_obj(dp, obj, FTAG,
                &snap);
            if (error != 0)
                return (SET_ERROR(ENODEV));
            if (snap->ds_dir != ds->ds_dir) {
                dsl_dataset_rele(snap, FTAG);
                return (SET_ERROR(ENODEV));
            }
            if (snap->ds_phys->ds_guid == fromguid)
                break;
            obj = snap->ds_phys->ds_prev_snap_obj;
            dsl_dataset_rele(snap, FTAG);
        }
        if (obj == 0)
            return (SET_ERROR(ENODEV));

        if (drba->drba_cookie->drc_force) {
            drba->drba_snapobj = obj;
        } else {
            /*
             * If we are not forcing, there must be no
             * changes since fromsnap.
             */
            if (dsl_dataset_modified_since_snap(ds, snap)) {
                dsl_dataset_rele(snap, FTAG);
                return (SET_ERROR(ETXTBSY));
            }
            drba->drba_snapobj = ds->ds_prev->ds_object;
        }

        dsl_dataset_rele(snap, FTAG);
    } else {
        /* if full, most recent snapshot must be $ORIGIN */
        if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
            return (SET_ERROR(ENODEV));
        drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
    }

    return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
    uint64_t fromguid = drrb->drr_fromguid;
    int flags = drrb->drr_flags;
    int error;
    dsl_dataset_t *ds;
    const char *tofs = drba->drba_cookie->drc_tofs;

    /* already checked */
    ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

    if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
        DMU_COMPOUNDSTREAM ||
        drrb->drr_type >= DMU_OST_NUMTYPES ||
        ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
        return (SET_ERROR(EINVAL));

    /* Verify pool version supports SA if SA_SPILL feature set */
    if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
        DMU_BACKUP_FEATURE_SA_SPILL) &&
        spa_version(dp->dp_spa) < SPA_VERSION_SA) {
        return (SET_ERROR(ENOTSUP));
    }

    error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
    if (error == 0) {
        /* target fs already exists; recv into temp clone */

        /* Can't recv a clone into an existing fs */
        if (flags & DRR_FLAG_CLONE) {
            dsl_dataset_rele(ds, FTAG);
            return (SET_ERROR(EINVAL));
        }

        error = recv_begin_check_existing_impl(drba, ds, fromguid);
        dsl_dataset_rele(ds, FTAG);
    } else if (error == ENOENT) {
        /* target fs does not exist; must be a full backup or clone */
        char buf[MAXNAMELEN];

        /*
         * If it's a non-clone incremental, we are missing the
         * target fs, so fail the recv.
         */
        if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
            return (SET_ERROR(ENOENT));

        /* Open the parent of tofs */
        ASSERT3U(strlen(tofs), <, MAXNAMELEN);
        (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
        error = dsl_dataset_hold(dp, buf, FTAG, &ds);
        if (error != 0)
            return (error);

        if (drba->drba_origin != NULL) {
            dsl_dataset_t *origin;
            error = dsl_dataset_hold(dp, drba->drba_origin,
                FTAG, &origin);
            if (error != 0) {
                dsl_dataset_rele(ds, FTAG);
                return (error);
            }
            if (!dsl_dataset_is_snapshot(origin)) {
                dsl_dataset_rele(origin, FTAG);
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(EINVAL));
            }
            if (origin->ds_phys->ds_guid != fromguid) {
                dsl_dataset_rele(origin, FTAG);
                dsl_dataset_rele(ds, FTAG);
                return (SET_ERROR(ENODEV));
            }
            dsl_dataset_rele(origin, FTAG);
        }
        dsl_dataset_rele(ds, FTAG);
    }
    return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
    dmu_recv_begin_arg_t *drba = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
    const char *tofs = drba->drba_cookie->drc_tofs;
    dsl_dataset_t *ds, *newds;
    uint64_t dsobj;
    int error;
    uint64_t crflags;

    crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
        DS_FLAG_CI_DATASET : 0;

    error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
    if (error == 0) {
        /* create temporary clone */
        dsl_dataset_t *snap = NULL;
        if (drba->drba_snapobj != 0) {
            VERIFY0(dsl_dataset_hold_obj(dp,
                drba->drba_snapobj, FTAG, &snap));
        }
        dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
            snap, crflags, drba->drba_cred, tx);
        dsl_dataset_rele(snap, FTAG);
        dsl_dataset_rele(ds, FTAG);
    } else {
        dsl_dir_t *dd;
        const char *tail;
        dsl_dataset_t *origin = NULL;

        VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

        if (drba->drba_origin != NULL) {
            VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
                FTAG, &origin));
        }

        /* Create new dataset. */
        dsobj = dsl_dataset_create_sync(dd,
            strrchr(tofs, '/') + 1,
            origin, crflags, drba->drba_cred, tx);
        if (origin != NULL)
            dsl_dataset_rele(origin, FTAG);
        dsl_dir_rele(dd, FTAG);
        drba->drba_cookie->drc_newfs = B_TRUE;
    }
    VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

    dmu_buf_will_dirty(newds->ds_dbuf, tx);
    newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

    /*
     * If we actually created a non-clone, we need to create the
     * objset in our new dataset.
     */
    if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
        (void) dmu_objset_create_impl(dp->dp_spa,
            newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
    }

    drba->drba_cookie->drc_ds = newds;

    spa_history_log_internal_ds(newds, "receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
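/*
 * A minimal sketch of the expected calling sequence, using the entry
 * points in this file (error handling and argument setup elided):
 *
 *	dmu_recv_cookie_t drc;
 *	err = dmu_recv_begin(tofs, tosnap, drrb, force, origin, &drc);
 *	if (err == 0) {
 *		err = dmu_recv_stream(&drc, fp, voffp, cleanup_fd,
 *		    action_handlep);
 *		if (err == 0)
 *			err = dmu_recv_end(&drc, owner);
 *	}
 */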
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
    dmu_recv_begin_arg_t drba = { 0 };
    dmu_replay_record_t *drr;

    bzero(drc, sizeof (dmu_recv_cookie_t));
    drc->drc_drrb = drrb;
    drc->drc_tosnap = tosnap;
    drc->drc_tofs = tofs;
    drc->drc_force = force;

    if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
        drc->drc_byteswap = B_TRUE;
    else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
        return (SET_ERROR(EINVAL));

    drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
    drr->drr_type = DRR_BEGIN;
    drr->drr_u.drr_begin = *drc->drc_drrb;
    if (drc->drc_byteswap) {
        fletcher_4_incremental_byteswap(drr,
            sizeof (dmu_replay_record_t), &drc->drc_cksum);
    } else {
        fletcher_4_incremental_native(drr,
            sizeof (dmu_replay_record_t), &drc->drc_cksum);
    }
    kmem_free(drr, sizeof (dmu_replay_record_t));

    if (drc->drc_byteswap) {
        drrb->drr_magic = BSWAP_64(drrb->drr_magic);
        drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
        drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
        drrb->drr_type = BSWAP_32(drrb->drr_type);
        drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
        drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
    }

    drba.drba_origin = origin;
    drba.drba_cookie = drc;
    drba.drba_cred = CRED();

    return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
        &drba, 5));
}
struct restorearg {
    int err;
    boolean_t byteswap;
    kthread_t *td;
    struct file *fp;
    uint64_t voff;
    char *buf;
    int bufsize; /* amount of memory allocated for buf */
    zio_cksum_t cksum;
    avl_tree_t *guid_to_ds_map;
};
typedef struct guid_map_entry {
    uint64_t guid;
    dsl_dataset_t *gme_ds;
    avl_node_t avlnode;
} guid_map_entry_t;
static int
guid_compare(const void *arg1, const void *arg2)
{
    const guid_map_entry_t *gmep1 = arg1;
    const guid_map_entry_t *gmep2 = arg2;

    if (gmep1->guid < gmep2->guid)
        return (-1);
    else if (gmep1->guid > gmep2->guid)
        return (1);
    return (0);
}
static void
free_guid_map_onexit(void *arg)
{
    avl_tree_t *ca = arg;
    void *cookie = NULL;
    guid_map_entry_t *gmep;

    while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
        dsl_dataset_long_rele(gmep->gme_ds, gmep);
        dsl_dataset_rele(gmep->gme_ds, gmep);
        kmem_free(gmep, sizeof (guid_map_entry_t));
    }
    avl_destroy(ca);
    kmem_free(ca, sizeof (avl_tree_t));
}
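/*
 * The guid map built for dedup'ed streams is registered with the
 * zfs_onexit machinery (see dmu_recv_stream() below), so this callback
 * runs and tears the map down when the associated cleanup file
 * descriptor is closed.
 */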
static int
restore_bytes(struct restorearg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
    struct uio auio;
    struct iovec aiov;
    int error;

    aiov.iov_base = buf;
    aiov.iov_len = len;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_resid = len;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_READ;
    auio.uio_offset = off;
    auio.uio_td = ra->td;
#ifdef _KERNEL
    error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
    fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
    error = EOPNOTSUPP;
#endif
    *resid = auio.uio_resid;
    return (error);
}
static void *
restore_read(struct restorearg *ra, int len)
{
    void *rv;
    int done = 0;

    /* Some things will require 8-byte alignment, so everything must. */
    ASSERT0(len % 8);

    while (done < len) {
        ssize_t resid;

        ra->err = restore_bytes(ra, (caddr_t)ra->buf + done,
            len - done, ra->voff, &resid);

        if (resid == len - done)
            ra->err = SET_ERROR(EINVAL);
        ra->voff += len - done - resid;
        done = len - resid;
        if (ra->err != 0)
            return (NULL);
    }

    ASSERT3U(done, ==, len);
    rv = ra->buf;
    if (ra->byteswap)
        fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
    else
        fletcher_4_incremental_native(rv, len, &ra->cksum);
    return (rv);
}
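/*
 * Note that every byte read through restore_read() is folded into
 * ra->cksum; the running value is later compared against the checksum
 * carried by the DRR_END record (see dmu_recv_stream() below).
 */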
static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
    drr->drr_type = BSWAP_32(drr->drr_type);
    drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
    switch (drr->drr_type) {
    case DRR_BEGIN:
        DO64(drr_begin.drr_magic);
        DO64(drr_begin.drr_versioninfo);
        DO64(drr_begin.drr_creation_time);
        DO32(drr_begin.drr_type);
        DO32(drr_begin.drr_flags);
        DO64(drr_begin.drr_toguid);
        DO64(drr_begin.drr_fromguid);
        break;
    case DRR_OBJECT:
        DO64(drr_object.drr_object);
        /* DO64(drr_object.drr_allocation_txg); */
        DO32(drr_object.drr_type);
        DO32(drr_object.drr_bonustype);
        DO32(drr_object.drr_blksz);
        DO32(drr_object.drr_bonuslen);
        DO64(drr_object.drr_toguid);
        break;
    case DRR_FREEOBJECTS:
        DO64(drr_freeobjects.drr_firstobj);
        DO64(drr_freeobjects.drr_numobjs);
        DO64(drr_freeobjects.drr_toguid);
        break;
    case DRR_WRITE:
        DO64(drr_write.drr_object);
        DO32(drr_write.drr_type);
        DO64(drr_write.drr_offset);
        DO64(drr_write.drr_length);
        DO64(drr_write.drr_toguid);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
        DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
        DO64(drr_write.drr_key.ddk_prop);
        break;
    case DRR_WRITE_BYREF:
        DO64(drr_write_byref.drr_object);
        DO64(drr_write_byref.drr_offset);
        DO64(drr_write_byref.drr_length);
        DO64(drr_write_byref.drr_toguid);
        DO64(drr_write_byref.drr_refguid);
        DO64(drr_write_byref.drr_refobject);
        DO64(drr_write_byref.drr_refoffset);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
        DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
        DO64(drr_write_byref.drr_key.ddk_prop);
        break;
    case DRR_FREE:
        DO64(drr_free.drr_object);
        DO64(drr_free.drr_offset);
        DO64(drr_free.drr_length);
        DO64(drr_free.drr_toguid);
        break;
    case DRR_SPILL:
        DO64(drr_spill.drr_object);
        DO64(drr_spill.drr_length);
        DO64(drr_spill.drr_toguid);
        break;
    case DRR_END:
        DO64(drr_end.drr_checksum.zc_word[0]);
        DO64(drr_end.drr_checksum.zc_word[1]);
        DO64(drr_end.drr_checksum.zc_word[2]);
        DO64(drr_end.drr_checksum.zc_word[3]);
        DO64(drr_end.drr_toguid);
        break;
    }
#undef DO64
#undef DO32
}
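/*
 * Records are written in the sender's native byte order; the receiver
 * detects a foreign-endian stream via DRR_BEGIN's drr_magic (see
 * dmu_recv_begin()) and runs every subsequent record through
 * backup_byteswap() above.
 */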
static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
    int err;
    dmu_tx_t *tx;
    void *data = NULL;

    if (drro->drr_type == DMU_OT_NONE ||
        !DMU_OT_IS_VALID(drro->drr_type) ||
        !DMU_OT_IS_VALID(drro->drr_bonustype) ||
        drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
        drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
        P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
        drro->drr_blksz < SPA_MINBLOCKSIZE ||
        drro->drr_blksz > SPA_MAXBLOCKSIZE ||
        drro->drr_bonuslen > DN_MAX_BONUSLEN) {
        return (SET_ERROR(EINVAL));
    }

    err = dmu_object_info(os, drro->drr_object, NULL);

    if (err != 0 && err != ENOENT)
        return (SET_ERROR(EINVAL));

    if (drro->drr_bonuslen) {
        data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
        if (ra->err != 0)
            return (ra->err);
    }

    if (err == ENOENT) {
        /* currently free, want to be allocated */
        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
        }
        err = dmu_object_claim(os, drro->drr_object,
            drro->drr_type, drro->drr_blksz,
            drro->drr_bonustype, drro->drr_bonuslen, tx);
        dmu_tx_commit(tx);
    } else {
        /* currently allocated, want to be allocated */
        err = dmu_object_reclaim(os, drro->drr_object,
            drro->drr_type, drro->drr_blksz,
            drro->drr_bonustype, drro->drr_bonuslen);
    }
    if (err != 0)
        return (SET_ERROR(EINVAL));

    tx = dmu_tx_create(os);
    dmu_tx_hold_bonus(tx, drro->drr_object);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err != 0) {
        dmu_tx_abort(tx);
        return (err);
    }

    dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
        tx);
    dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

    if (data != NULL) {
        dmu_buf_t *db;

        VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
        dmu_buf_will_dirty(db, tx);

        ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
        bcopy(data, db->db_data, drro->drr_bonuslen);
        if (ra->byteswap) {
            dmu_object_byteswap_t byteswap =
                DMU_OT_BYTESWAP(drro->drr_bonustype);
            dmu_ot_byteswap[byteswap].ob_func(db->db_data,
                drro->drr_bonuslen);
        }
        dmu_buf_rele(db, FTAG);
    }
    dmu_tx_commit(tx);
    return (0);
}
/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
    uint64_t obj;

    if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
        return (SET_ERROR(EINVAL));

    for (obj = drrfo->drr_firstobj;
        obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
        (void) dmu_object_next(os, &obj, FALSE, 0)) {
        int err;

        if (dmu_object_info(os, obj, NULL) != 0)
            continue;

        err = dmu_free_long_object(os, obj);
        if (err != 0)
            return (err);
    }
    return (0);
}
static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
    dmu_tx_t *tx;
    void *data;
    int err;

    if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
        !DMU_OT_IS_VALID(drrw->drr_type))
        return (SET_ERROR(EINVAL));

    data = restore_read(ra, drrw->drr_length);
    if (data == NULL)
        return (ra->err);

    if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
        return (SET_ERROR(EINVAL));

    tx = dmu_tx_create(os);

    dmu_tx_hold_write(tx, drrw->drr_object,
        drrw->drr_offset, drrw->drr_length);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err != 0) {
        dmu_tx_abort(tx);
        return (err);
    }
    if (ra->byteswap) {
        dmu_object_byteswap_t byteswap =
            DMU_OT_BYTESWAP(drrw->drr_type);
        dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
    }
    dmu_write(os, drrw->drr_object,
        drrw->drr_offset, drrw->drr_length, data, tx);
    dmu_tx_commit(tx);
    return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream. This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
    dmu_tx_t *tx;
    int err;
    guid_map_entry_t gmesrch;
    guid_map_entry_t *gmep;
    avl_index_t where;
    objset_t *ref_os = NULL;
    dmu_buf_t *dbp;

    if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
        return (SET_ERROR(EINVAL));

    /*
     * If the GUID of the referenced dataset is different from the
     * GUID of the target dataset, find the referenced dataset.
     */
    if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
        gmesrch.guid = drrwbr->drr_refguid;
        if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
            &where)) == NULL) {
            return (SET_ERROR(EINVAL));
        }
        if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
            return (SET_ERROR(EINVAL));
    } else {
        ref_os = os;
    }

    if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
        drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
        return (err);

    tx = dmu_tx_create(os);

    dmu_tx_hold_write(tx, drrwbr->drr_object,
        drrwbr->drr_offset, drrwbr->drr_length);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err != 0) {
        dmu_tx_abort(tx);
        return (err);
    }
    dmu_write(os, drrwbr->drr_object,
        drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
    dmu_buf_rele(dbp, FTAG);
    dmu_tx_commit(tx);
    return (0);
}
static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
    dmu_tx_t *tx;
    void *data;
    dmu_buf_t *db, *db_spill;
    int err;

    if (drrs->drr_length < SPA_MINBLOCKSIZE ||
        drrs->drr_length > SPA_MAXBLOCKSIZE)
        return (SET_ERROR(EINVAL));

    data = restore_read(ra, drrs->drr_length);
    if (data == NULL)
        return (ra->err);

    if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
        return (SET_ERROR(EINVAL));

    VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
    if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
        dmu_buf_rele(db, FTAG);
        return (err);
    }

    tx = dmu_tx_create(os);

    dmu_tx_hold_spill(tx, db->db_object);

    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err != 0) {
        dmu_buf_rele(db, FTAG);
        dmu_buf_rele(db_spill, FTAG);
        dmu_tx_abort(tx);
        return (err);
    }
    dmu_buf_will_dirty(db_spill, tx);

    if (db_spill->db_size < drrs->drr_length)
        VERIFY(0 == dbuf_spill_set_blksz(db_spill,
            drrs->drr_length, tx));
    bcopy(data, db_spill->db_data, drrs->drr_length);

    dmu_buf_rele(db, FTAG);
    dmu_buf_rele(db_spill, FTAG);

    dmu_tx_commit(tx);
    return (0);
}
/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
    int err;

    if (drrf->drr_length != -1ULL &&
        drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
        return (SET_ERROR(EINVAL));

    if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
        return (SET_ERROR(EINVAL));

    err = dmu_free_long_range(os, drrf->drr_object,
        drrf->drr_offset, drrf->drr_length);
    return (err);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
    char name[MAXNAMELEN];

    dsl_dataset_name(drc->drc_ds, name);
    dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
    (void) dsl_destroy_head(name);
}
/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
    struct restorearg ra = { 0 };
    dmu_replay_record_t *drr;
    objset_t *os;
    zio_cksum_t pcksum;
    int featureflags;

    ra.byteswap = drc->drc_byteswap;
    ra.cksum = drc->drc_cksum;
    ra.td = curthread;
    ra.fp = fp;
    ra.voff = *voffp;
    ra.bufsize = 1<<20;
    ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

    /* these were verified in dmu_recv_begin */
    ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
        DMU_SUBSTREAM);
    ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

    /*
     * Open the objset we are modifying.
     */
    VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

    ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

    featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

    /* if this stream is dedup'ed, set up the avl tree for guid mapping */
    if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
        minor_t minor;

        if (cleanup_fd == -1) {
            ra.err = SET_ERROR(EBADF);
            goto out;
        }
        ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
        if (ra.err != 0) {
            cleanup_fd = -1;
            goto out;
        }

        if (*action_handlep == 0) {
            ra.guid_to_ds_map =
                kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
            avl_create(ra.guid_to_ds_map, guid_compare,
                sizeof (guid_map_entry_t),
                offsetof(guid_map_entry_t, avlnode));
            ra.err = zfs_onexit_add_cb(minor,
                free_guid_map_onexit, ra.guid_to_ds_map,
                action_handlep);
            if (ra.err != 0)
                goto out;
        } else {
            ra.err = zfs_onexit_cb_data(minor, *action_handlep,
                (void **)&ra.guid_to_ds_map);
            if (ra.err != 0)
                goto out;
        }

        drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
    }

    /*
     * Read records and process them.
     */
    pcksum = ra.cksum;
    while (ra.err == 0 &&
        NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
        if (issig(JUSTLOOKING) && issig(FORREAL)) {
            ra.err = SET_ERROR(EINTR);
            goto out;
        }

        if (ra.byteswap)
            backup_byteswap(drr);

        switch (drr->drr_type) {
        case DRR_OBJECT:
        {
            /*
             * We need to make a copy of the record header,
             * because restore_{object,write} may need to
             * restore_read(), which will invalidate drr.
             */
            struct drr_object drro = drr->drr_u.drr_object;
            ra.err = restore_object(&ra, os, &drro);
            break;
        }
        case DRR_FREEOBJECTS:
        {
            struct drr_freeobjects drrfo =
                drr->drr_u.drr_freeobjects;
            ra.err = restore_freeobjects(&ra, os, &drrfo);
            break;
        }
        case DRR_WRITE:
        {
            struct drr_write drrw = drr->drr_u.drr_write;
            ra.err = restore_write(&ra, os, &drrw);
            break;
        }
        case DRR_WRITE_BYREF:
        {
            struct drr_write_byref drrwbr =
                drr->drr_u.drr_write_byref;
            ra.err = restore_write_byref(&ra, os, &drrwbr);
            break;
        }
        case DRR_FREE:
        {
            struct drr_free drrf = drr->drr_u.drr_free;
            ra.err = restore_free(&ra, os, &drrf);
            break;
        }
        case DRR_END:
        {
            struct drr_end drre = drr->drr_u.drr_end;
            /*
             * We compare against the *previous* checksum
             * value, because the stored checksum is of
             * everything before the DRR_END record.
             */
            if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
                ra.err = SET_ERROR(ECKSUM);
            goto out;
        }
        case DRR_SPILL:
        {
            struct drr_spill drrs = drr->drr_u.drr_spill;
            ra.err = restore_spill(&ra, os, &drrs);
            break;
        }
        default:
            ra.err = SET_ERROR(EINVAL);
            goto out;
        }
        pcksum = ra.cksum;
    }
    ASSERT(ra.err != 0);

out:
    if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
        zfs_onexit_fd_rele(cleanup_fd);

    if (ra.err != 0) {
        /*
         * destroy what we created, so we don't leave it in the
         * inconsistent restoring state.
         */
        dmu_recv_cleanup_ds(drc);
    }

    kmem_free(ra.buf, ra.bufsize);
    *voffp = ra.voff;
    return (ra.err);
}
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
    dmu_recv_cookie_t *drc = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    int error;

    ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

    if (!drc->drc_newfs) {
        dsl_dataset_t *origin_head;

        error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
        if (error != 0)
            return (error);
        if (drc->drc_force) {
            /*
             * We will destroy any snapshots in tofs (i.e. before
             * origin_head) that are after the origin (which is
             * the snap before drc_ds, because drc_ds cannot
             * have any snaps of its own).
             */
            uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
            while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
                dsl_dataset_t *snap;
                error = dsl_dataset_hold_obj(dp, obj, FTAG,
                    &snap);
                if (error != 0)
                    break;
                if (snap->ds_dir != origin_head->ds_dir)
                    error = SET_ERROR(EINVAL);
                if (error == 0) {
                    error = dsl_destroy_snapshot_check_impl(
                        snap, tx);
                }
                obj = snap->ds_phys->ds_prev_snap_obj;
                dsl_dataset_rele(snap, FTAG);
                if (error != 0)
                    break;
            }
            if (error != 0) {
                dsl_dataset_rele(origin_head, FTAG);
                return (error);
            }
        }
        error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
            origin_head, drc->drc_force, drc->drc_owner, tx);
        if (error != 0) {
            dsl_dataset_rele(origin_head, FTAG);
            return (error);
        }
        error = dsl_dataset_snapshot_check_impl(origin_head,
            drc->drc_tosnap, tx, B_TRUE);
        dsl_dataset_rele(origin_head, FTAG);
        if (error != 0)
            return (error);

        error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
    } else {
        error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
            drc->drc_tosnap, tx, B_TRUE);
    }
    return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
    dmu_recv_cookie_t *drc = arg;
    dsl_pool_t *dp = dmu_tx_pool(tx);

    spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
        tx, "snap=%s", drc->drc_tosnap);

    if (!drc->drc_newfs) {
        dsl_dataset_t *origin_head;

        VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
            &origin_head));

        if (drc->drc_force) {
            /*
             * Destroy any snapshots of drc_tofs (origin_head)
             * after the origin (the snap before drc_ds).
             */
            uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
            while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
                dsl_dataset_t *snap;
                VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
                    &snap));
                ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
                obj = snap->ds_phys->ds_prev_snap_obj;
                dsl_destroy_snapshot_sync_impl(snap,
                    B_FALSE, tx);
                dsl_dataset_rele(snap, FTAG);
            }
        }
        VERIFY3P(drc->drc_ds->ds_prev, ==,
            origin_head->ds_prev);

        dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
            origin_head, tx);
        dsl_dataset_snapshot_sync_impl(origin_head,
            drc->drc_tosnap, tx);

        /* set snapshot's creation time and guid */
        dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
        origin_head->ds_prev->ds_phys->ds_creation_time =
            drc->drc_drrb->drr_creation_time;
        origin_head->ds_prev->ds_phys->ds_guid =
            drc->drc_drrb->drr_toguid;
        origin_head->ds_prev->ds_phys->ds_flags &=
            ~DS_FLAG_INCONSISTENT;

        dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
        origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

        dsl_dataset_rele(origin_head, FTAG);
        dsl_destroy_head_sync_impl(drc->drc_ds, tx);

        if (drc->drc_owner != NULL)
            VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
    } else {
        dsl_dataset_t *ds = drc->drc_ds;

        dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

        /* set snapshot's creation time and guid */
        dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
        ds->ds_prev->ds_phys->ds_creation_time =
            drc->drc_drrb->drr_creation_time;
        ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
        ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
    }
    drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
    /*
     * Release the hold from dmu_recv_begin. This must be done before
     * we return to open context, so that when we free the dataset's dnode,
     * we can evict its bonus buffer.
     */
    dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
    drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
    dsl_pool_t *dp;
    dsl_dataset_t *snapds;
    guid_map_entry_t *gmep;
    int err;

    ASSERT(guid_map != NULL);

    err = dsl_pool_hold(name, FTAG, &dp);
    if (err != 0)
        return (err);
    gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
    err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
    if (err == 0) {
        gmep->guid = snapds->ds_phys->ds_guid;
        gmep->gme_ds = snapds;
        avl_add(guid_map, gmep);
        dsl_dataset_long_hold(snapds, gmep);
    } else {
        kmem_free(gmep, sizeof (*gmep));
    }
    dsl_pool_rele(dp, FTAG);
    return (err);
}
static int dmu_recv_end_modified_blocks = 3;
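/*
 * The count above is a rough bound on the blocks dirtied by the
 * recv-end sync task; it is passed to dsl_sync_task() below for its
 * write-space check.
 */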
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
    int error;
#ifdef _KERNEL
    char name[MAXNAMELEN];

    /*
     * We will be destroying the ds; make sure its origin is unmounted if
     * necessary.
     */
    dsl_dataset_name(drc->drc_ds, name);
    zfs_destroy_unmount_origin(name);
#endif

    error = dsl_sync_task(drc->drc_tofs,
        dmu_recv_end_check, dmu_recv_end_sync, drc,
        dmu_recv_end_modified_blocks);

    if (error != 0)
        dmu_recv_cleanup_ds(drc);
    return (error);
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
    int error;

    error = dsl_sync_task(drc->drc_tofs,
        dmu_recv_end_check, dmu_recv_end_sync, drc,
        dmu_recv_end_modified_blocks);

    if (error != 0) {
        dmu_recv_cleanup_ds(drc);
    } else if (drc->drc_guid_to_ds_map != NULL) {
        (void) add_ds_to_guidmap(drc->drc_tofs,
            drc->drc_guid_to_ds_map,
            drc->drc_newsnapobj);
    }
    return (error);
}
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
    drc->drc_owner = owner;

    if (drc->drc_newfs)
        return (dmu_recv_new_end(drc));
    else
        return (dmu_recv_existing_end(drc));
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
    return (os->os_dsl_dataset != NULL &&
        os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}