/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

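/*
 * The FreeBSD kernel already provides a dump_write() for crash dumps, so
 * rename our local helper to avoid the collision.
 */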
#ifdef __FreeBSD__
#undef dump_write
#define dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
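/*
 * Caps (in bytes) on the data queued between the prefetching traversal
 * thread and the thread that writes out the send stream, and on the
 * corresponding queue on the receive side.
 */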
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

#define BP_SPAN(datablkszsec, indblkshift, level) \
        (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
        (level) * (indblkshift - SPA_BLKPTRSHIFT)))
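/*
 * BP_SPAN() gives the number of bytes of the object covered by one block
 * pointer at the given level.  For example, with 128K data blocks
 * (datablkszsec = 256) and 128K indirect blocks (indblkshift = 17), each
 * indirect block holds 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers, so
 * a level-1 pointer spans 1024 * 128K = 128M.
 */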

static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
        bqueue_t        q;
        dsl_dataset_t   *ds;            /* Dataset to traverse */
        uint64_t        fromtxg;        /* Traverse from this txg */
        int             flags;          /* flags to pass to traverse_dataset */
        int             error_code;
        boolean_t       cancel;
        zbookmark_phys_t resume;
};

struct send_block_record {
        boolean_t               eos_marker; /* Marks the end of the stream */
        blkptr_t                bp;
        zbookmark_phys_t        zb;
        uint8_t                 indblkshift;
        uint16_t                datablkszsec;
        bqueue_node_t           ln;
};

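/*
 * Write "len" bytes (which must be a multiple of 8) of raw stream data to
 * the output file and advance the caller-visible stream offset.  Any write
 * error is latched in dsa_err and returned.
 */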
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
        dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
        struct uio auio;
        struct iovec aiov;
        ASSERT0(len % 8);

        aiov.iov_base = buf;
        aiov.iov_len = len;
        auio.uio_iov = &aiov;
        auio.uio_iovcnt = 1;
        auio.uio_resid = len;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        auio.uio_offset = (off_t)-1;
        auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
        if (dsp->dsa_fp->f_type == DTYPE_VNODE)
                bwillwrite();
        dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
            dsp->dsa_td);
#else
        fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
        dsp->dsa_err = EOPNOTSUPP;
#endif
        mutex_enter(&ds->ds_sendstream_lock);
        *dsp->dsa_off += len;
        mutex_exit(&ds->ds_sendstream_lock);

        return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
        ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
            ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
        fletcher_4_incremental_native(dsp->dsa_drr,
            offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
            &dsp->dsa_zc);
        if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
                ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
                    drr_checksum.drr_checksum));
                dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
        }
        fletcher_4_incremental_native(&dsp->dsa_drr->
            drr_u.drr_checksum.drr_checksum,
            sizeof (zio_cksum_t), &dsp->dsa_zc);
        if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
                return (SET_ERROR(EINTR));
        if (payload_len != 0) {
                fletcher_4_incremental_native(payload, payload_len,
                    &dsp->dsa_zc);
                if (dump_bytes(dsp, payload, payload_len) != 0)
                        return (SET_ERROR(EINTR));
        }
        return (0);
}

static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
        struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

        /*
         * When we receive a free record, dbuf_free_range() assumes
         * that the receiving system doesn't have any dbufs in the range
         * being freed.  This is always true because there is a one-record
         * constraint: we only send one WRITE record for any given
         * object,offset.  We know that the one-record constraint is
         * true because we always send data in increasing order by
         * object,offset.
         *
         * If the increasing-order constraint ever changes, we should find
         * another way to assert that the one-record constraint is still
         * satisfied.
         */
        ASSERT(object > dsp->dsa_last_data_object ||
            (object == dsp->dsa_last_data_object &&
            offset > dsp->dsa_last_data_offset));

        /*
         * If we are doing a non-incremental send, then there can't
         * be any data in the dataset we're receiving into.  Therefore
         * a free record would simply be a no-op.  Save space by not
         * sending it to begin with.
         */
        if (!dsp->dsa_incremental)
                return (0);

        if (length != -1ULL && offset + length < offset)
                length = -1ULL;

        /*
         * If there is a pending op, but it's not PENDING_FREE, push it out,
         * since free block aggregation can only be done for blocks of the
         * same type (i.e., DRR_FREE records can only be aggregated with
         * other DRR_FREE records; DRR_FREEOBJECTS records can only be
         * aggregated with other DRR_FREEOBJECTS records).
         */
        if (dsp->dsa_pending_op != PENDING_NONE &&
            dsp->dsa_pending_op != PENDING_FREE) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        if (dsp->dsa_pending_op == PENDING_FREE) {
                /*
                 * There should never be a PENDING_FREE if length is -1
                 * (because dump_dnode is the only place where this
                 * function is called with a -1, and only after flushing
                 * any pending record).
                 */
                ASSERT(length != -1ULL);
                /*
                 * Check to see whether this free block can be aggregated
                 * with the pending one.
                 */
                if (drrf->drr_object == object && drrf->drr_offset +
                    drrf->drr_length == offset) {
                        drrf->drr_length += length;
                        return (0);
                } else {
                        /* not a continuation.  Push out pending record */
                        if (dump_record(dsp, NULL, 0) != 0)
                                return (SET_ERROR(EINTR));
                        dsp->dsa_pending_op = PENDING_NONE;
                }
        }
        /* create a FREE record and make it pending */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_FREE;
        drrf->drr_object = object;
        drrf->drr_offset = offset;
        drrf->drr_length = length;
        drrf->drr_toguid = dsp->dsa_toguid;
        if (length == -1ULL) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
        } else {
                dsp->dsa_pending_op = PENDING_FREE;
        }

        return (0);
}

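/*
 * Emit a DRR_WRITE record for one logical block of data, flushing any
 * pending free aggregation first.  If "bp" carries a dedup-capable
 * checksum, copy it into the record so userland can dedup the stream.
 */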
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
        struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

        /*
         * We send data in increasing object, offset order.
         * See comment in dump_free() for details.
         */
        ASSERT(object > dsp->dsa_last_data_object ||
            (object == dsp->dsa_last_data_object &&
            offset > dsp->dsa_last_data_offset));
        dsp->dsa_last_data_object = object;
        dsp->dsa_last_data_offset = offset + blksz - 1;

        /*
         * If there is any kind of pending aggregation (currently either
         * a grouping of free objects or free blocks), push it out to
         * the stream, since aggregation can't be done across operations
         * of different types.
         */
        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }
        /* write a WRITE record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_WRITE;
        drrw->drr_object = object;
        drrw->drr_type = type;
        drrw->drr_offset = offset;
        drrw->drr_length = blksz;
        drrw->drr_toguid = dsp->dsa_toguid;
        if (bp == NULL || BP_IS_EMBEDDED(bp)) {
                /*
                 * There's no pre-computed checksum for partial-block
                 * writes or embedded BP's, so (like
                 * fletcher4-checksummed blocks) userland will have to
                 * compute a dedup-capable checksum itself.
                 */
                drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
        } else {
                drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
                if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
                        drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
                DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
                DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
                DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
                drrw->drr_key.ddk_cksum = bp->blk_cksum;
        }

        if (dump_record(dsp, data, blksz) != 0)
                return (SET_ERROR(EINTR));
        return (0);
}

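/*
 * Emit a DRR_WRITE_EMBEDDED record for a block whose (compressed) payload
 * is embedded in the block pointer itself.
 */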
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
        char buf[BPE_PAYLOAD_SIZE];
        struct drr_write_embedded *drrw =
            &(dsp->dsa_drr->drr_u.drr_write_embedded);

        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (EINTR);
                dsp->dsa_pending_op = PENDING_NONE;
        }

        ASSERT(BP_IS_EMBEDDED(bp));

        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
        drrw->drr_object = object;
        drrw->drr_offset = offset;
        drrw->drr_length = blksz;
        drrw->drr_toguid = dsp->dsa_toguid;
        drrw->drr_compression = BP_GET_COMPRESS(bp);
        drrw->drr_etype = BPE_GET_ETYPE(bp);
        drrw->drr_lsize = BPE_GET_LSIZE(bp);
        drrw->drr_psize = BPE_GET_PSIZE(bp);

        decode_embedded_bp_compressed(bp, buf);

        if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
                return (EINTR);
        return (0);
}

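/*
 * Emit a DRR_SPILL record carrying an object's spill block (used for
 * overflow system attributes).
 */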
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
        struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        /* write a SPILL record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_SPILL;
        drrs->drr_object = object;
        drrs->drr_length = blksz;
        drrs->drr_toguid = dsp->dsa_toguid;

        if (dump_record(dsp, data, blksz) != 0)
                return (SET_ERROR(EINTR));
        return (0);
}

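/*
 * Emit (or aggregate into a pending) DRR_FREEOBJECTS record covering
 * "numobjs" objects starting at "firstobj".
 */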
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
        struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

        /* See comment in dump_free(). */
        if (!dsp->dsa_incremental)
                return (0);

        /*
         * If there is a pending op, but it's not PENDING_FREEOBJECTS,
         * push it out, since free block aggregation can only be done for
         * blocks of the same type (i.e., DRR_FREE records can only be
         * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
         * can only be aggregated with other DRR_FREEOBJECTS records).
         */
        if (dsp->dsa_pending_op != PENDING_NONE &&
            dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }
        if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
                /*
                 * See whether this free object array can be aggregated
                 * with the pending one.
                 */
                if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
                        drrfo->drr_numobjs += numobjs;
                        return (0);
                } else {
                        /* can't be aggregated.  Push out pending record */
                        if (dump_record(dsp, NULL, 0) != 0)
                                return (SET_ERROR(EINTR));
                        dsp->dsa_pending_op = PENDING_NONE;
                }
        }

        /* write a FREEOBJECTS record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
        drrfo->drr_firstobj = firstobj;
        drrfo->drr_numobjs = numobjs;
        drrfo->drr_toguid = dsp->dsa_toguid;

        dsp->dsa_pending_op = PENDING_FREEOBJECTS;

        return (0);
}

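/*
 * Emit a DRR_OBJECT record describing a dnode, followed by its bonus
 * buffer, then free anything past the object's end.  A hole or freed
 * dnode is sent as a one-object DRR_FREEOBJECTS instead.
 */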
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
        struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

        if (object < dsp->dsa_resume_object) {
                /*
                 * Note: when resuming, we will visit all the dnodes in
                 * the block of dnodes that we are resuming from.  In
                 * this case it's unnecessary to send the dnodes prior to
                 * the one we are resuming from.  We should be at most one
                 * block's worth of dnodes behind the resume point.
                 */
                ASSERT3U(dsp->dsa_resume_object - object, <,
                    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
                return (0);
        }

        if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
                return (dump_freeobjects(dsp, object, 1));

        if (dsp->dsa_pending_op != PENDING_NONE) {
                if (dump_record(dsp, NULL, 0) != 0)
                        return (SET_ERROR(EINTR));
                dsp->dsa_pending_op = PENDING_NONE;
        }

        /* write an OBJECT record */
        bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
        dsp->dsa_drr->drr_type = DRR_OBJECT;
        drro->drr_object = object;
        drro->drr_type = dnp->dn_type;
        drro->drr_bonustype = dnp->dn_bonustype;
        drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
        drro->drr_bonuslen = dnp->dn_bonuslen;
        drro->drr_checksumtype = dnp->dn_checksum;
        drro->drr_compress = dnp->dn_compress;
        drro->drr_toguid = dsp->dsa_toguid;

        if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
            drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
                drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

        if (dump_record(dsp, DN_BONUS(dnp),
            P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
                return (SET_ERROR(EINTR));
        }

        /* Free anything past the end of the file. */
        if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
            (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
                return (SET_ERROR(EINTR));
        if (dsp->dsa_err != 0)
                return (SET_ERROR(EINTR));
        return (0);
}

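/*
 * Decide whether this block pointer's embedded payload can be sent as-is,
 * which requires that the stream's feature flags cover both the
 * compression function and the embed type used.
 */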
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
        if (!BP_IS_EMBEDDED(bp))
                return (B_FALSE);

        /*
         * Compression function must be legacy, or explicitly enabled.
         */
        if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
            !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
                return (B_FALSE);

        /*
         * Embed type must be explicitly enabled.
         */
        switch (BPE_GET_ETYPE(bp)) {
        case BP_EMBEDDED_TYPE_DATA:
                if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
                        return (B_TRUE);
                break;
        default:
                return (B_FALSE);
        }
        return (B_FALSE);
}

/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
        struct send_thread_arg *sta = arg;
        struct send_block_record *record;
        uint64_t record_size;
        int err = 0;

        ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
            zb->zb_object >= sta->resume.zb_object);

        if (sta->cancel)
                return (SET_ERROR(EINTR));

        if (bp == NULL) {
                ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
                return (0);
        } else if (zb->zb_level < 0) {
                return (0);
        }

        record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
        record->eos_marker = B_FALSE;
        record->bp = *bp;
        record->zb = *zb;
        record->indblkshift = dnp->dn_indblkshift;
        record->datablkszsec = dnp->dn_datablkszsec;
        record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
        bqueue_enqueue(&sta->q, record, record_size);

        return (err);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread immediately pushes the End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
        struct send_thread_arg *st_arg = arg;
        int err;
        struct send_block_record *data;

        if (st_arg->ds != NULL) {
                err = traverse_dataset_resume(st_arg->ds,
                    st_arg->fromtxg, &st_arg->resume,
                    st_arg->flags, send_cb, st_arg);

                if (err != EINTR)
                        st_arg->error_code = err;
        }
        data = kmem_zalloc(sizeof (*data), KM_SLEEP);
        data->eos_marker = B_TRUE;
        bqueue_enqueue(&st_arg->q, data, 1);
        thread_exit();
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
        dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
        const blkptr_t *bp = &data->bp;
        const zbookmark_phys_t *zb = &data->zb;
        uint8_t indblkshift = data->indblkshift;
        uint16_t dblkszsec = data->datablkszsec;
        spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
        dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
        int err = 0;

        ASSERT3U(zb->zb_level, >=, 0);

        ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
            zb->zb_object >= dsa->dsa_resume_object);

        if (zb->zb_object != DMU_META_DNODE_OBJECT &&
            DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
                return (0);
        } else if (BP_IS_HOLE(bp) &&
            zb->zb_object == DMU_META_DNODE_OBJECT) {
                uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
                uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
                err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
        } else if (BP_IS_HOLE(bp)) {
                uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
                uint64_t offset = zb->zb_blkid * span;
                err = dump_free(dsa, zb->zb_object, offset, span);
        } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
                return (0);
        } else if (type == DMU_OT_DNODE) {
                int blksz = BP_GET_LSIZE(bp);
                arc_flags_t aflags = ARC_FLAG_WAIT;
                arc_buf_t *abuf;

                ASSERT0(zb->zb_level);

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0)
                        return (SET_ERROR(EIO));

                dnode_phys_t *blk = abuf->b_data;
                uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
                for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
                        err = dump_dnode(dsa, dnobj + i, blk + i);
                        if (err != 0)
                                break;
                }
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else if (type == DMU_OT_SA) {
                arc_flags_t aflags = ARC_FLAG_WAIT;
                arc_buf_t *abuf;
                int blksz = BP_GET_LSIZE(bp);

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0)
                        return (SET_ERROR(EIO));

                err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
                (void) arc_buf_remove_ref(abuf, &abuf);
        } else if (backup_do_embed(dsa, bp)) {
                /* it's an embedded level-0 block of a regular object */
                int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
                ASSERT0(zb->zb_level);
                err = dump_write_embedded(dsa, zb->zb_object,
                    zb->zb_blkid * blksz, blksz, bp);
        } else {
                /* it's a level-0 block of a regular object */
                arc_flags_t aflags = ARC_FLAG_WAIT;
                arc_buf_t *abuf;
                int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
                uint64_t offset;

                ASSERT0(zb->zb_level);
                ASSERT(zb->zb_object > dsa->dsa_resume_object ||
                    (zb->zb_object == dsa->dsa_resume_object &&
                    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

                if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
                    &aflags, zb) != 0) {
                        if (zfs_send_corrupt_data) {
                                /* Send a block filled with 0x"zfs badd bloc" */
                                abuf = arc_buf_alloc(spa, blksz, &abuf,
                                    ARC_BUFC_DATA);
                                uint64_t *ptr;
                                for (ptr = abuf->b_data;
                                    (char *)ptr < (char *)abuf->b_data + blksz;
                                    ptr++)
                                        *ptr = 0x2f5baddb10cULL;
                        } else {
                                return (SET_ERROR(EIO));
                        }
                }

                offset = zb->zb_blkid * blksz;

                if (!(dsa->dsa_featureflags &
                    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
                    blksz > SPA_OLD_MAXBLOCKSIZE) {
                        char *buf = abuf->b_data;
                        while (blksz > 0 && err == 0) {
                                int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
                                err = dump_write(dsa, type, zb->zb_object,
                                    offset, n, NULL, buf);
                                offset += n;
                                buf += n;
                                blksz -= n;
                        }
                } else {
                        err = dump_write(dsa, type, zb->zb_object,
                            offset, blksz, bp, abuf->b_data);
                }
                (void) arc_buf_remove_ref(abuf, &abuf);
        }

        ASSERT(err == 0 || err == EINTR);
        return (err);
}

694  * Pop the new data off the queue, and free the old data.
695  */
696 static struct send_block_record *
697 get_next_record(bqueue_t *bq, struct send_block_record *data)
698 {
699         struct send_block_record *tmp = bqueue_dequeue(bq);
700         kmem_free(data, sizeof (*data));
701         return (tmp);
702 }
703
704 /*
705  * Actually do the bulk of the work in a zfs send.
706  *
707  * Note: Releases dp using the specified tag.
708  */
709 static int
710 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
711     zfs_bookmark_phys_t *ancestor_zb,
712     boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
713     uint64_t resumeobj, uint64_t resumeoff,
714 #ifdef illumos
715     vnode_t *vp, offset_t *off)
716 #else
717     struct file *fp, offset_t *off)
718 #endif
719 {
720         objset_t *os;
721         dmu_replay_record_t *drr;
722         dmu_sendarg_t *dsp;
723         int err;
724         uint64_t fromtxg = 0;
725         uint64_t featureflags = 0;
726         struct send_thread_arg to_arg = { 0 };
727
728         err = dmu_objset_from_ds(to_ds, &os);
729         if (err != 0) {
730                 dsl_pool_rele(dp, tag);
731                 return (err);
732         }
733
734         drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
735         drr->drr_type = DRR_BEGIN;
736         drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
737         DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
738             DMU_SUBSTREAM);
739
740 #ifdef _KERNEL
741         if (dmu_objset_type(os) == DMU_OST_ZFS) {
742                 uint64_t version;
743                 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
744                         kmem_free(drr, sizeof (dmu_replay_record_t));
745                         dsl_pool_rele(dp, tag);
746                         return (SET_ERROR(EINVAL));
747                 }
748                 if (version >= ZPL_VERSION_SA) {
749                         featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
750                 }
751         }
752 #endif
753
754         if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
755                 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
756         if (embedok &&
757             spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
758                 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
759                 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
760                         featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
761         }
762
763         if (resumeobj != 0 || resumeoff != 0) {
764                 featureflags |= DMU_BACKUP_FEATURE_RESUMING;
765         }
766
767         DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
768             featureflags);
769
770         drr->drr_u.drr_begin.drr_creation_time =
771             dsl_dataset_phys(to_ds)->ds_creation_time;
772         drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
773         if (is_clone)
774                 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
775         drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
776         if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
777                 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
778
779         if (ancestor_zb != NULL) {
780                 drr->drr_u.drr_begin.drr_fromguid =
781                     ancestor_zb->zbm_guid;
782                 fromtxg = ancestor_zb->zbm_creation_txg;
783         }
784         dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
785         if (!to_ds->ds_is_snapshot) {
786                 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
787                     sizeof (drr->drr_u.drr_begin.drr_toname));
788         }
789
790         dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
791
792         dsp->dsa_drr = drr;
793         dsp->dsa_outfd = outfd;
794         dsp->dsa_proc = curproc;
795         dsp->dsa_td = curthread;
796         dsp->dsa_fp = fp;
797         dsp->dsa_os = os;
798         dsp->dsa_off = off;
799         dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
800         dsp->dsa_pending_op = PENDING_NONE;
801         dsp->dsa_incremental = (ancestor_zb != NULL);
802         dsp->dsa_featureflags = featureflags;
803         dsp->dsa_resume_object = resumeobj;
804         dsp->dsa_resume_offset = resumeoff;
805
806         mutex_enter(&to_ds->ds_sendstream_lock);
807         list_insert_head(&to_ds->ds_sendstreams, dsp);
808         mutex_exit(&to_ds->ds_sendstream_lock);
809
810         dsl_dataset_long_hold(to_ds, FTAG);
811         dsl_pool_rele(dp, tag);
812
813         void *payload = NULL;
814         size_t payload_len = 0;
815         if (resumeobj != 0 || resumeoff != 0) {
816                 dmu_object_info_t to_doi;
817                 err = dmu_object_info(os, resumeobj, &to_doi);
818                 if (err != 0)
819                         goto out;
820                 SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
821                     resumeoff / to_doi.doi_data_block_size);
822
823                 nvlist_t *nvl = fnvlist_alloc();
824                 fnvlist_add_uint64(nvl, "resume_object", resumeobj);
825                 fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
826                 payload = fnvlist_pack(nvl, &payload_len);
827                 drr->drr_payloadlen = payload_len;
828                 fnvlist_free(nvl);
829         }
830
831         err = dump_record(dsp, payload, payload_len);
832         fnvlist_pack_free(payload, payload_len);
833         if (err != 0) {
834                 err = dsp->dsa_err;
835                 goto out;
836         }
837
838         err = bqueue_init(&to_arg.q, zfs_send_queue_length,
839             offsetof(struct send_block_record, ln));
840         to_arg.error_code = 0;
841         to_arg.cancel = B_FALSE;
842         to_arg.ds = to_ds;
843         to_arg.fromtxg = fromtxg;
844         to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
845         (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
846             TS_RUN, minclsyspri);
847
848         struct send_block_record *to_data;
849         to_data = bqueue_dequeue(&to_arg.q);
850
851         while (!to_data->eos_marker && err == 0) {
852                 err = do_dump(dsp, to_data);
853                 to_data = get_next_record(&to_arg.q, to_data);
854                 if (issig(JUSTLOOKING) && issig(FORREAL))
855                         err = EINTR;
856         }
857
858         if (err != 0) {
859                 to_arg.cancel = B_TRUE;
860                 while (!to_data->eos_marker) {
861                         to_data = get_next_record(&to_arg.q, to_data);
862                 }
863         }
864         kmem_free(to_data, sizeof (*to_data));
865
866         bqueue_destroy(&to_arg.q);
867
868         if (err == 0 && to_arg.error_code != 0)
869                 err = to_arg.error_code;
870
871         if (err != 0)
872                 goto out;
873
874         if (dsp->dsa_pending_op != PENDING_NONE)
875                 if (dump_record(dsp, NULL, 0) != 0)
876                         err = SET_ERROR(EINTR);
877
878         if (err != 0) {
879                 if (err == EINTR && dsp->dsa_err != 0)
880                         err = dsp->dsa_err;
881                 goto out;
882         }
883
884         bzero(drr, sizeof (dmu_replay_record_t));
885         drr->drr_type = DRR_END;
886         drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
887         drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
888
889         if (dump_record(dsp, NULL, 0) != 0)
890                 err = dsp->dsa_err;
891
892 out:
893         mutex_enter(&to_ds->ds_sendstream_lock);
894         list_remove(&to_ds->ds_sendstreams, dsp);
895         mutex_exit(&to_ds->ds_sendstream_lock);
896
897         kmem_free(drr, sizeof (dmu_replay_record_t));
898         kmem_free(dsp, sizeof (dmu_sendarg_t));
899
900         dsl_dataset_long_rele(to_ds, FTAG);
901
902         return (err);
903 }
904
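/*
 * Generate a send stream for the snapshot with the given object number,
 * optionally incremental from the snapshot "fromsnap" (also an object
 * number).  This entry point does not support resuming.
 */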
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        dsl_dataset_t *fromds = NULL;
        int err;

        err = dsl_pool_hold(pool, FTAG, &dp);
        if (err != 0)
                return (err);

        err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (fromsnap != 0) {
                zfs_bookmark_phys_t zb;
                boolean_t is_clone;

                err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
                if (err != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_pool_rele(dp, FTAG);
                        return (err);
                }
                if (!dsl_dataset_is_before(ds, fromds, 0))
                        err = SET_ERROR(EXDEV);
                zb.zbm_creation_time =
                    dsl_dataset_phys(fromds)->ds_creation_time;
                zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
                zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
                is_clone = (fromds->ds_dir != ds->ds_dir);
                dsl_dataset_rele(fromds, FTAG);
                err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
                    embedok, large_block_ok, outfd, 0, 0, fp, off);
        } else {
                err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
                    embedok, large_block_ok, outfd, 0, 0, fp, off);
        }
        dsl_dataset_rele(ds, FTAG);
        return (err);
}

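/*
 * Generate a send stream for the dataset named by "tosnap" (a snapshot,
 * or a live filesystem/volume, which we then own so it can't change).
 * "fromsnap", if non-NULL, names the incremental source and may be either
 * a snapshot ("@") or a bookmark ("#").  A nonzero resumeobj or resumeoff
 * restarts an interrupted stream from that point.
 */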
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;
        boolean_t owned = B_FALSE;

        if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
                return (SET_ERROR(EINVAL));

        err = dsl_pool_hold(tosnap, FTAG, &dp);
        if (err != 0)
                return (err);

        if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
                /*
                 * We are sending a filesystem or volume.  Ensure
                 * that it doesn't change by owning the dataset.
                 */
                err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
                owned = B_TRUE;
        } else {
                err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
        }
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (fromsnap != NULL) {
                zfs_bookmark_phys_t zb;
                boolean_t is_clone = B_FALSE;
                int fsnamelen = strchr(tosnap, '@') - tosnap;

                /*
                 * If the fromsnap is in a different filesystem, then
                 * mark the send stream as a clone.
                 */
                if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
                    (fromsnap[fsnamelen] != '@' &&
                    fromsnap[fsnamelen] != '#')) {
                        is_clone = B_TRUE;
                }

                if (strchr(fromsnap, '@')) {
                        dsl_dataset_t *fromds;
                        err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
                        if (err == 0) {
                                if (!dsl_dataset_is_before(ds, fromds, 0))
                                        err = SET_ERROR(EXDEV);
                                zb.zbm_creation_time =
                                    dsl_dataset_phys(fromds)->ds_creation_time;
                                zb.zbm_creation_txg =
                                    dsl_dataset_phys(fromds)->ds_creation_txg;
                                zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
                                is_clone = (ds->ds_dir != fromds->ds_dir);
                                dsl_dataset_rele(fromds, FTAG);
                        }
                } else {
                        err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
                }
                if (err != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_pool_rele(dp, FTAG);
                        return (err);
                }
                err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
                    embedok, large_block_ok,
                    outfd, resumeobj, resumeoff, fp, off);
        } else {
                err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
                    embedok, large_block_ok,
                    outfd, resumeobj, resumeoff, fp, off);
        }
        if (owned)
                dsl_dataset_disown(ds, FTAG);
        else
                dsl_dataset_rele(ds, FTAG);
        return (err);
}

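/*
 * Convert an uncompressed-data size into a stream size estimate:
 * subtract the approximate space consumed by indirect blocks and add
 * the space consumed by the per-block record headers.
 */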
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
        int err;
        /*
         * Assume that space (both on-disk and in-stream) is dominated by
         * data.  We will adjust for indirect blocks and the copies property,
         * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
         */

        /*
         * Subtract out approximate space used by indirect blocks.
         * Assume most space is used by data blocks (non-indirect, non-dnode).
         * Assume all blocks are recordsize.  Assume ditto blocks and
         * internal fragmentation cancel out compression.
         *
         * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
         * block, which we observe in practice.
         */
        uint64_t recordsize;
        err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
        if (err != 0)
                return (err);
        size -= size / recordsize * sizeof (blkptr_t);

        /* Add in the space for the record associated with each block. */
        size += size / recordsize * sizeof (dmu_replay_record_t);

        *sizep = size;

        return (0);
}

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        int err;
        uint64_t size;

        ASSERT(dsl_pool_config_held(dp));

        /* tosnap must be a snapshot */
        if (!ds->ds_is_snapshot)
                return (SET_ERROR(EINVAL));

        /* fromsnap, if provided, must be a snapshot */
        if (fromds != NULL && !fromds->ds_is_snapshot)
                return (SET_ERROR(EINVAL));

        /*
         * fromsnap must be an earlier snapshot from the same fs as tosnap,
         * or the origin's fs.
         */
        if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
                return (SET_ERROR(EXDEV));

        /* Get uncompressed size estimate of changed data. */
        if (fromds == NULL) {
                size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
        } else {
                uint64_t used, comp;
                err = dsl_dataset_space_written(fromds, ds,
                    &used, &comp, &size);
                if (err != 0)
                        return (err);
        }

        err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
        return (err);
}

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
        uint64_t *spaceptr = arg;
        if (bp != NULL && !BP_IS_HOLE(bp)) {
                *spaceptr += BP_GET_UCSIZE(bp);
        }
        return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG.  from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        int err;
        uint64_t size = 0;

        ASSERT(dsl_pool_config_held(dp));

        /* tosnap must be a snapshot */
        if (!dsl_dataset_is_snapshot(ds))
                return (SET_ERROR(EINVAL));

        /* verify that from_txg is before the provided snapshot was taken */
        if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
                return (SET_ERROR(EXDEV));
        }

        /*
         * traverse the blocks of the snapshot with birth times after
         * from_txg, summing their uncompressed size
         */
        err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
            dmu_calculate_send_traversal, &size);
        if (err)
                return (err);

        err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
        return (err);
}

typedef struct dmu_recv_begin_arg {
        const char *drba_origin;
        dmu_recv_cookie_t *drba_cookie;
        cred_t *drba_cred;
        uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

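/*
 * Check that a stream can be received into an existing filesystem: the
 * temporary %recv clone and the target snapshot name must be free, the
 * snapshot limit must not be exceeded, and an incremental stream must
 * match the GUID of one of the filesystem's existing snapshots.
 */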
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
        uint64_t val;
        int error;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        /* temporary clone name must not exist */
        error = zap_lookup(dp->dp_meta_objset,
            dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
            8, 1, &val);
        if (error != ENOENT)
                return (error == 0 ? EBUSY : error);

        /* new snapshot name must not exist */
        error = zap_lookup(dp->dp_meta_objset,
            dsl_dataset_phys(ds)->ds_snapnames_zapobj,
            drba->drba_cookie->drc_tosnap, 8, 1, &val);
        if (error != ENOENT)
                return (error == 0 ? EEXIST : error);

        /*
         * Check snapshot limit before receiving. We'll recheck again at the
         * end, but might as well abort before receiving if we're already over
         * the limit.
         *
         * Note that we do not check the file system limit with
         * dsl_dir_fscount_check because the temporary %clones don't count
         * against that limit.
         */
        error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
            NULL, drba->drba_cred);
        if (error != 0)
                return (error);

        if (fromguid != 0) {
                dsl_dataset_t *snap;
                uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

                /* Find snapshot in this dir that matches fromguid. */
                while (obj != 0) {
                        error = dsl_dataset_hold_obj(dp, obj, FTAG,
                            &snap);
                        if (error != 0)
                                return (SET_ERROR(ENODEV));
                        if (snap->ds_dir != ds->ds_dir) {
                                dsl_dataset_rele(snap, FTAG);
                                return (SET_ERROR(ENODEV));
                        }
                        if (dsl_dataset_phys(snap)->ds_guid == fromguid)
                                break;
                        obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
                        dsl_dataset_rele(snap, FTAG);
                }
                if (obj == 0)
                        return (SET_ERROR(ENODEV));

                if (drba->drba_cookie->drc_force) {
                        drba->drba_snapobj = obj;
                } else {
                        /*
                         * If we are not forcing, there must be no
                         * changes since fromsnap.
                         */
                        if (dsl_dataset_modified_since_snap(ds, snap)) {
                                dsl_dataset_rele(snap, FTAG);
                                return (SET_ERROR(ETXTBSY));
                        }
                        drba->drba_snapobj = ds->ds_prev->ds_object;
                }

                dsl_dataset_rele(snap, FTAG);
        } else {
                /* if full, then must be forced */
                if (!drba->drba_cookie->drc_force)
                        return (SET_ERROR(EEXIST));
                /* start from $ORIGIN@$ORIGIN, if supported */
                drba->drba_snapobj = dp->dp_origin_snap != NULL ?
                    dp->dp_origin_snap->ds_object : 0;
        }

        return (0);
}

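/*
 * Begin-receive check function: validate the stream's DRR_BEGIN record
 * against the destination pool's features and the state of the target
 * filesystem before any stream data is received.
 */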
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
        dmu_recv_begin_arg_t *drba = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
        uint64_t fromguid = drrb->drr_fromguid;
        int flags = drrb->drr_flags;
        int error;
        uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
        dsl_dataset_t *ds;
        const char *tofs = drba->drba_cookie->drc_tofs;

        /* already checked */
        ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
        ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

        if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
            DMU_COMPOUNDSTREAM ||
            drrb->drr_type >= DMU_OST_NUMTYPES ||
            ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
                return (SET_ERROR(EINVAL));

        /* Verify pool version supports SA if SA_SPILL feature set */
        if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
            spa_version(dp->dp_spa) < SPA_VERSION_SA)
                return (SET_ERROR(ENOTSUP));

        if (drba->drba_cookie->drc_resumable &&
            !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
                return (SET_ERROR(ENOTSUP));

        /*
         * The receiving code doesn't know how to translate a WRITE_EMBEDDED
         * record to a plain WRITE record, so the pool must have the
         * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
         * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
         */
        if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
            !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
                return (SET_ERROR(ENOTSUP));
        if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
            !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
                return (SET_ERROR(ENOTSUP));

        /*
         * The receiving code doesn't know how to translate large blocks
         * to smaller ones, so the pool must have the LARGE_BLOCKS
         * feature enabled if the stream has LARGE_BLOCKS.
         */
        if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
            !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
                return (SET_ERROR(ENOTSUP));

        error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
        if (error == 0) {
                /* target fs already exists; recv into temp clone */

                /* Can't recv a clone into an existing fs */
                if (flags & DRR_FLAG_CLONE) {
                        dsl_dataset_rele(ds, FTAG);
                        return (SET_ERROR(EINVAL));
                }

                error = recv_begin_check_existing_impl(drba, ds, fromguid);
                dsl_dataset_rele(ds, FTAG);
        } else if (error == ENOENT) {
                /* target fs does not exist; must be a full backup or clone */
                char buf[MAXNAMELEN];

                /*
                 * If it's a non-clone incremental, we are missing the
                 * target fs, so fail the recv.
                 */
                if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
                    drba->drba_origin))
                        return (SET_ERROR(ENOENT));

                /* Open the parent of tofs */
                ASSERT3U(strlen(tofs), <, MAXNAMELEN);
                (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
                error = dsl_dataset_hold(dp, buf, FTAG, &ds);
                if (error != 0)
                        return (error);

                /*
                 * Check filesystem and snapshot limits before receiving. We'll
                 * recheck snapshot limits again at the end (we create the
                 * filesystems and increment those counts during begin_sync).
                 */
                error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
                    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
                if (error != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        return (error);
                }

                error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
                    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
                if (error != 0) {
                        dsl_dataset_rele(ds, FTAG);
                        return (error);
                }

                if (drba->drba_origin != NULL) {
                        dsl_dataset_t *origin;
                        error = dsl_dataset_hold(dp, drba->drba_origin,
                            FTAG, &origin);
                        if (error != 0) {
                                dsl_dataset_rele(ds, FTAG);
                                return (error);
                        }
                        if (!origin->ds_is_snapshot) {
                                dsl_dataset_rele(origin, FTAG);
                                dsl_dataset_rele(ds, FTAG);
                                return (SET_ERROR(EINVAL));
                        }
                        if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
                                dsl_dataset_rele(origin, FTAG);
                                dsl_dataset_rele(ds, FTAG);
                                return (SET_ERROR(ENODEV));
                        }
                        dsl_dataset_rele(origin, FTAG);
                }
                dsl_dataset_rele(ds, FTAG);
                error = 0;
        }
        return (error);
}

1394 static void
1395 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1396 {
1397         dmu_recv_begin_arg_t *drba = arg;
1398         dsl_pool_t *dp = dmu_tx_pool(tx);
1399         objset_t *mos = dp->dp_meta_objset;
1400         struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1401         const char *tofs = drba->drba_cookie->drc_tofs;
1402         dsl_dataset_t *ds, *newds;
1403         uint64_t dsobj;
1404         int error;
1405         uint64_t crflags = 0;
1406
1407         if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1408                 crflags |= DS_FLAG_CI_DATASET;
1409
1410         error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1411         if (error == 0) {
1412                 /* create temporary clone */
1413                 dsl_dataset_t *snap = NULL;
1414                 if (drba->drba_snapobj != 0) {
1415                         VERIFY0(dsl_dataset_hold_obj(dp,
1416                             drba->drba_snapobj, FTAG, &snap));
1417                 }
1418                 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1419                     snap, crflags, drba->drba_cred, tx);
1420                 if (drba->drba_snapobj != 0)
1421                         dsl_dataset_rele(snap, FTAG);
1422                 dsl_dataset_rele(ds, FTAG);
1423         } else {
1424                 dsl_dir_t *dd;
1425                 const char *tail;
1426                 dsl_dataset_t *origin = NULL;
1427
1428                 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1429
1430                 if (drba->drba_origin != NULL) {
1431                         VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1432                             FTAG, &origin));
1433                 }
1434
1435                 /* Create new dataset. */
1436                 dsobj = dsl_dataset_create_sync(dd,
1437                     strrchr(tofs, '/') + 1,
1438                     origin, crflags, drba->drba_cred, tx);
1439                 if (origin != NULL)
1440                         dsl_dataset_rele(origin, FTAG);
1441                 dsl_dir_rele(dd, FTAG);
1442                 drba->drba_cookie->drc_newfs = B_TRUE;
1443         }
1444         VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1445
1446         if (drba->drba_cookie->drc_resumable) {
1447                 dsl_dataset_zapify(newds, tx);
1448                 if (drrb->drr_fromguid != 0) {
1449                         VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1450                             8, 1, &drrb->drr_fromguid, tx));
1451                 }
1452                 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1453                     8, 1, &drrb->drr_toguid, tx));
1454                 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1455                     1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1456                 uint64_t one = 1;
1457                 uint64_t zero = 0;
1458                 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1459                     8, 1, &one, tx));
1460                 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1461                     8, 1, &zero, tx));
1462                 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1463                     8, 1, &zero, tx));
1464                 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1465                     DMU_BACKUP_FEATURE_EMBED_DATA) {
1466                         VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1467                             8, 1, &one, tx));
1468                 }
1469         }
1470
1471         dmu_buf_will_dirty(newds->ds_dbuf, tx);
1472         dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1473
1474         /*
1475          * If we actually created a non-clone, we need to create the
1476          * objset in our new dataset.
1477          */
1478         if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1479                 (void) dmu_objset_create_impl(dp->dp_spa,
1480                     newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1481         }
1482
1483         drba->drba_cookie->drc_ds = newds;
1484
1485         spa_history_log_internal_ds(newds, "receive", tx, "");
1486 }
1487
1488 static int
1489 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1490 {
1491         dmu_recv_begin_arg_t *drba = arg;
1492         dsl_pool_t *dp = dmu_tx_pool(tx);
1493         struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1494         int error;
1495         uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1496         dsl_dataset_t *ds;
1497         const char *tofs = drba->drba_cookie->drc_tofs;
1498
1499         /* already checked */
1500         ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1501         ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
1502
1503         if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1504             DMU_COMPOUNDSTREAM ||
1505             drrb->drr_type >= DMU_OST_NUMTYPES)
1506                 return (SET_ERROR(EINVAL));
1507
1508         /* Verify pool version supports SA if SA_SPILL feature set */
1509         if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1510             spa_version(dp->dp_spa) < SPA_VERSION_SA)
1511                 return (SET_ERROR(ENOTSUP));
1512
1513         /*
1514          * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1515          * record to a plain WRITE record, so the pool must have the
1516          * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1517          * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
1518          */
1519         if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1520             !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1521                 return (SET_ERROR(ENOTSUP));
1522         if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
1523             !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1524                 return (SET_ERROR(ENOTSUP));
1525
1526         char recvname[ZFS_MAXNAMELEN];
1527
1528         (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1529             tofs, recv_clone_name);
1530
1531         if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1532                 /* %recv does not exist; continue in tofs */
1533                 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1534                 if (error != 0)
1535                         return (error);
1536         }
1537
1538         /* check that ds is marked inconsistent */
1539         if (!DS_IS_INCONSISTENT(ds)) {
1540                 dsl_dataset_rele(ds, FTAG);
1541                 return (SET_ERROR(EINVAL));
1542         }
1543
1544         /* check that there is resuming data, and that the toguid matches */
1545         if (!dsl_dataset_is_zapified(ds)) {
1546                 dsl_dataset_rele(ds, FTAG);
1547                 return (SET_ERROR(EINVAL));
1548         }
1549         uint64_t val;
1550         error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1551             DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1552         if (error != 0 || drrb->drr_toguid != val) {
1553                 dsl_dataset_rele(ds, FTAG);
1554                 return (SET_ERROR(EINVAL));
1555         }
1556
1557         /*
1558          * Check if the receive is still running.  If so, it will be owned.
1559          * Note that nothing else can own the dataset (e.g. after the receive
1560          * fails) because it will be marked inconsistent.
1561          */
1562         if (dsl_dataset_has_owner(ds)) {
1563                 dsl_dataset_rele(ds, FTAG);
1564                 return (SET_ERROR(EBUSY));
1565         }
1566
1567         /* There should not be any snapshots of this fs yet. */
1568         if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
1569                 dsl_dataset_rele(ds, FTAG);
1570                 return (SET_ERROR(EINVAL));
1571         }
1572
1573         /*
1574          * Note: resume point will be checked when we process the first WRITE
1575          * record.
1576          */
1577
1578         /* check that the origin matches */
1579         val = 0;
1580         (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
1581             DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
1582         if (drrb->drr_fromguid != val) {
1583                 dsl_dataset_rele(ds, FTAG);
1584                 return (SET_ERROR(EINVAL));
1585         }
1586
1587         dsl_dataset_rele(ds, FTAG);
1588         return (0);
1589 }
1590
1591 static void
1592 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
1593 {
1594         dmu_recv_begin_arg_t *drba = arg;
1595         dsl_pool_t *dp = dmu_tx_pool(tx);
1596         const char *tofs = drba->drba_cookie->drc_tofs;
1597         dsl_dataset_t *ds;
1598         uint64_t dsobj;
1599         char recvname[ZFS_MAXNAMELEN];
1600
1601         (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1602             tofs, recv_clone_name);
1603
1604         if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1605                 /* %recv does not exist; continue in tofs */
1606                 VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
1607                 drba->drba_cookie->drc_newfs = B_TRUE;
1608         }
1609
1610         /* clear the inconsistent flag so that we can own it */
1611         ASSERT(DS_IS_INCONSISTENT(ds));
1612         dmu_buf_will_dirty(ds->ds_dbuf, tx);
1613         dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
1614         dsobj = ds->ds_object;
1615         dsl_dataset_rele(ds, FTAG);
1616
1617         VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));
1618
1619         dmu_buf_will_dirty(ds->ds_dbuf, tx);
1620         dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1621
1622         ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
1623
1624         drba->drba_cookie->drc_ds = ds;
1625
1626         spa_history_log_internal_ds(ds, "resume receive", tx, "");
1627 }
1628
1629 /*
1630  * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1631  * succeeds; otherwise we will leak the holds on the datasets.
1632  */
1633 int
1634 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
1635     boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
1636 {
1637         dmu_recv_begin_arg_t drba = { 0 };
1638
1639         bzero(drc, sizeof (dmu_recv_cookie_t));
1640         drc->drc_drr_begin = drr_begin;
1641         drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1642         drc->drc_tosnap = tosnap;
1643         drc->drc_tofs = tofs;
1644         drc->drc_force = force;
1645         drc->drc_resumable = resumable;
1646         drc->drc_cred = CRED();
1647
1648         if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
1649                 drc->drc_byteswap = B_TRUE;
1650                 fletcher_4_incremental_byteswap(drr_begin,
1651                     sizeof (dmu_replay_record_t), &drc->drc_cksum);
1652                 byteswap_record(drr_begin);
1653         } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1654                 fletcher_4_incremental_native(drr_begin,
1655                     sizeof (dmu_replay_record_t), &drc->drc_cksum);
1656         } else {
1657                 return (SET_ERROR(EINVAL));
1658         }
1659
1660         drba.drba_origin = origin;
1661         drba.drba_cookie = drc;
1662         drba.drba_cred = CRED();
1663
1664         if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1665             DMU_BACKUP_FEATURE_RESUMING) {
1666                 return (dsl_sync_task(tofs,
1667                     dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
1668                     &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1669         } else {
1670                 return (dsl_sync_task(tofs,
1671                     dmu_recv_begin_check, dmu_recv_begin_sync,
1672                     &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1673         }
1674 }
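/*
 * Illustrative caller sketch (an assumption for illustration; the real
 * consumer is the receive ioctl path, which is not part of this file).
 * It shows how the contract above chains: a successful dmu_recv_begin()
 * must be followed by dmu_recv_stream(), and a successful
 * dmu_recv_stream() by dmu_recv_end().  Here "off", "owner", and
 * "action_handle" stand in for caller state:
 *
 *      dmu_recv_cookie_t drc;
 *      error = dmu_recv_begin(tofs, tosnap, drr_begin, force,
 *          resumable, origin, &drc);
 *      if (error != 0)
 *              return (error);
 *      error = dmu_recv_stream(&drc, fp, &off, cleanup_fd,
 *          &action_handle);
 *      if (error == 0)
 *              error = dmu_recv_end(&drc, owner);
 */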
1675
1676 struct receive_record_arg {
1677         dmu_replay_record_t header;
1678         void *payload; /* Pointer to a buffer containing the payload */
1679         /*
1680          * If the record is a write, pointer to the arc_buf_t containing the
1681          * payload.
1682          */
1683         arc_buf_t *write_buf;
1684         int payload_size;
1685         uint64_t bytes_read; /* bytes read from stream when record created */
1686         boolean_t eos_marker; /* Marks the end of the stream */
1687         bqueue_node_t node;
1688 };
1689
1690 struct receive_writer_arg {
1691         objset_t *os;
1692         boolean_t byteswap;
1693         bqueue_t q;
1694
1695         /*
1696          * These three args are used to signal to the main thread that we're
1697          * done.
1698          */
1699         kmutex_t mutex;
1700         kcondvar_t cv;
1701         boolean_t done;
1702
1703         int err;
1704         /* A map from guid to dataset to help handle dedup'd streams. */
1705         avl_tree_t *guid_to_ds_map;
1706         boolean_t resumable;
1707         uint64_t last_object, last_offset;
1708         uint64_t bytes_read; /* bytes read when current record created */
1709 };
1710
1711 struct receive_arg {
1712         objset_t *os;
1713         kthread_t *td;
1714         struct file *fp;
1715         uint64_t voff; /* The current offset in the stream */
1716         uint64_t bytes_read;
1717         /*
1718          * A record that has had its payload read in, but hasn't yet been handed
1719          * off to the worker thread.
1720          */
1721         struct receive_record_arg *rrd;
1722         /* A record that has had its header read in, but not its payload. */
1723         struct receive_record_arg *next_rrd;
1724         zio_cksum_t cksum;
1725         zio_cksum_t prev_cksum;
1726         int err;
1727         boolean_t byteswap;
1728         /* Sorted list of objects not to issue prefetches for. */
1729         list_t ignore_obj_list;
1730 };
1731
1732 struct receive_ign_obj_node {
1733         list_node_t node;
1734         uint64_t object;
1735 };
1736
1737 typedef struct guid_map_entry {
1738         uint64_t        guid;
1739         dsl_dataset_t   *gme_ds;
1740         avl_node_t      avlnode;
1741 } guid_map_entry_t;
1742
1743 static int
1744 guid_compare(const void *arg1, const void *arg2)
1745 {
1746         const guid_map_entry_t *gmep1 = arg1;
1747         const guid_map_entry_t *gmep2 = arg2;
1748
1749         if (gmep1->guid < gmep2->guid)
1750                 return (-1);
1751         else if (gmep1->guid > gmep2->guid)
1752                 return (1);
1753         return (0);
1754 }
1755
1756 static void
1757 free_guid_map_onexit(void *arg)
1758 {
1759         avl_tree_t *ca = arg;
1760         void *cookie = NULL;
1761         guid_map_entry_t *gmep;
1762
1763         while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1764                 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1765                 dsl_dataset_rele(gmep->gme_ds, gmep);
1766                 kmem_free(gmep, sizeof (guid_map_entry_t));
1767         }
1768         avl_destroy(ca);
1769         kmem_free(ca, sizeof (avl_tree_t));
1770 }
1771
1772 static int
1773 restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid)
1774 {
1775         struct uio auio;
1776         struct iovec aiov;
1777         int error;
1778
1779         aiov.iov_base = buf;
1780         aiov.iov_len = len;
1781         auio.uio_iov = &aiov;
1782         auio.uio_iovcnt = 1;
1783         auio.uio_resid = len;
1784         auio.uio_segflg = UIO_SYSSPACE;
1785         auio.uio_rw = UIO_READ;
1786         auio.uio_offset = off;
1787         auio.uio_td = ra->td;
1788 #ifdef _KERNEL
1789         error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1790 #else
1791         fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1792         error = EOPNOTSUPP;
1793 #endif
1794         *resid = auio.uio_resid;
1795         return (error);
1796 }
1797
1798 static int
1799 receive_read(struct receive_arg *ra, int len, void *buf)
1800 {
1801         int done = 0;
1802
1803         /* some things will require 8-byte alignment, so everything must */
1804         ASSERT0(len % 8);
1805
1806         while (done < len) {
1807                 ssize_t resid;
1808
1809                 ra->err = restore_bytes(ra, buf + done,
1810                     len - done, ra->voff, &resid);
1811
1812                 if (resid == len - done) {
1813                         /*
1814                          * Note: ECKSUM indicates that the receive
1815                          * was interrupted and can potentially be resumed.
1816                          */
1817                         ra->err = SET_ERROR(ECKSUM);
1818                 }
1819                 ra->voff += len - done - resid;
1820                 done = len - resid;
1821                 if (ra->err != 0)
1822                         return (ra->err);
1823         }
1824
1825         ra->bytes_read += len;
1826
1827         ASSERT3U(done, ==, len);
1828         return (0);
1829 }
1830
1831 static void
1832 byteswap_record(dmu_replay_record_t *drr)
1833 {
1834 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1835 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1836         drr->drr_type = BSWAP_32(drr->drr_type);
1837         drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1838
1839         switch (drr->drr_type) {
1840         case DRR_BEGIN:
1841                 DO64(drr_begin.drr_magic);
1842                 DO64(drr_begin.drr_versioninfo);
1843                 DO64(drr_begin.drr_creation_time);
1844                 DO32(drr_begin.drr_type);
1845                 DO32(drr_begin.drr_flags);
1846                 DO64(drr_begin.drr_toguid);
1847                 DO64(drr_begin.drr_fromguid);
1848                 break;
1849         case DRR_OBJECT:
1850                 DO64(drr_object.drr_object);
1851                 DO32(drr_object.drr_type);
1852                 DO32(drr_object.drr_bonustype);
1853                 DO32(drr_object.drr_blksz);
1854                 DO32(drr_object.drr_bonuslen);
1855                 DO64(drr_object.drr_toguid);
1856                 break;
1857         case DRR_FREEOBJECTS:
1858                 DO64(drr_freeobjects.drr_firstobj);
1859                 DO64(drr_freeobjects.drr_numobjs);
1860                 DO64(drr_freeobjects.drr_toguid);
1861                 break;
1862         case DRR_WRITE:
1863                 DO64(drr_write.drr_object);
1864                 DO32(drr_write.drr_type);
1865                 DO64(drr_write.drr_offset);
1866                 DO64(drr_write.drr_length);
1867                 DO64(drr_write.drr_toguid);
1868                 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
1869                 DO64(drr_write.drr_key.ddk_prop);
1870                 break;
1871         case DRR_WRITE_BYREF:
1872                 DO64(drr_write_byref.drr_object);
1873                 DO64(drr_write_byref.drr_offset);
1874                 DO64(drr_write_byref.drr_length);
1875                 DO64(drr_write_byref.drr_toguid);
1876                 DO64(drr_write_byref.drr_refguid);
1877                 DO64(drr_write_byref.drr_refobject);
1878                 DO64(drr_write_byref.drr_refoffset);
1879                 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
1880                     drr_key.ddk_cksum);
1881                 DO64(drr_write_byref.drr_key.ddk_prop);
1882                 break;
1883         case DRR_WRITE_EMBEDDED:
1884                 DO64(drr_write_embedded.drr_object);
1885                 DO64(drr_write_embedded.drr_offset);
1886                 DO64(drr_write_embedded.drr_length);
1887                 DO64(drr_write_embedded.drr_toguid);
1888                 DO32(drr_write_embedded.drr_lsize);
1889                 DO32(drr_write_embedded.drr_psize);
1890                 break;
1891         case DRR_FREE:
1892                 DO64(drr_free.drr_object);
1893                 DO64(drr_free.drr_offset);
1894                 DO64(drr_free.drr_length);
1895                 DO64(drr_free.drr_toguid);
1896                 break;
1897         case DRR_SPILL:
1898                 DO64(drr_spill.drr_object);
1899                 DO64(drr_spill.drr_length);
1900                 DO64(drr_spill.drr_toguid);
1901                 break;
1902         case DRR_END:
1903                 DO64(drr_end.drr_toguid);
1904                 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
1905                 break;
1906         }
1907
1908         if (drr->drr_type != DRR_BEGIN) {
1909                 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
1910         }
1911
1912 #undef DO64
1913 #undef DO32
1914 }
1915
1916 static inline uint8_t
1917 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1918 {
1919         if (bonus_type == DMU_OT_SA) {
1920                 return (1);
1921         } else {
1922                 return (1 +
1923                     ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1924         }
1925 }
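/*
 * Worked example for the arithmetic above, using this codebase's
 * constants (DN_MAX_BONUSLEN == 320, SPA_BLKPTRSHIFT == 7, i.e.
 * 128-byte block pointers): a 64-byte non-SA bonus leaves
 * 320 - 64 == 256 spare bytes, and 256 >> 7 == 2 extra slots, so the
 * function returns 1 + 2 == 3 block pointers.  DMU_OT_SA bonuses
 * always yield 1, since a spill block (if any) is tracked in the
 * dnode's dn_spill field rather than in reclaimed bonus space.
 */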
1926
1927 static void
1928 save_resume_state(struct receive_writer_arg *rwa,
1929     uint64_t object, uint64_t offset, dmu_tx_t *tx)
1930 {
1931         int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1932
1933         if (!rwa->resumable)
1934                 return;
1935
1936         /*
1937          * We use ds_resume_bytes[] != 0 to indicate that we need to
1938          * update this on disk, so it must not be 0.
1939          */
1940         ASSERT(rwa->bytes_read != 0);
1941
1942         /*
1943          * We only resume from write records, which have a valid
1944          * (non-meta-dnode) object number.
1945          */
1946         ASSERT(object != 0);
1947
1948         /*
1949          * For resuming to work correctly, we must receive records in order,
1950          * sorted by object,offset.  This is checked by the callers, but
1951          * assert it here for good measure.
1952          */
1953         ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1954         ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1955             offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1956         ASSERT3U(rwa->bytes_read, >=,
1957             rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1958
1959         rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1960         rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1961         rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
1962 }
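/*
 * Illustrative note on the txgoff indexing above: TXG_MASK is
 * TXG_SIZE - 1, so with TXG_SIZE == 4, a record assigned to txg 103
 * updates slot 103 & 3 == 3.  Each slot shadows one in-flight txg;
 * the dataset sync path (outside this file) is assumed to push the
 * slot for the syncing txg out to the DS_FIELD_RESUME_* ZAP entries.
 */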
1963
1964 static int
1965 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1966     void *data)
1967 {
1968         dmu_object_info_t doi;
1969         dmu_tx_t *tx;
1970         uint64_t object;
1971         int err;
1972
1973         if (drro->drr_type == DMU_OT_NONE ||
1974             !DMU_OT_IS_VALID(drro->drr_type) ||
1975             !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1976             drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1977             drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1978             P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1979             drro->drr_blksz < SPA_MINBLOCKSIZE ||
1980             drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1981             drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1982                 return (SET_ERROR(EINVAL));
1983         }
1984
1985         err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1986
1987         if (err != 0 && err != ENOENT)
1988                 return (SET_ERROR(EINVAL));
1989         object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1990
1991         /*
1992          * If we are losing blkptrs or changing the block size this must
1993          * be a new file instance.  We must clear out the previous file
1994          * contents before we can change this type of metadata in the dnode.
1995          */
1996         if (err == 0) {
1997                 int nblkptr;
1998
1999                 nblkptr = deduce_nblkptr(drro->drr_bonustype,
2000                     drro->drr_bonuslen);
2001
2002                 if (drro->drr_blksz != doi.doi_data_block_size ||
2003                     nblkptr < doi.doi_nblkptr) {
2004                         err = dmu_free_long_range(rwa->os, drro->drr_object,
2005                             0, DMU_OBJECT_END);
2006                         if (err != 0)
2007                                 return (SET_ERROR(EINVAL));
2008                 }
2009         }
2010
2011         tx = dmu_tx_create(rwa->os);
2012         dmu_tx_hold_bonus(tx, object);
2013         err = dmu_tx_assign(tx, TXG_WAIT);
2014         if (err != 0) {
2015                 dmu_tx_abort(tx);
2016                 return (err);
2017         }
2018
2019         if (object == DMU_NEW_OBJECT) {
2020                 /* currently free, want to be allocated */
2021                 err = dmu_object_claim(rwa->os, drro->drr_object,
2022                     drro->drr_type, drro->drr_blksz,
2023                     drro->drr_bonustype, drro->drr_bonuslen, tx);
2024         } else if (drro->drr_type != doi.doi_type ||
2025             drro->drr_blksz != doi.doi_data_block_size ||
2026             drro->drr_bonustype != doi.doi_bonus_type ||
2027             drro->drr_bonuslen != doi.doi_bonus_size) {
2028                 /* currently allocated, but with different properties */
2029                 err = dmu_object_reclaim(rwa->os, drro->drr_object,
2030                     drro->drr_type, drro->drr_blksz,
2031                     drro->drr_bonustype, drro->drr_bonuslen, tx);
2032         }
2033         if (err != 0) {
2034                 dmu_tx_commit(tx);
2035                 return (SET_ERROR(EINVAL));
2036         }
2037
2038         dmu_object_set_checksum(rwa->os, drro->drr_object,
2039             drro->drr_checksumtype, tx);
2040         dmu_object_set_compress(rwa->os, drro->drr_object,
2041             drro->drr_compress, tx);
2042
2043         if (data != NULL) {
2044                 dmu_buf_t *db;
2045
2046                 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
2047                 dmu_buf_will_dirty(db, tx);
2048
2049                 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2050                 bcopy(data, db->db_data, drro->drr_bonuslen);
2051                 if (rwa->byteswap) {
2052                         dmu_object_byteswap_t byteswap =
2053                             DMU_OT_BYTESWAP(drro->drr_bonustype);
2054                         dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2055                             drro->drr_bonuslen);
2056                 }
2057                 dmu_buf_rele(db, FTAG);
2058         }
2059         dmu_tx_commit(tx);
2060
2061         return (0);
2062 }
2063
2064 /* ARGSUSED */
2065 static int
2066 receive_freeobjects(struct receive_writer_arg *rwa,
2067     struct drr_freeobjects *drrfo)
2068 {
2069         uint64_t obj;
2070
2071         if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2072                 return (SET_ERROR(EINVAL));
2073
2074         for (obj = drrfo->drr_firstobj;
2075             obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
2076             (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2077                 int err;
2078
2079                 if (dmu_object_info(rwa->os, obj, NULL) != 0)
2080                         continue;
2081
2082                 err = dmu_free_long_object(rwa->os, obj);
2083                 if (err != 0)
2084                         return (err);
2085         }
2086
2087         return (0);
2088 }
2089
2090 static int
2091 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2092     arc_buf_t *abuf)
2093 {
2094         dmu_tx_t *tx;
2095         int err;
2096
2097         if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
2098             !DMU_OT_IS_VALID(drrw->drr_type))
2099                 return (SET_ERROR(EINVAL));
2100
2101         /*
2102          * For resuming to work, records must be in increasing order
2103          * by (object, offset).
2104          */
2105         if (drrw->drr_object < rwa->last_object ||
2106             (drrw->drr_object == rwa->last_object &&
2107             drrw->drr_offset < rwa->last_offset)) {
2108                 return (SET_ERROR(EINVAL));
2109         }
2110         rwa->last_object = drrw->drr_object;
2111         rwa->last_offset = drrw->drr_offset;
2112
2113         if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2114                 return (SET_ERROR(EINVAL));
2115
2116         tx = dmu_tx_create(rwa->os);
2117
2118         dmu_tx_hold_write(tx, drrw->drr_object,
2119             drrw->drr_offset, drrw->drr_length);
2120         err = dmu_tx_assign(tx, TXG_WAIT);
2121         if (err != 0) {
2122                 dmu_tx_abort(tx);
2123                 return (err);
2124         }
2125         if (rwa->byteswap) {
2126                 dmu_object_byteswap_t byteswap =
2127                     DMU_OT_BYTESWAP(drrw->drr_type);
2128                 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2129                     drrw->drr_length);
2130         }
2131
2132         dmu_buf_t *bonus;
2133         if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
                     /* tx is assigned at this point; commit it, don't abort */
                     dmu_tx_commit(tx);
2134                 return (SET_ERROR(EINVAL));
             }
2135         dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2136
2137         /*
2138          * Note: If the receive fails, we want the resume stream to start
2139          * with the same record that we last successfully received (as opposed
2140          * to the next record), so that we can verify that we are
2141          * resuming from the correct location.
2142          */
2143         save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2144         dmu_tx_commit(tx);
2145         dmu_buf_rele(bonus, FTAG);
2146
2147         return (0);
2148 }
2149
2150 /*
2151  * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
2152  * streams to refer to a copy of the data that is already on the
2153  * system because it came in earlier in the stream.  This function
2154  * finds the earlier copy of the data, and uses that copy instead of
2155  * data from the stream to fulfill this write.
2156  */
2157 static int
2158 receive_write_byref(struct receive_writer_arg *rwa,
2159     struct drr_write_byref *drrwbr)
2160 {
2161         dmu_tx_t *tx;
2162         int err;
2163         guid_map_entry_t gmesrch;
2164         guid_map_entry_t *gmep;
2165         avl_index_t where;
2166         objset_t *ref_os = NULL;
2167         dmu_buf_t *dbp;
2168
2169         if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2170                 return (SET_ERROR(EINVAL));
2171
2172         /*
2173          * If the GUID of the referenced dataset is different from the
2174          * GUID of the target dataset, find the referenced dataset.
2175          */
2176         if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2177                 gmesrch.guid = drrwbr->drr_refguid;
2178                 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2179                     &where)) == NULL) {
2180                         return (SET_ERROR(EINVAL));
2181                 }
2182                 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2183                         return (SET_ERROR(EINVAL));
2184         } else {
2185                 ref_os = rwa->os;
2186         }
2187
2188         err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2189             drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2190         if (err != 0)
2191                 return (err);
2192
2193         tx = dmu_tx_create(rwa->os);
2194
2195         dmu_tx_hold_write(tx, drrwbr->drr_object,
2196             drrwbr->drr_offset, drrwbr->drr_length);
2197         err = dmu_tx_assign(tx, TXG_WAIT);
2198         if (err != 0) {
2199                 dmu_tx_abort(tx);
2200                 return (err);
2201         }
2202         dmu_write(rwa->os, drrwbr->drr_object,
2203             drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2204         dmu_buf_rele(dbp, FTAG);
2205
2206         /* See comment in receive_write. */
2207         save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2208         dmu_tx_commit(tx);
2209         return (0);
2210 }
2211
2212 static int
2213 receive_write_embedded(struct receive_writer_arg *rwa,
2214     struct drr_write_embedded *drrwe, void *data)
2215 {
2216         dmu_tx_t *tx;
2217         int err;
2218
2219         if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2220                 return (SET_ERROR(EINVAL));
2221
2222         if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2223                 return (SET_ERROR(EINVAL));
2224
2225         if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2226                 return (SET_ERROR(EINVAL));
2227         if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2228                 return (SET_ERROR(EINVAL));
2229
2230         tx = dmu_tx_create(rwa->os);
2231
2232         dmu_tx_hold_write(tx, drrwe->drr_object,
2233             drrwe->drr_offset, drrwe->drr_length);
2234         err = dmu_tx_assign(tx, TXG_WAIT);
2235         if (err != 0) {
2236                 dmu_tx_abort(tx);
2237                 return (err);
2238         }
2239
2240         dmu_write_embedded(rwa->os, drrwe->drr_object,
2241             drrwe->drr_offset, data, drrwe->drr_etype,
2242             drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2243             rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2244
2245         /* See comment in receive_write. */
2246         save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2247         dmu_tx_commit(tx);
2248         return (0);
2249 }
2250
2251 static int
2252 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2253     void *data)
2254 {
2255         dmu_tx_t *tx;
2256         dmu_buf_t *db, *db_spill;
2257         int err;
2258
2259         if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2260             drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2261                 return (SET_ERROR(EINVAL));
2262
2263         if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2264                 return (SET_ERROR(EINVAL));
2265
2266         VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2267         if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2268                 dmu_buf_rele(db, FTAG);
2269                 return (err);
2270         }
2271
2272         tx = dmu_tx_create(rwa->os);
2273
2274         dmu_tx_hold_spill(tx, db->db_object);
2275
2276         err = dmu_tx_assign(tx, TXG_WAIT);
2277         if (err != 0) {
2278                 dmu_buf_rele(db, FTAG);
2279                 dmu_buf_rele(db_spill, FTAG);
2280                 dmu_tx_abort(tx);
2281                 return (err);
2282         }
2283         dmu_buf_will_dirty(db_spill, tx);
2284
2285         if (db_spill->db_size < drrs->drr_length)
2286                 VERIFY0(dbuf_spill_set_blksz(db_spill,
2287                     drrs->drr_length, tx));
2288         bcopy(data, db_spill->db_data, drrs->drr_length);
2289
2290         dmu_buf_rele(db, FTAG);
2291         dmu_buf_rele(db_spill, FTAG);
2292
2293         dmu_tx_commit(tx);
2294         return (0);
2295 }
2296
2297 /* ARGSUSED */
2298 static int
2299 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2300 {
2301         int err;
2302
2303         if (drrf->drr_length != -1ULL &&
2304             drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2305                 return (SET_ERROR(EINVAL));
2306
2307         if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2308                 return (SET_ERROR(EINVAL));
2309
2310         err = dmu_free_long_range(rwa->os, drrf->drr_object,
2311             drrf->drr_offset, drrf->drr_length);
2312
2313         return (err);
2314 }
2315
2316 /* used to destroy the drc_ds on error */
2317 static void
2318 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2319 {
2320         if (drc->drc_resumable) {
2321                 /* wait for our resume state to be written to disk */
2322                 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2323                 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2324         } else {
2325                 char name[MAXNAMELEN];
2326                 dsl_dataset_name(drc->drc_ds, name);
2327                 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2328                 (void) dsl_destroy_head(name);
2329         }
2330 }
2331
2332 static void
2333 receive_cksum(struct receive_arg *ra, int len, void *buf)
2334 {
2335         if (ra->byteswap) {
2336                 fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2337         } else {
2338                 fletcher_4_incremental_native(buf, len, &ra->cksum);
2339         }
2340 }
2341
2342 /*
2343  * Read the payload into a buffer of size len, and update the current record's
2344  * payload field.
2345  * Allocate ra->next_rrd and read the next record's header into
2346  * ra->next_rrd->header.
2347  * Verify checksum of payload and next record.
2348  */
2349 static int
2350 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2351 {
2352         int err;
2353
2354         if (len != 0) {
2355                 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2356                 err = receive_read(ra, len, buf);
2357                 if (err != 0)
2358                         return (err);
2359                 receive_cksum(ra, len, buf);
2360
2361                 /* note: rrd is NULL when reading the begin record's payload */
2362                 if (ra->rrd != NULL) {
2363                         ra->rrd->payload = buf;
2364                         ra->rrd->payload_size = len;
2365                         ra->rrd->bytes_read = ra->bytes_read;
2366                 }
2367         }
2368
2369         ra->prev_cksum = ra->cksum;
2370
2371         ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2372         err = receive_read(ra, sizeof (ra->next_rrd->header),
2373             &ra->next_rrd->header);
2374         ra->next_rrd->bytes_read = ra->bytes_read;
2375         if (err != 0) {
2376                 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2377                 ra->next_rrd = NULL;
2378                 return (err);
2379         }
2380         if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2381                 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2382                 ra->next_rrd = NULL;
2383                 return (SET_ERROR(EINVAL));
2384         }
2385
2386         /*
2387          * Note: checksum is of everything up to but not including the
2388          * checksum itself.
2389          */
2390         ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2391             ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2392         receive_cksum(ra,
2393             offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2394             &ra->next_rrd->header);
2395
2396         zio_cksum_t cksum_orig =
2397             ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2398         zio_cksum_t *cksump =
2399             &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2400
2401         if (ra->byteswap)
2402                 byteswap_record(&ra->next_rrd->header);
2403
2404         if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2405             !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2406                 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2407                 ra->next_rrd = NULL;
2408                 return (SET_ERROR(ECKSUM));
2409         }
2410
2411         receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2412
2413         return (0);
2414 }
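/*
 * Illustrative sketch of the stream framing consumed above (a recap,
 * not a normative format description):
 *
 *      +--------------+---------+----------+---------+----------+--
 *      | BEGIN header | payload | header 1 | payload | header 2 | ...
 *      +--------------+---------+----------+---------+----------+--
 *
 * Each header's drr_checksum covers every stream byte before that
 * field, including the rest of its own header.  After the comparison,
 * the receive_cksum() call on cksum_orig folds the checksum bytes (as
 * they appeared on the wire) back into the running sum, so it stays
 * aligned with the sender's.
 */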
2415
2416 /*
2417  * Issue the prefetch reads for any necessary indirect blocks.
2418  *
2419  * We use the object ignore list to tell us whether or not to issue prefetches
2420  * for a given object.  We do this for both correctness (in case the blocksize
2421  * of an object has changed) and performance (if the object doesn't exist, don't
2422  * needlessly try to issue prefetches).  We also trim the list as we go through
2423  * the stream to prevent it from growing to an unbounded size.
2424  *
2425  * The object numbers within will always be in sorted order, and any write
2426  * records we see will also be in sorted order, but they're not sorted with
2427  * respect to each other (i.e. we can get several object records before
2428  * receiving each object's write records).  As a result, once we've reached a
2429  * given object number, we can safely remove any reference to lower object
2430  * numbers in the ignore list. In practice, we receive up to 32 object records
2431  * before receiving write records, so the list can have up to 32 nodes in it.
2432  */
2433 /* ARGSUSED */
2434 static void
2435 receive_read_prefetch(struct receive_arg *ra,
2436     uint64_t object, uint64_t offset, uint64_t length)
2437 {
2438         struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
2439         while (node != NULL && node->object < object) {
2440                 VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
2441                 kmem_free(node, sizeof (*node));
2442                 node = list_head(&ra->ignore_obj_list);
2443         }
2444         if (node == NULL || node->object > object) {
2445                 dmu_prefetch(ra->os, object, 1, offset, length,
2446                     ZIO_PRIORITY_SYNC_READ);
2447         }
2448 }
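/*
 * Worked example of the trimming above: if object records for objects
 * 5, 7, and 9 were put on the ignore list (say their blocksize
 * changed), the first write record for object 7 pops object 5 off the
 * head; the head is then 7, which matches, so no prefetch is issued.
 * A later write to object 8 pops 7 as well and does prefetch, since
 * the new head (9) is greater than 8.
 */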
2449
2450 /*
2451  * Read records off the stream, issuing any necessary prefetches.
2452  */
2453 static int
2454 receive_read_record(struct receive_arg *ra)
2455 {
2456         int err;
2457
2458         switch (ra->rrd->header.drr_type) {
2459         case DRR_OBJECT:
2460         {
2461                 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2462                 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2463                 void *buf = kmem_zalloc(size, KM_SLEEP);
2464                 dmu_object_info_t doi;
2465                 err = receive_read_payload_and_next_header(ra, size, buf);
2466                 if (err != 0) {
2467                         kmem_free(buf, size);
2468                         return (err);
2469                 }
2470                 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2471                 /*
2472                  * See receive_read_prefetch for an explanation why we're
2473                  * storing this object in the ignore_obj_list.
2474                  */
2475                 if (err == ENOENT ||
2476                     (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2477                         struct receive_ign_obj_node *node =
2478                             kmem_zalloc(sizeof (*node),
2479                             KM_SLEEP);
2480                         node->object = drro->drr_object;
2481 #ifdef ZFS_DEBUG
2482                         struct receive_ign_obj_node *last_object =
2483                             list_tail(&ra->ignore_obj_list);
2484                         uint64_t last_objnum = (last_object != NULL ?
2485                             last_object->object : 0);
2486                         ASSERT3U(node->object, >, last_objnum);
2487 #endif
2488                         list_insert_tail(&ra->ignore_obj_list, node);
2489                         err = 0;
2490                 }
2491                 return (err);
2492         }
2493         case DRR_FREEOBJECTS:
2494         {
2495                 err = receive_read_payload_and_next_header(ra, 0, NULL);
2496                 return (err);
2497         }
2498         case DRR_WRITE:
2499         {
2500                 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2501                 arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2502                     drrw->drr_length);
2503
2504                 err = receive_read_payload_and_next_header(ra,
2505                     drrw->drr_length, abuf->b_data);
2506                 if (err != 0) {
2507                         dmu_return_arcbuf(abuf);
2508                         return (err);
2509                 }
2510                 ra->rrd->write_buf = abuf;
2511                 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2512                     drrw->drr_length);
2513                 return (err);
2514         }
2515         case DRR_WRITE_BYREF:
2516         {
2517                 struct drr_write_byref *drrwb =
2518                     &ra->rrd->header.drr_u.drr_write_byref;
2519                 err = receive_read_payload_and_next_header(ra, 0, NULL);
2520                 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2521                     drrwb->drr_length);
2522                 return (err);
2523         }
2524         case DRR_WRITE_EMBEDDED:
2525         {
2526                 struct drr_write_embedded *drrwe =
2527                     &ra->rrd->header.drr_u.drr_write_embedded;
2528                 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2529                 void *buf = kmem_zalloc(size, KM_SLEEP);
2530
2531                 err = receive_read_payload_and_next_header(ra, size, buf);
2532                 if (err != 0) {
2533                         kmem_free(buf, size);
2534                         return (err);
2535                 }
2536
2537                 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2538                     drrwe->drr_length);
2539                 return (err);
2540         }
2541         case DRR_FREE:
2542         {
2543                 /*
2544                  * It might be beneficial to prefetch indirect blocks here, but
2545                  * we don't really have the data to decide for sure.
2546                  */
2547                 err = receive_read_payload_and_next_header(ra, 0, NULL);
2548                 return (err);
2549         }
2550         case DRR_END:
2551         {
2552                 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2553                 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2554                         return (SET_ERROR(ECKSUM));
2555                 return (0);
2556         }
2557         case DRR_SPILL:
2558         {
2559                 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2560                 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2561                 err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2562                     buf);
2563                 if (err != 0)
2564                         kmem_free(buf, drrs->drr_length);
2565                 return (err);
2566         }
2567         default:
2568                 return (SET_ERROR(EINVAL));
2569         }
2570 }
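/*
 * Recap of the payload handling per record type, as implemented above
 * (illustrative summary):
 *
 *      DRR_OBJECT              bonus buffer, drr_bonuslen rounded up to 8
 *      DRR_WRITE               loaned arc_buf of drr_length bytes
 *      DRR_WRITE_EMBEDDED      drr_psize bytes, rounded up to 8
 *      DRR_SPILL               drr_length bytes
 *      DRR_FREEOBJECTS,
 *      DRR_WRITE_BYREF,
 *      DRR_FREE                no payload, header only
 *      DRR_END                 terminates the stream; prev_cksum checked
 */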
2571
2572 /*
2573  * Commit the records to the pool.
2574  */
2575 static int
2576 receive_process_record(struct receive_writer_arg *rwa,
2577     struct receive_record_arg *rrd)
2578 {
2579         int err;
2580
2581         /* Processing in order, therefore bytes_read should be increasing. */
2582         ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2583         rwa->bytes_read = rrd->bytes_read;
2584
2585         switch (rrd->header.drr_type) {
2586         case DRR_OBJECT:
2587         {
2588                 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2589                 err = receive_object(rwa, drro, rrd->payload);
2590                 kmem_free(rrd->payload, rrd->payload_size);
2591                 rrd->payload = NULL;
2592                 return (err);
2593         }
2594         case DRR_FREEOBJECTS:
2595         {
2596                 struct drr_freeobjects *drrfo =
2597                     &rrd->header.drr_u.drr_freeobjects;
2598                 return (receive_freeobjects(rwa, drrfo));
2599         }
2600         case DRR_WRITE:
2601         {
2602                 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2603                 err = receive_write(rwa, drrw, rrd->write_buf);
2604                 /* if receive_write() is successful, it consumes the arc_buf */
2605                 if (err != 0)
2606                         dmu_return_arcbuf(rrd->write_buf);
2607                 rrd->write_buf = NULL;
2608                 rrd->payload = NULL;
2609                 return (err);
2610         }
2611         case DRR_WRITE_BYREF:
2612         {
2613                 struct drr_write_byref *drrwbr =
2614                     &rrd->header.drr_u.drr_write_byref;
2615                 return (receive_write_byref(rwa, drrwbr));
2616         }
2617         case DRR_WRITE_EMBEDDED:
2618         {
2619                 struct drr_write_embedded *drrwe =
2620                     &rrd->header.drr_u.drr_write_embedded;
2621                 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2622                 kmem_free(rrd->payload, rrd->payload_size);
2623                 rrd->payload = NULL;
2624                 return (err);
2625         }
2626         case DRR_FREE:
2627         {
2628                 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2629                 return (receive_free(rwa, drrf));
2630         }
2631         case DRR_SPILL:
2632         {
2633                 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2634                 err = receive_spill(rwa, drrs, rrd->payload);
2635                 kmem_free(rrd->payload, rrd->payload_size);
2636                 rrd->payload = NULL;
2637                 return (err);
2638         }
2639         default:
2640                 return (SET_ERROR(EINVAL));
2641         }
2642 }
2643
2644 /*
2645  * dmu_recv_stream's worker thread; pull records off the queue, and then call
2646  * receive_process_record.  When we're done, signal the main thread and exit.
2647  */
2648 static void
2649 receive_writer_thread(void *arg)
2650 {
2651         struct receive_writer_arg *rwa = arg;
2652         struct receive_record_arg *rrd;
2653         for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2654             rrd = bqueue_dequeue(&rwa->q)) {
2655                 /*
2656                  * If there's an error, the main thread will stop putting things
2657                  * on the queue, but we need to clear everything in it before we
2658                  * can exit.
2659                  */
2660                 if (rwa->err == 0) {
2661                         rwa->err = receive_process_record(rwa, rrd);
2662                 } else if (rrd->write_buf != NULL) {
2663                         dmu_return_arcbuf(rrd->write_buf);
2664                         rrd->write_buf = NULL;
2665                         rrd->payload = NULL;
2666                 } else if (rrd->payload != NULL) {
2667                         kmem_free(rrd->payload, rrd->payload_size);
2668                         rrd->payload = NULL;
2669                 }
2670                 kmem_free(rrd, sizeof (*rrd));
2671         }
2672         kmem_free(rrd, sizeof (*rrd));
2673         mutex_enter(&rwa->mutex);
2674         rwa->done = B_TRUE;
2675         cv_signal(&rwa->cv);
2676         mutex_exit(&rwa->mutex);
2677         thread_exit();
2678 }
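/*
 * Illustrative sketch of the consumer side of this handshake (an
 * assumption for illustration; the actual wait sits in
 * dmu_recv_stream() beyond the end of this excerpt).  After enqueueing
 * the eos marker, the main thread blocks until the writer thread sets
 * rwa.done under the mutex:
 *
 *      mutex_enter(&rwa.mutex);
 *      while (!rwa.done)
 *              cv_wait(&rwa.cv, &rwa.mutex);
 *      mutex_exit(&rwa.mutex);
 *      err = rwa.err;
 */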
2679
2680 static int
2681 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2682 {
2683         uint64_t val;
2684         objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2685         uint64_t dsobj = dmu_objset_id(ra->os);
2686         uint64_t resume_obj, resume_off;
2687
2688         if (nvlist_lookup_uint64(begin_nvl,
2689             "resume_object", &resume_obj) != 0 ||
2690             nvlist_lookup_uint64(begin_nvl,
2691             "resume_offset", &resume_off) != 0) {
2692                 return (SET_ERROR(EINVAL));
2693         }
2694         VERIFY0(zap_lookup(mos, dsobj,
2695             DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2696         if (resume_obj != val)
2697                 return (SET_ERROR(EINVAL));
2698         VERIFY0(zap_lookup(mos, dsobj,
2699             DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2700         if (resume_off != val)
2701                 return (SET_ERROR(EINVAL));
2702
2703         return (0);
2704 }
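/*
 * Illustrative sketch of where begin_nvl originates (an assumption for
 * illustration; the sender side lives in the send code, not in this
 * excerpt): a resuming sender is expected to pack the resume
 * coordinates into an nvlist carried as the BEGIN record's payload,
 * roughly:
 *
 *      nvlist_t *nvl = fnvlist_alloc();
 *      fnvlist_add_uint64(nvl, "resume_object", resumeobj);
 *      fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
 *
 * resume_check() then compares those values against the ZAP state
 * maintained by save_resume_state().
 */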
2705
2706
2707 /*
2708  * Read in the stream's records, one by one, and apply them to the pool.  There
2709  * are two threads involved; the thread that calls this function will spin up a
2710  * worker thread, read the records off the stream one by one, and issue
2711  * prefetches for any necessary indirect blocks.  It will then push the records
2712  * onto an internal blocking queue.  The worker thread will pull the records off
2713  * the queue, and actually write the data into the DMU.  This way, the worker
2714  * thread doesn't have to wait for reads to complete, since everything it needs
2715  * (the indirect blocks) will be prefetched.
2716  *
2717  * NB: callers *must* call dmu_recv_end() if this succeeds.
2718  */
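/*
 * The resulting pipeline, sketched (illustrative only):
 *
 *      main thread:  receive_read_record() -> prefetch -> bqueue_enqueue()
 *                                                              |
 *                                                              v
 *      writer thread:          bqueue_dequeue() -> receive_process_record()
 */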
2719 int
2720 dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2721     int cleanup_fd, uint64_t *action_handlep)
2722 {
2723         int err = 0;
2724         struct receive_arg ra = { 0 };
2725         struct receive_writer_arg rwa = { 0 };
2726         int featureflags;
2727         nvlist_t *begin_nvl = NULL;
2728
2729         ra.byteswap = drc->drc_byteswap;
2730         ra.cksum = drc->drc_cksum;
2731         ra.td = curthread;
2732         ra.fp = fp;
2733         ra.voff = *voffp;
2734
2735         if (dsl_dataset_is_zapified(drc->drc_ds)) {
2736                 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2737                     drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2738                     sizeof (ra.bytes_read), 1, &ra.bytes_read);
2739         }
2740
2741         list_create(&ra.ignore_obj_list, sizeof (struct receive_ign_obj_node),
2742             offsetof(struct receive_ign_obj_node, node));
2743
2744         /* these were verified in dmu_recv_begin */
2745         ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2746             DMU_SUBSTREAM);
2747         ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2748
2749         /*
2750          * Open the objset we are modifying.
2751          */
2752         VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2753
2754         ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2755
2756         featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2757
2758         /* if this stream is dedup'ed, set up the avl tree for guid mapping */
2759         if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2760                 minor_t minor;
2761
2762                 if (cleanup_fd == -1) {
2763                         ra.err = SET_ERROR(EBADF);
2764                         goto out;
2765                 }
2766                 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2767                 if (ra.err != 0) {
2768                         cleanup_fd = -1;
2769                         goto out;
2770                 }
2771
2772                 if (*action_handlep == 0) {
2773                         rwa.guid_to_ds_map =
2774                             kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2775                         avl_create(rwa.guid_to_ds_map, guid_compare,
2776                             sizeof (guid_map_entry_t),
2777                             offsetof(guid_map_entry_t, avlnode));
2778                         err = zfs_onexit_add_cb(minor,
2779                             free_guid_map_onexit, rwa.guid_to_ds_map,
2780                             action_handlep);
2781                         if (err != 0)
2782                                 goto out;
2783                 } else {
2784                         err = zfs_onexit_cb_data(minor, *action_handlep,
2785                             (void **)&rwa.guid_to_ds_map);
2786                         if (err != 0)
2787                                 goto out;
2788                 }
2789
2790                 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
2791         }
2792
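             /*
              * The DRR_BEGIN record may carry an nvlist payload (e.g. the
              * saved state for a resuming stream); read it in before
              * processing the records that follow.
              */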
2793         uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2794         void *payload = NULL;
2795         if (payloadlen != 0)
2796                 payload = kmem_alloc(payloadlen, KM_SLEEP);
2797
2798         err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2799         if (err != 0) {
2800                 if (payloadlen != 0)
2801                         kmem_free(payload, payloadlen);
2802                 goto out;
2803         }
2804         if (payloadlen != 0) {
2805                 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2806                 kmem_free(payload, payloadlen);
2807                 if (err != 0)
2808                         goto out;
2809         }
2810
2811         if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2812                 err = resume_check(&ra, begin_nvl);
2813                 if (err != 0)
2814                         goto out;
2815         }
2816
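             /* Set up the record queue and spin up the writer thread. */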
2817         (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2818             offsetof(struct receive_record_arg, node));
2819         cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2820         mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2821         rwa.os = ra.os;
2822         rwa.byteswap = drc->drc_byteswap;
2823         rwa.resumable = drc->drc_resumable;
2824
2825         (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
2826             TS_RUN, minclsyspri);
2827         /*
2828          * We're reading rwa.err without locks, which is safe since we are the
2829          * only reader, and the worker thread is the only writer.  It's ok if we
2830          * miss a write for an iteration or two of the loop, since the writer
2831          * thread will keep freeing records we send it until we send it an eos
2832          * marker.
2833          *
2834          * We can leave this loop in 3 ways:  First, if rwa.err is
2835          * non-zero.  In that case, the writer thread will free the rrd we just
2836  * pushed.  Second, if we're interrupted; in that case, either it's the
2837          * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2838          * has been handed off to the writer thread who will free it.  Finally,
2839          * if receive_read_record fails or we're at the end of the stream, then
2840          * we free ra.rrd and exit.
2841          */
2842         while (rwa.err == 0) {
2843                 if (issig(JUSTLOOKING) && issig(FORREAL)) {
2844                         err = SET_ERROR(EINTR);
2845                         break;
2846                 }
2847
2848                 ASSERT3P(ra.rrd, ==, NULL);
2849                 ra.rrd = ra.next_rrd;
2850                 ra.next_rrd = NULL;
2851                 /* Allocates and loads header into ra.next_rrd */
2852                 err = receive_read_record(&ra);
2853
2854                 if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2855                         kmem_free(ra.rrd, sizeof (*ra.rrd));
2856                         ra.rrd = NULL;
2857                         break;
2858                 }
2859
2860                 bqueue_enqueue(&rwa.q, ra.rrd,
2861                     sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2862                 ra.rrd = NULL;
2863         }
2864         if (ra.next_rrd == NULL)
2865                 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
2866         ra.next_rrd->eos_marker = B_TRUE;
2867         bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
2868
2869         mutex_enter(&rwa.mutex);
2870         while (!rwa.done) {
2871                 cv_wait(&rwa.cv, &rwa.mutex);
2872         }
2873         mutex_exit(&rwa.mutex);
2874
2875         cv_destroy(&rwa.cv);
2876         mutex_destroy(&rwa.mutex);
2877         bqueue_destroy(&rwa.q);
2878         if (err == 0)
2879                 err = rwa.err;
2880
2881 out:
2882         nvlist_free(begin_nvl);
2883         if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2884                 zfs_onexit_fd_rele(cleanup_fd);
2885
2886         if (err != 0) {
2887                 /*
2888                  * Clean up references.  If the receive is not resumable,
2889                  * destroy what we created, so we don't leave it in
2890                  * an inconsistent state.
2891                  */
2892                 dmu_recv_cleanup_ds(drc);
2893         }
2894
2895         *voffp = ra.voff;
2896         for (struct receive_ign_obj_node *n =
2897             list_remove_head(&ra.ignore_obj_list); n != NULL;
2898             n = list_remove_head(&ra.ignore_obj_list)) {
2899                 kmem_free(n, sizeof (*n));
2900         }
2901         list_destroy(&ra.ignore_obj_list);
2902         return (err);
2903 }
2904
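     /*
      * Check phase of the sync task that completes a receive: verify that
      * the clone swap (when receiving into an existing filesystem) or the
      * snapshot creation (for a newly created one) can succeed.
      */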
2905 static int
2906 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2907 {
2908         dmu_recv_cookie_t *drc = arg;
2909         dsl_pool_t *dp = dmu_tx_pool(tx);
2910         int error;
2911
2912         ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2913
2914         if (!drc->drc_newfs) {
2915                 dsl_dataset_t *origin_head;
2916
2917                 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2918                 if (error != 0)
2919                         return (error);
2920                 if (drc->drc_force) {
2921                         /*
2922                          * We will destroy any snapshots in tofs (i.e. before
2923                          * origin_head) that are after the origin (which is
2924                          * the snap before drc_ds, because drc_ds cannot
2925                          * have any snaps of its own).
2926                          */
2927                         uint64_t obj;
2928
2929                         obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2930                         while (obj !=
2931                             dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2932                                 dsl_dataset_t *snap;
2933                                 error = dsl_dataset_hold_obj(dp, obj, FTAG,
2934                                     &snap);
2935                                 if (error != 0)
2936                                         break;
2937                                 if (snap->ds_dir != origin_head->ds_dir)
2938                                         error = SET_ERROR(EINVAL);
2939                                 if (error == 0) {
2940                                         error = dsl_destroy_snapshot_check_impl(
2941                                             snap, B_FALSE);
2942                                 }
2943                                 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2944                                 dsl_dataset_rele(snap, FTAG);
2945                                 if (error != 0)
2946                                         break;
2947                         }
2948                         if (error != 0) {
2949                                 dsl_dataset_rele(origin_head, FTAG);
2950                                 return (error);
2951                         }
2952                 }
2953                 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2954                     origin_head, drc->drc_force, drc->drc_owner, tx);
2955                 if (error != 0) {
2956                         dsl_dataset_rele(origin_head, FTAG);
2957                         return (error);
2958                 }
2959                 error = dsl_dataset_snapshot_check_impl(origin_head,
2960                     drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2961                 dsl_dataset_rele(origin_head, FTAG);
2962                 if (error != 0)
2963                         return (error);
2964
2965                 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2966         } else {
2967                 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2968                     drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2969         }
2970         return (error);
2971 }
2972
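     /*
      * Sync phase of the receive-completion sync task: swap the received
      * clone with origin_head (or snapshot the new dataset), stamp the
      * new snapshot's creation time and guid from the BEGIN record,
      * clear DS_FLAG_INCONSISTENT, and drop any saved resume state.
      */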
2973 static void
2974 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2975 {
2976         dmu_recv_cookie_t *drc = arg;
2977         dsl_pool_t *dp = dmu_tx_pool(tx);
2978
2979         spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2980             tx, "snap=%s", drc->drc_tosnap);
2981
2982         if (!drc->drc_newfs) {
2983                 dsl_dataset_t *origin_head;
2984
2985                 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2986                     &origin_head));
2987
2988                 if (drc->drc_force) {
2989                         /*
2990                          * Destroy any snapshots of drc_tofs (origin_head)
2991                          * after the origin (the snap before drc_ds).
2992                          */
2993                         uint64_t obj;
2994
2995                         obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2996                         while (obj !=
2997                             dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2998                                 dsl_dataset_t *snap;
2999                                 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3000                                     &snap));
3001                                 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3002                                 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3003                                 dsl_destroy_snapshot_sync_impl(snap,
3004                                     B_FALSE, tx);
3005                                 dsl_dataset_rele(snap, FTAG);
3006                         }
3007                 }
3008                 VERIFY3P(drc->drc_ds->ds_prev, ==,
3009                     origin_head->ds_prev);
3010
3011                 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3012                     origin_head, tx);
3013                 dsl_dataset_snapshot_sync_impl(origin_head,
3014                     drc->drc_tosnap, tx);
3015
3016                 /* set snapshot's creation time and guid */
3017                 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3018                 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3019                     drc->drc_drrb->drr_creation_time;
3020                 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3021                     drc->drc_drrb->drr_toguid;
3022                 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3023                     ~DS_FLAG_INCONSISTENT;
3024
3025                 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3026                 dsl_dataset_phys(origin_head)->ds_flags &=
3027                     ~DS_FLAG_INCONSISTENT;
3028
3029                 dsl_dataset_rele(origin_head, FTAG);
3030                 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3031
3032                 if (drc->drc_owner != NULL)
3033                         VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3034         } else {
3035                 dsl_dataset_t *ds = drc->drc_ds;
3036
3037                 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3038
3039                 /* set snapshot's creation time and guid */
3040                 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3041                 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3042                     drc->drc_drrb->drr_creation_time;
3043                 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3044                     drc->drc_drrb->drr_toguid;
3045                 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3046                     ~DS_FLAG_INCONSISTENT;
3047
3048                 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3049                 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3050                 if (dsl_dataset_has_resume_receive_state(ds)) {
3051                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3052                             DS_FIELD_RESUME_FROMGUID, tx);
3053                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3054                             DS_FIELD_RESUME_OBJECT, tx);
3055                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3056                             DS_FIELD_RESUME_OFFSET, tx);
3057                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3058                             DS_FIELD_RESUME_BYTES, tx);
3059                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3060                             DS_FIELD_RESUME_TOGUID, tx);
3061                         (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3062                             DS_FIELD_RESUME_TONAME, tx);
3063                 }
3064         }
3065         drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3066         /*
3067          * Release the hold from dmu_recv_begin.  This must be done before
3068          * we return to open context, so that when we free the dataset's dnode,
3069          * we can evict its bonus buffer.
3070          */
3071         dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3072         drc->drc_ds = NULL;
3073 }
3074
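     /*
      * Add the snapshot identified by snapobj to the guid map used by
      * deduplicated ("zfs send -D") streams; the long hold keeps the
      * snapshot around for as long as the mapping exists.
      */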
3075 static int
3076 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3077 {
3078         dsl_pool_t *dp;
3079         dsl_dataset_t *snapds;
3080         guid_map_entry_t *gmep;
3081         int err;
3082
3083         ASSERT(guid_map != NULL);
3084
3085         err = dsl_pool_hold(name, FTAG, &dp);
3086         if (err != 0)
3087                 return (err);
3088         gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
3089         err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
3090         if (err == 0) {
3091                 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3092                 gmep->gme_ds = snapds;
3093                 avl_add(guid_map, gmep);
3094                 dsl_dataset_long_hold(snapds, gmep);
3095         } else
3096                 kmem_free(gmep, sizeof (*gmep));
3097
3098         dsl_pool_rele(dp, FTAG);
3099         return (err);
3100 }
3101
3102 static int dmu_recv_end_modified_blocks = 3;
3103
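     /*
      * Complete a receive into an existing filesystem, cleaning up the
      * dataset being received into if the sync task fails.
      */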
3104 static int
3105 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3106 {
3107         int error;
3108         char name[MAXNAMELEN];
3109
3110 #ifdef _KERNEL
3111         /*
3112          * We will be destroying the ds; make sure its origin is unmounted if
3113          * necessary.
3114          */
3115         dsl_dataset_name(drc->drc_ds, name);
3116         zfs_destroy_unmount_origin(name);
3117 #endif
3118
3119         error = dsl_sync_task(drc->drc_tofs,
3120             dmu_recv_end_check, dmu_recv_end_sync, drc,
3121             dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3122
3123         if (error != 0)
3124                 dmu_recv_cleanup_ds(drc);
3125         return (error);
3126 }
3127
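     /*
      * Complete a receive that created a new filesystem; on success,
      * record the new snapshot in the dedup guid map if this stream
      * maintains one.
      */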
3128 static int
3129 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3130 {
3131         int error;
3132
3133         error = dsl_sync_task(drc->drc_tofs,
3134             dmu_recv_end_check, dmu_recv_end_sync, drc,
3135             dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3136
3137         if (error != 0) {
3138                 dmu_recv_cleanup_ds(drc);
3139         } else if (drc->drc_guid_to_ds_map != NULL) {
3140                 (void) add_ds_to_guidmap(drc->drc_tofs,
3141                     drc->drc_guid_to_ds_map,
3142                     drc->drc_newsnapobj);
3143         }
3144         return (error);
3145 }
3146
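     /*
      * Complete a receive, dispatching on whether it created a new
      * filesystem or updated an existing one.
      */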
3147 int
3148 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3149 {
3150         drc->drc_owner = owner;
3151
3152         if (drc->drc_newfs)
3153                 return (dmu_recv_new_end(drc));
3154         else
3155                 return (dmu_recv_existing_end(drc));
3156 }
3157
3158 /*
3159  * Return TRUE if a stream is currently being received into this objset.
3160  */
3161 boolean_t
3162 dmu_objset_is_receiving(objset_t *os)
3163 {
3164         return (os->os_dsl_dataset != NULL &&
3165             os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3166 }