/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scan_cb_t dsl_scan_defrag_cb;
static scan_cb_t dsl_scan_scrub_cb;
static scan_cb_t dsl_scan_remove_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx);

int zfs_top_maxinflight = 32;           /* maximum I/Os per top-level vdev */
int zfs_resilver_delay = 2;             /* number of ticks to delay resilver */
int zfs_scrub_delay = 4;                /* number of ticks to delay scrub */
int zfs_scan_idle = 50;                 /* idle window in clock ticks */

int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */
int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetching */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */

#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
        ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
        (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/* the order has to match pool_scan_func_t */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
        NULL,
        dsl_scan_scrub_cb,      /* POOL_SCAN_SCRUB */
        dsl_scan_scrub_cb,      /* POOL_SCAN_RESILVER */
};

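/*
 * Called at pool open/import time to reconstruct the in-core scan state
 * from the MOS.  If an old-style (pre-SPA_VERSION_SCAN) scrub was in
 * progress, or a new-style scan was last touched by older software, the
 * scan is flagged to restart from the beginning at the given txg.
 */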
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
        int err;
        dsl_scan_t *scn;
        spa_t *spa = dp->dp_spa;
        uint64_t f;

        scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
        scn->scn_dp = dp;

        /*
         * It's possible that we're resuming a scan after a reboot so
         * make sure that the scan_async_destroying flag is initialized
         * appropriately.
         */
        ASSERT(!scn->scn_async_destroying);
        scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
            SPA_FEATURE_ASYNC_DESTROY);

        err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            "scrub_func", sizeof (uint64_t), 1, &f);
        if (err == 0) {
                /*
                 * There was an old-style scrub in progress.  Restart a
                 * new-style scrub from the beginning.
                 */
                scn->scn_restart_txg = txg;
                zfs_dbgmsg("old-style scrub was in progress; "
                    "restarting new-style scrub in txg %llu",
                    scn->scn_restart_txg);

                /*
                 * Load the queue obj from the old location so that it
                 * can be freed by dsl_scan_done().
                 */
                (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
                    "scrub_queue", sizeof (uint64_t), 1,
                    &scn->scn_phys.scn_queue_obj);
        } else {
                err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
                    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
                    &scn->scn_phys);
                if (err == ENOENT)
                        return (0);
                else if (err)
                        return (err);

                if (scn->scn_phys.scn_state == DSS_SCANNING &&
                    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
                        /*
                         * A new-type scrub was in progress on an old
                         * pool, and the pool was accessed by old
                         * software.  Restart from the beginning, since
                         * the old software may have changed the pool in
                         * the meantime.
                         */
                        scn->scn_restart_txg = txg;
                        zfs_dbgmsg("new-style scrub was modified "
                            "by old software; restarting in txg %llu",
                            scn->scn_restart_txg);
                }
        }

        spa_scan_stat_init(spa);
        return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
        if (dp->dp_scan) {
                kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
                dp->dp_scan = NULL;
        }
}

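/*
 * dsl_scan_setup_check() and dsl_scan_setup_sync() are (presumably via
 * dsl_sync_task(), which is not shown in this section) the check/sync
 * pair that starts a scan: the check rejects the request if a scan is
 * already in progress, and the sync routine initializes scn_phys and
 * persists it, all in syncing context.
 */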
/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
        dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

        if (scn->scn_phys.scn_state == DSS_SCANNING)
                return (SET_ERROR(EBUSY));

        return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
        dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
        pool_scan_func_t *funcp = arg;
        dmu_object_type_t ot = 0;
        dsl_pool_t *dp = scn->scn_dp;
        spa_t *spa = dp->dp_spa;

        ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
        ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
        bzero(&scn->scn_phys, sizeof (scn->scn_phys));
        scn->scn_phys.scn_func = *funcp;
        scn->scn_phys.scn_state = DSS_SCANNING;
        scn->scn_phys.scn_min_txg = 0;
        scn->scn_phys.scn_max_txg = tx->tx_txg;
        scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
        scn->scn_phys.scn_start_time = gethrestime_sec();
        scn->scn_phys.scn_errors = 0;
        scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
        scn->scn_restart_txg = 0;
        scn->scn_done_txg = 0;
        spa_scan_stat_init(spa);

        if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
                scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

                /* rewrite all disk labels */
                vdev_config_dirty(spa->spa_root_vdev);

                if (vdev_resilver_needed(spa->spa_root_vdev,
                    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
                        spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
                } else {
                        spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
                }

                spa->spa_scrub_started = B_TRUE;
                /*
                 * If this is an incremental scrub, limit the DDT scrub phase
                 * to just the auto-ditto class (for correctness); the rest
                 * of the scrub should go faster using top-down pruning.
                 */
                if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
                        scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;

        }

        /* back to the generic stuff */

        if (dp->dp_blkstats == NULL) {
                dp->dp_blkstats =
                    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
        }
        bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

        if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

        dsl_scan_sync_state(scn, tx);

        spa_history_log_internal(spa, "scan setup", tx,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

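/*
 * Tear down scan state on completion or cancellation: remove any
 * remnant old-style scrub ZAP entries, free the work-queue object, and
 * mark the scan FINISHED or CANCELED.  For scrubs and resilvers, also
 * wait for in-flight scrub I/O to drain, reassess DTLs, and notify
 * interested parties.
 */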
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
        static const char *old_names[] = {
                "scrub_bookmark",
                "scrub_ddt_bookmark",
                "scrub_ddt_class_max",
                "scrub_queue",
                "scrub_min_txg",
                "scrub_max_txg",
                "scrub_func",
                "scrub_errors",
                NULL
        };

        dsl_pool_t *dp = scn->scn_dp;
        spa_t *spa = dp->dp_spa;
        int i;

        /* Remove any remnants of an old-style scrub. */
        for (i = 0; old_names[i]; i++) {
                (void) zap_remove(dp->dp_meta_objset,
                    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
        }

        if (scn->scn_phys.scn_queue_obj != 0) {
                VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, tx));
                scn->scn_phys.scn_queue_obj = 0;
        }

        /*
         * If we were "restarted" from a stopped state, don't bother
         * with anything else.
         */
        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return;

        if (complete)
                scn->scn_phys.scn_state = DSS_FINISHED;
        else
                scn->scn_phys.scn_state = DSS_CANCELED;

        spa_history_log_internal(spa, "scan done", tx,
            "complete=%u", complete);

        if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight > 0) {
                        cv_wait(&spa->spa_scrub_io_cv,
                            &spa->spa_scrub_lock);
                }
                mutex_exit(&spa->spa_scrub_lock);
                spa->spa_scrub_started = B_FALSE;
                spa->spa_scrub_active = B_FALSE;

                /*
                 * If the scrub/resilver completed, update all DTLs to
                 * reflect this.  Whether it succeeded or not, vacate
                 * all temporary scrub DTLs.
                 */
                vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
                    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
                if (complete) {
                        spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
                            ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
                }
                spa_errlog_rotate(spa);

                /*
                 * We may have finished replacing a device.
                 * Let the async thread assess this and handle the detach.
                 */
                spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
        }

        scn->scn_phys.scn_end_time = gethrestime_sec();
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
        dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return (SET_ERROR(ENOENT));
        return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
        dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

        dsl_scan_done(scn, B_FALSE, tx);
        dsl_scan_sync_state(scn, tx);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
        return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
            dsl_scan_cancel_sync, NULL, 3));
}

static void dsl_scan_visitbp(blkptr_t *bp,
    const zbookmark_t *zb, dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype,
    dnode_phys_t *dnp, arc_buf_t *buf, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
        zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
        ASSERT(dsl_pool_sync_context(dp));
        zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
        uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
        if (dsl_dataset_is_snapshot(ds))
                return (MIN(smt, ds->ds_phys->ds_creation_txg));
        return (smt);
}

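/*
 * Persist the in-core scan state (scn_phys) to the MOS so that it
 * survives export and reboot; dsl_scan_init() reads it back from
 * DMU_POOL_SCAN.
 */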
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
        VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
            &scn->scn_phys, tx));
}

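/*
 * Decide whether to pause the scan at the given bookmark for this txg.
 * We pause when the txg has been open too long (zfs_txg_timeout), when
 * we have done at least the minimum amount of work and the txg sync is
 * waiting on us, or when the pool is shutting down.  The bookmark
 * (level-0 blocks only) is recorded so the scan can resume from it in a
 * later txg.
 */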
static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_t *zb)
{
        uint64_t elapsed_nanosecs;
        int mintime;

        /* we never skip user/group accounting objects */
        if (zb && (int64_t)zb->zb_object < 0)
                return (B_FALSE);

        if (scn->scn_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb && zb->zb_level != 0)
                return (B_FALSE);

        mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
            zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
        elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
        if (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
            (NSEC2MSEC(elapsed_nanosecs) > mintime &&
            txg_sync_waiting(scn->scn_dp)) ||
            spa_shutting_down(scn->scn_dp->dp_spa)) {
                if (zb) {
                        dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        scn->scn_phys.scn_bookmark = *zb;
                }
                dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
                    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
                    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
                    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
                    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
                scn->scn_pausing = B_TRUE;
                return (B_TRUE);
        }
        return (B_FALSE);
}

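/*
 * ZIL (intent log) traversal.  Log blocks are not reachable through a
 * dataset's block tree, so scrub/resilver walks them explicitly via
 * zil_parse() using the block and record callbacks below.
 */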
typedef struct zil_scan_arg {
        dsl_pool_t      *zsa_dp;
        zil_header_t    *zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_scan_arg_t *zsa = arg;
        dsl_pool_t *dp = zsa->zsa_dp;
        dsl_scan_t *scn = dp->dp_scan;
        zil_header_t *zh = zsa->zsa_zh;
        zbookmark_t zb;

        if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
                return (0);

        /*
         * One block ("stubby") may have been allocated long ago; we
         * want to visit it because it has been allocated (on-disk)
         * even if it hasn't been claimed (even though for scrub
         * there's nothing to do to it).
         */
        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return (0);

        SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
            ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

        VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
        return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_scan_arg_t *zsa = arg;
                dsl_pool_t *dp = zsa->zsa_dp;
                dsl_scan_t *scn = dp->dp_scan;
                zil_header_t *zh = zsa->zsa_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (BP_IS_HOLE(bp) ||
                    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
                        return (0);

                /*
                 * birth can be < claim_txg if this record's txg is
                 * already txg sync'ed (but this log block contains
                 * other records that are not synced)
                 */
                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return (0);

                SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
                    lr->lr_foid, ZB_ZIL_LEVEL,
                    lr->lr_offset / BP_GET_LSIZE(bp));

                VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
        }
        return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_scan_arg_t zsa = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && spa_writeable(dp->dp_spa))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
            claim_txg);

        zil_free(zilog);
}

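/*
 * Issue an asynchronous, prefetch-flavored arc_read() for an indirect
 * or dnode block we are about to visit, so the subsequent synchronous
 * read in dsl_scan_recurse() is likely to hit the ARC.
 */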
/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
        zbookmark_t czb;
        uint32_t flags = ARC_NOWAIT | ARC_PREFETCH;

        if (zfs_no_scrub_prefetch)
                return;

        if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
            (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
                return;

        SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

        (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
            NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

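/*
 * When resuming from a saved bookmark, skip everything we already
 * visited in a previous txg; once we reach (or pass) the bookmark,
 * zero it so that pause checking can begin again.
 */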
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_t *zb)
{
        /*
         * We never skip over user/group accounting objects (obj<0)
         */
        if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
            (int64_t)zb->zb_object >= 0) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg sync), don't bother doing it again.
                 */
                if (zbookmark_is_before(dnp, zb, &scn->scn_phys.scn_bookmark))
                        return (B_TRUE);

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
                }
        }
        return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 * Return new buf to write out in *bufp.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_t *zb, dmu_tx_t *tx, arc_buf_t **bufp)
{
        dsl_pool_t *dp = scn->scn_dp;
        int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
        int err;

        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
                    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
                if (err) {
                        scn->scn_phys.scn_errors++;
                        return (err);
                }
                for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
                        dsl_scan_prefetch(scn, *bufp, cbp, zb->zb_objset,
                            zb->zb_object, zb->zb_blkid * epb + i);
                }
                for (i = 0, cbp = (*bufp)->b_data; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        dsl_scan_visitbp(cbp, &czb, dnp,
                            *bufp, ds, scn, ostype, tx);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_USERGROUP_USED) {
                uint32_t flags = ARC_WAIT;

                err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
                    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
                if (err) {
                        scn->scn_phys.scn_errors++;
                        return (err);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *cdnp;
                int i, j;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
                    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
                if (err) {
                        scn->scn_phys.scn_errors++;
                        return (err);
                }
                for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
                        for (j = 0; j < cdnp->dn_nblkptr; j++) {
                                blkptr_t *cbp = &cdnp->dn_blkptr[j];
                                dsl_scan_prefetch(scn, *bufp, cbp,
                                    zb->zb_objset, zb->zb_blkid * epb + i, j);
                        }
                }
                for (i = 0, cdnp = (*bufp)->b_data; i < epb; i++, cdnp++) {
                        dsl_scan_visitdnode(scn, ds, ostype,
                            cdnp, *bufp, zb->zb_blkid * epb + i, tx);
                }

        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;

                err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, bufp,
                    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
                if (err) {
                        scn->scn_phys.scn_errors++;
                        return (err);
                }

                osp = (*bufp)->b_data;

                dsl_scan_visitdnode(scn, ds, osp->os_type,
                    &osp->os_meta_dnode, *bufp, DMU_META_DNODE_OBJECT, tx);

                if (OBJSET_BUF_HAS_USERUSED(*bufp)) {
                        /*
                         * We also always visit user/group accounting
                         * objects, and never skip them, even if we are
                         * pausing.  This is necessary so that the space
                         * deltas from this txg get integrated.
                         */
                        dsl_scan_visitdnode(scn, ds, osp->os_type,
                            &osp->os_groupused_dnode, *bufp,
                            DMU_GROUPUSED_OBJECT, tx);
                        dsl_scan_visitdnode(scn, ds, osp->os_type,
                            &osp->os_userused_dnode, *bufp,
                            DMU_USERUSED_OBJECT, tx);
                }
        }

        return (0);
}

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t object, dmu_tx_t *tx)
{
        int j;

        for (j = 0; j < dnp->dn_nblkptr; j++) {
                zbookmark_t czb;

                SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
                    dnp->dn_nlevels - 1, j);
                dsl_scan_visitbp(&dnp->dn_blkptr[j],
                    &czb, dnp, buf, ds, scn, ostype, tx);
        }

        if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
                zbookmark_t czb;
                SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
                    0, DMU_SPILL_BLKID);
                dsl_scan_visitbp(&dnp->dn_spill,
                    &czb, dnp, buf, ds, scn, ostype, tx);
        }
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_t *zb,
    dnode_phys_t *dnp, arc_buf_t *pbuf,
    dsl_dataset_t *ds, dsl_scan_t *scn, dmu_objset_type_t ostype,
    dmu_tx_t *tx)
{
        dsl_pool_t *dp = scn->scn_dp;
        arc_buf_t *buf = NULL;
        blkptr_t bp_toread = *bp;

        /* ASSERT(pbuf == NULL || arc_released(pbuf)); */

        if (dsl_scan_check_pause(scn, zb))
                return;

        if (dsl_scan_check_resume(scn, dnp, zb))
                return;

        if (BP_IS_HOLE(bp))
                return;

        scn->scn_visited_this_txg++;

        dprintf_bp(bp,
            "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx buf=%p bp=%p",
            ds, ds ? ds->ds_object : 0,
            zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
            pbuf, bp);

        if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
                return;

        if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx,
            &buf) != 0)
                return;

        /*
         * If dsl_scan_ddt() has already visited this block, it will have
         * already done any translations or scrubbing, so don't call the
         * callback again.
         */
        if (ddt_class_contains(dp->dp_spa,
            scn->scn_phys.scn_ddt_class_max, bp)) {
                ASSERT(buf == NULL);
                return;
        }

        /*
         * If this block is from the future (after cur_max_txg), then we
         * are doing this on behalf of a deleted snapshot, and we will
         * revisit the future block on the next pass of this dataset.
         * Don't scan it now unless we need to because something
         * under it was modified.
         */
        if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
                scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
        }
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
        zbookmark_t zb;

        SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
            ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
        dsl_scan_visitbp(bp, &zb, NULL, NULL,
            ds, scn, DMU_OST_NONE, tx);

        dprintf_ds(ds, "finished scan%s", "");
}

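/*
 * Dataset-event hooks: dsl_scan_ds_destroyed(), dsl_scan_ds_snapshotted()
 * and dsl_scan_ds_clone_swapped() are called from the corresponding DSL
 * operations so that the scan's bookmark and work queue keep pointing at
 * valid objsets while a scan is in progress.
 */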
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        dsl_scan_t *scn = dp->dp_scan;
        uint64_t mintxg;

        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return;

        if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
                if (dsl_dataset_is_snapshot(ds)) {
                        /* Note, scn_cur_{min,max}_txg stays the same. */
                        scn->scn_phys.scn_bookmark.zb_objset =
                            ds->ds_phys->ds_next_snap_obj;
                        zfs_dbgmsg("destroying ds %llu; currently traversing; "
                            "reset zb_objset to %llu",
                            (u_longlong_t)ds->ds_object,
                            (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
                        scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
                } else {
                        SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
                            ZB_DESTROYED_OBJSET, 0, 0, 0);
                        zfs_dbgmsg("destroying ds %llu; currently traversing; "
                            "reset bookmark to -1,0,0,0",
                            (u_longlong_t)ds->ds_object);
                }
        } else if (zap_lookup_int_key(dp->dp_meta_objset,
            scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
                ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
                if (dsl_dataset_is_snapshot(ds)) {
                        /*
                         * We keep the same mintxg; it could be >
                         * ds_creation_txg if the previous snapshot was
                         * deleted too.
                         */
                        VERIFY(zap_add_int_key(dp->dp_meta_objset,
                            scn->scn_phys.scn_queue_obj,
                            ds->ds_phys->ds_next_snap_obj, mintxg, tx) == 0);
                        zfs_dbgmsg("destroying ds %llu; in queue; "
                            "replacing with %llu",
                            (u_longlong_t)ds->ds_object,
                            (u_longlong_t)ds->ds_phys->ds_next_snap_obj);
                } else {
                        zfs_dbgmsg("destroying ds %llu; in queue; removing",
                            (u_longlong_t)ds->ds_object);
                }
        } else {
                zfs_dbgmsg("destroying ds %llu; ignoring",
                    (u_longlong_t)ds->ds_object);
        }

        /*
         * dsl_scan_sync() should be called after this, and should sync
         * out our changed state, but just to be safe, do it here.
         */
        dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        dsl_scan_t *scn = dp->dp_scan;
        uint64_t mintxg;

        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
                scn->scn_phys.scn_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
                zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
                    "reset zb_objset to %llu",
                    (u_longlong_t)ds->ds_object,
                    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
        } else if (zap_lookup_int_key(dp->dp_meta_objset,
            scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
                VERIFY(zap_add_int_key(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, mintxg, tx) == 0);
                zfs_dbgmsg("snapshotting ds %llu; in queue; "
                    "replacing with %llu",
                    (u_longlong_t)ds->ds_object,
                    (u_longlong_t)ds->ds_phys->ds_prev_snap_obj);
        }
        dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds1->ds_dir->dd_pool;
        dsl_scan_t *scn = dp->dp_scan;
        uint64_t mintxg;

        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return;

        if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
                scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
                zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
                    "reset zb_objset to %llu",
                    (u_longlong_t)ds1->ds_object,
                    (u_longlong_t)ds2->ds_object);
        } else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
                scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
                zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
                    "reset zb_objset to %llu",
                    (u_longlong_t)ds2->ds_object,
                    (u_longlong_t)ds1->ds_object);
        }

        if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
            ds1->ds_object, &mintxg) == 0) {
                int err;

                ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
                ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
                err = zap_add_int_key(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
                VERIFY(err == 0 || err == EEXIST);
                if (err == EEXIST) {
                        /* Both were there to begin with */
                        VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
                            scn->scn_phys.scn_queue_obj,
                            ds1->ds_object, mintxg, tx));
                }
                zfs_dbgmsg("clone_swap ds %llu; in queue; "
                    "replacing with %llu",
                    (u_longlong_t)ds1->ds_object,
                    (u_longlong_t)ds2->ds_object);
        } else if (zap_lookup_int_key(dp->dp_meta_objset,
            scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
                ASSERT3U(mintxg, ==, ds1->ds_phys->ds_prev_snap_txg);
                ASSERT3U(mintxg, ==, ds2->ds_phys->ds_prev_snap_txg);
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
                VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
                zfs_dbgmsg("clone_swap ds %llu; in queue; "
                    "replacing with %llu",
                    (u_longlong_t)ds2->ds_object,
                    (u_longlong_t)ds1->ds_object);
        }

        dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_scan_t *scn = dp->dp_scan;

        if (hds->ds_dir->dd_phys->dd_origin_obj != eca->originobj)
                return (0);

        err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
        if (err)
                return (err);

        while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp,
                    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                dsl_dataset_rele(ds, FTAG);
                if (err)
                        return (err);
                ds = prev;
        }
        VERIFY(zap_add_int_key(dp->dp_meta_objset,
            scn->scn_phys.scn_queue_obj, ds->ds_object,
            ds->ds_phys->ds_prev_snap_txg, eca->tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

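/*
 * Scan a single dataset: traverse its ZIL (head datasets only), then its
 * block tree from the root bp.  If the dataset was visited completely
 * (not paused), enqueue its next snapshot and any clones so the scan
 * proceeds in creation-txg order.
 */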
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_pool_t *dp = scn->scn_dp;
        dsl_dataset_t *ds;
        objset_t *os;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        if (dmu_objset_from_ds(ds, &os))
                goto out;

        /*
         * Only the ZIL in the head (non-snapshot) is valid.  Even though
         * snapshots can have ZIL block pointers (which may be the same
         * BP as in the head), they must be ignored.  So we traverse the
         * ZIL here, rather than in scan_recurse(), because the regular
         * snapshot block-sharing rules don't apply to it.
         */
        if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds))
                dsl_scan_zil(dp, &os->os_zil_header);

        /*
         * Iterate over the bps in this ds.
         */
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx);

        char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP);
        dsl_dataset_name(ds, dsname);
        zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
            "pausing=%u",
            (longlong_t)dsobj, dsname,
            (longlong_t)scn->scn_phys.scn_cur_min_txg,
            (longlong_t)scn->scn_phys.scn_cur_max_txg,
            (int)scn->scn_pausing);
        kmem_free(dsname, ZFS_MAXNAMELEN);

        if (scn->scn_pausing)
                goto out;

        /*
         * We've finished this pass over this dataset.
         */

        /*
         * If we did not completely visit this dataset, do another pass.
         */
        if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
                zfs_dbgmsg("incomplete pass; visiting again");
                scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
                VERIFY(zap_add_int_key(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds->ds_object,
                    scn->scn_phys.scn_cur_max_txg, tx) == 0);
                goto out;
        }

        /*
         * Add descendent datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int_key(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, ds->ds_phys->ds_next_snap_obj,
                    ds->ds_phys->ds_creation_txg, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                boolean_t usenext = B_FALSE;
                if (ds->ds_phys->ds_next_clones_obj != 0) {
                        uint64_t count;
                        /*
                         * A bug in a previous version of the code could
                         * cause upgrade_clones_cb() to not set
                         * ds_next_snap_obj when it should, leading to a
                         * missing entry.  Therefore we can only use the
                         * next_clones_obj when its count is correct.
                         */
                        int err = zap_count(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj, &count);
                        if (err == 0 &&
                            count == ds->ds_phys->ds_num_children - 1)
                                usenext = B_TRUE;
                }

                if (usenext) {
                        VERIFY0(zap_join_key(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            scn->scn_phys.scn_queue_obj,
                            ds->ds_phys->ds_creation_txg, tx));
                } else {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
                            enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}

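/*
 * dmu_objset_find_dp() callback used on pre-SPA_VERSION_DSL_SCRUB pools:
 * walk each dataset back to its oldest non-clone ancestor and enqueue
 * that, seeding the work queue with the roots of all snapshot chains.
 */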
/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_scan_t *scn = dp->dp_scan;

        err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
        if (err)
                return (err);

        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
            ds->ds_object, ds->ds_phys->ds_prev_snap_txg, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e., DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
        ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
        ddt_entry_t dde = { 0 };
        int error;
        uint64_t n = 0;

        while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
                ddt_t *ddt;

                if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
                        break;
                dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
                    (longlong_t)ddb->ddb_class,
                    (longlong_t)ddb->ddb_type,
                    (longlong_t)ddb->ddb_checksum,
                    (longlong_t)ddb->ddb_cursor);

                /* There should be no pending changes to the dedup table */
                ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
                ASSERT(avl_first(&ddt->ddt_tree) == NULL);

                dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
                n++;

                if (dsl_scan_check_pause(scn, NULL))
                        break;
        }

        zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
            (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
            (int)scn->scn_pausing);

        ASSERT(error == 0 || error == ENOENT);
        ASSERT(error != ENOENT ||
            ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

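/*
 * Scrub one DDT entry: construct a blkptr for each allocated dedup
 * physical variant (ddt_phys_t) born within the scan range and feed it
 * to the scan callback.  Also invoked from ddt_sync_entry() when an
 * entry's refcnt crosses from 1 to > 1 while a scan is in progress (see
 * the comment above dsl_scan_ddt()).
 */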
/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
        const ddt_key_t *ddk = &dde->dde_key;
        ddt_phys_t *ddp = dde->dde_phys;
        blkptr_t bp;
        zbookmark_t zb = { 0 };

        if (scn->scn_phys.scn_state != DSS_SCANNING)
                return;

        for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
                if (ddp->ddp_phys_birth == 0 ||
                    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
                        continue;
                ddt_bp_create(checksum, ddk, ddp, &bp);

                scn->scn_visited_this_txg++;
                scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
        }
}

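/*
 * One txg's worth of scanning: first finish the DDT phase (blocks with
 * refcnt > 1), then the top-down phase starting at the MOS and $ORIGIN,
 * and finally keep draining datasets from the ZAP work queue until we
 * pause or the queue is empty.
 */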
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
        dsl_pool_t *dp = scn->scn_dp;
        zap_cursor_t zc;
        zap_attribute_t za;

        if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
            scn->scn_phys.scn_ddt_class_max) {
                scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
                scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
                dsl_scan_ddt(scn, tx);
                if (scn->scn_pausing)
                        return;
        }

        if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
                /* First do the MOS & ORIGIN */

                scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
                scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
                dsl_scan_visit_rootbp(scn, NULL,
                    &dp->dp_meta_rootbp, tx);
                spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
                if (scn->scn_pausing)
                        return;

                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
                            enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        dsl_scan_visitds(scn,
                            dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!scn->scn_pausing);
        } else if (scn->scn_phys.scn_bookmark.zb_objset !=
            ZB_DESTROYED_OBJSET) {
                /*
                 * If we were paused, continue from here.  Note if the
                 * ds we were paused on was deleted, the zb_objset may
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
                if (scn->scn_pausing)
                        return;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
        while (zap_cursor_init(&zc, dp->dp_meta_objset,
            scn->scn_phys.scn_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                dsl_dataset_t *ds;
                uint64_t dsobj;

                dsobj = strtonum(za.za_name, NULL);
                VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
                    scn->scn_phys.scn_queue_obj, dsobj, tx));

                /* Set up min/max txg */
                VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
                if (za.za_first_integer != 0) {
                        scn->scn_phys.scn_cur_min_txg =
                            MAX(scn->scn_phys.scn_min_txg,
                            za.za_first_integer);
                } else {
                        scn->scn_phys.scn_cur_min_txg =
                            MAX(scn->scn_phys.scn_min_txg,
                            ds->ds_phys->ds_prev_snap_txg);
                }
                scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
                dsl_dataset_rele(ds, FTAG);

                dsl_scan_visitds(scn, dsobj, tx);
                zap_cursor_fini(&zc);
                if (scn->scn_pausing)
                        return;
        }
        zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
        uint64_t elapsed_nanosecs;

        elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
        return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
            (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
            txg_sync_waiting(scn->scn_dp)) ||
            spa_shutting_down(scn->scn_dp->dp_spa));
}

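/*
 * bpobj/bptree iteration callback for background freeing: issue a free
 * zio for each block pointer and credit the space back against
 * dp_free_dir, returning ERESTART when dsl_scan_free_should_pause()
 * says this txg has done enough work.
 */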
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
        dsl_scan_t *scn = arg;

        if (!scn->scn_is_bptree ||
            (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
                if (dsl_scan_free_should_pause(scn))
                        return (SET_ERROR(ERESTART));
        }

        zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
            dmu_tx_get_txg(tx), bp, 0));
        dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
            -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
            -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
        scn->scn_visited_this_txg++;
        return (0);
}

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
        spa_t *spa = scn->scn_dp->dp_spa;
        uint64_t used = 0, comp, uncomp;

        if (spa->spa_load_state != SPA_LOAD_NONE)
                return (B_FALSE);
        if (spa_shutting_down(spa))
                return (B_FALSE);
        if (scn->scn_phys.scn_state == DSS_SCANNING ||
            scn->scn_async_destroying)
                return (B_TRUE);

        if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
                (void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
                    &used, &comp, &uncomp);
        }
        return (used != 0);
}

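/*
 * Per-txg entry point (called from the pool sync path): restart a scan
 * if one was requested, process the free lists (free_bpobj and, with
 * async destroy, the bptree), and then perform scan work for this txg,
 * pausing according to dsl_scan_check_pause().
 */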
1368 void
1369 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
1370 {
1371         dsl_scan_t *scn = dp->dp_scan;
1372         spa_t *spa = dp->dp_spa;
1373         int err;
1374
1375         /*
1376          * Check for scn_restart_txg before checking spa_load_state, so
1377          * that we can restart an old-style scan while the pool is being
1378          * imported (see dsl_scan_init).
1379          */
1380         if (scn->scn_restart_txg != 0 &&
1381             scn->scn_restart_txg <= tx->tx_txg) {
1382                 pool_scan_func_t func = POOL_SCAN_SCRUB;
1383                 dsl_scan_done(scn, B_FALSE, tx);
1384                 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
1385                         func = POOL_SCAN_RESILVER;
1386                 zfs_dbgmsg("restarting scan func=%u txg=%llu",
1387                     func, (longlong_t)tx->tx_txg);
1388                 dsl_scan_setup_sync(&func, tx);
1389         }
1390
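        /* No scan work remains, or we're past sync pass 1: nothing to do. */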
1391         if (!dsl_scan_active(scn) ||
1392             spa_sync_pass(dp->dp_spa) > 1)
1393                 return;
1394
1395         scn->scn_visited_this_txg = 0;
1396         scn->scn_pausing = B_FALSE;
1397         scn->scn_sync_start_time = gethrtime();
1398         spa->spa_scrub_active = B_TRUE;
1399
1400         /*
1401          * First process the free list.  If we pause the free, don't do
1402          * any scanning.  This ensures that there is no free list when
1403          * we are scanning, so the scan code doesn't have to worry about
1404          * traversing it.
1405          */
1406         if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
1407                 scn->scn_is_bptree = B_FALSE;
1408                 scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1409                     NULL, ZIO_FLAG_MUSTSUCCEED);
1410                 err = bpobj_iterate(&dp->dp_free_bpobj,
1411                     dsl_scan_free_block_cb, scn, tx);
1412                 VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));
1413
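                /*
                 * The free bpobj drained completely; if async destroy is
                 * active, continue with the blocks queued in its bptree.
                 */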
1414                 if (err == 0 && spa_feature_is_active(spa,
1415                     SPA_FEATURE_ASYNC_DESTROY)) {
1416                         ASSERT(scn->scn_async_destroying);
1417                         scn->scn_is_bptree = B_TRUE;
1418                         scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1419                             NULL, ZIO_FLAG_MUSTSUCCEED);
1420                         err = bptree_iterate(dp->dp_meta_objset,
1421                             dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb,
1422                             scn, tx);
1423                         VERIFY0(zio_wait(scn->scn_zio_root));
1424
1425                         if (err == 0) {
1426                                 /* finished; deactivate async destroy feature */
1427                                 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY,
1428                                     tx);
1429                                 ASSERT(!spa_feature_is_active(spa,
1430                                     SPA_FEATURE_ASYNC_DESTROY));
1431                                 VERIFY0(zap_remove(dp->dp_meta_objset,
1432                                     DMU_POOL_DIRECTORY_OBJECT,
1433                                     DMU_POOL_BPTREE_OBJ, tx));
1434                                 VERIFY0(bptree_free(dp->dp_meta_objset,
1435                                     dp->dp_bptree_obj, tx));
1436                                 dp->dp_bptree_obj = 0;
1437                                 scn->scn_async_destroying = B_FALSE;
1438                         }
1439                 }
1440                 if (scn->scn_visited_this_txg) {
1441                         zfs_dbgmsg("freed %llu blocks in %llums from "
1442                             "free_bpobj/bptree txg %llu",
1443                             (longlong_t)scn->scn_visited_this_txg,
1444                             (longlong_t)
1445                             NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
1446                             (longlong_t)tx->tx_txg);
1447                         scn->scn_visited_this_txg = 0;
1448                         /*
1449                          * Re-sync the ddt so that we can further modify
1450                          * it when doing bprewrite.
1451                          */
1452                         ddt_sync(spa, tx->tx_txg);
1453                 }
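                /* The free was paused; do no scanning this txg. */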
1454                 if (err == ERESTART)
1455                         return;
1456         }
1457
1458         if (scn->scn_phys.scn_state != DSS_SCANNING)
1459                 return;
1460
1461         if (scn->scn_done_txg == tx->tx_txg) {
1462                 ASSERT(!scn->scn_pausing);
1463                 /* finished with scan. */
1464                 zfs_dbgmsg("txg %llu scan complete", (longlong_t)tx->tx_txg);
1465                 dsl_scan_done(scn, B_TRUE, tx);
1466                 ASSERT3U(spa->spa_scrub_inflight, ==, 0);
1467                 dsl_scan_sync_state(scn, tx);
1468                 return;
1469         }
1470
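        /*
         * Dedup'd blocks are scanned first, by walking the DDT; while the
         * DDT bookmark is still within scn_ddt_class_max, the ordinary
         * dataset traversal has not yet begun, so its bookmark must still
         * be zeroed out.
         */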
1471         if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
1472             scn->scn_phys.scn_ddt_class_max) {
1473                 zfs_dbgmsg("doing scan sync txg %llu; "
1474                     "ddt bm=%llu/%llu/%llu/%llx",
1475                     (longlong_t)tx->tx_txg,
1476                     (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
1477                     (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
1478                     (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
1479                     (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
1480                 ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
1481                 ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
1482                 ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
1483                 ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
1484         } else {
1485                 zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
1486                     (longlong_t)tx->tx_txg,
1487                     (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
1488                     (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
1489                     (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
1490                     (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
1491         }
1492
1493         scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
1494             NULL, ZIO_FLAG_CANFAIL);
1495         dsl_pool_config_enter(dp, FTAG);
1496         dsl_scan_visit(scn, tx);
1497         dsl_pool_config_exit(dp, FTAG);
1498         (void) zio_wait(scn->scn_zio_root);
1499         scn->scn_zio_root = NULL;
1500
1501         zfs_dbgmsg("visited %llu blocks in %llums",
1502             (longlong_t)scn->scn_visited_this_txg,
1503             (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));
1504
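        /*
         * Traversal finished; defer completion to the next txg, when the
         * scn_done_txg check above will mark the scan done.
         */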
1505         if (!scn->scn_pausing) {
1506                 scn->scn_done_txg = tx->tx_txg + 1;
1507                 zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
1508                     (longlong_t)tx->tx_txg, (longlong_t)scn->scn_done_txg);
1509         }
1510
1511         if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
1512                 mutex_enter(&spa->spa_scrub_lock);
1513                 while (spa->spa_scrub_inflight > 0) {
1514                         cv_wait(&spa->spa_scrub_io_cv,
1515                             &spa->spa_scrub_lock);
1516                 }
1517                 mutex_exit(&spa->spa_scrub_lock);
1518         }
1519
1520         dsl_scan_sync_state(scn, tx);
1521 }
1522
1523 /*
1524  * Schedule the start of a new scan, or the restart of an existing one.
1525  */
1526 void
1527 dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
1528 {
1529         if (txg == 0) {
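                /*
                 * No txg given: create and commit a throwaway tx against
                 * the MOS directory just to learn the currently open txg.
                 */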
1530                 dmu_tx_t *tx;
1531                 tx = dmu_tx_create_dd(dp->dp_mos_dir);
1532                 VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
1533
1534                 txg = dmu_tx_get_txg(tx);
1535                 dp->dp_scan->scn_restart_txg = txg;
1536                 dmu_tx_commit(tx);
1537         } else {
1538                 dp->dp_scan->scn_restart_txg = txg;
1539         }
1540         zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
1541 }
1542
1543 boolean_t
1544 dsl_scan_resilvering(dsl_pool_t *dp)
1545 {
1546         return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
1547             dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
1548 }
1549
1550 /*
1551  * scrub consumers
1552  */
1553
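/*
 * Tally a block into the pool-wide block statistics.  Each block is
 * counted four times: under (level, type), (level, DMU_OT_TOTAL),
 * (DN_MAX_LEVELS, type) and (DN_MAX_LEVELS, DMU_OT_TOTAL), which is what
 * the four-way loop below encodes.
 */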
1554 static void
1555 count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
1556 {
1557         int i;
1558
1559         /*
1560          * If we resume after a reboot, zab will be NULL; don't record
1561          * incomplete stats in that case.
1562          */
1563         if (zab == NULL)
1564                 return;
1565
1566         for (i = 0; i < 4; i++) {
1567                 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
1568                 int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
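                /*
                 * Flagged (new-style) object types have no slot of their
                 * own in zab_type[]; lump them under DMU_OT_OTHER.
                 */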
1569                 if (t & DMU_OT_NEWTYPE)
1570                         t = DMU_OT_OTHER;
1571                 zfs_blkstat_t *zb = &zab->zab_type[l][t];
1572                 int equal;
1573
1574                 zb->zb_count++;
1575                 zb->zb_asize += BP_GET_ASIZE(bp);
1576                 zb->zb_lsize += BP_GET_LSIZE(bp);
1577                 zb->zb_psize += BP_GET_PSIZE(bp);
1578                 zb->zb_gangs += BP_COUNT_GANG(bp);
1579
1580                 switch (BP_GET_NDVAS(bp)) {
1581                 case 2:
1582                         if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
1583                             DVA_GET_VDEV(&bp->blk_dva[1]))
1584                                 zb->zb_ditto_2_of_2_samevdev++;
1585                         break;
1586                 case 3:
1587                         equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
1588                             DVA_GET_VDEV(&bp->blk_dva[1])) +
1589                             (DVA_GET_VDEV(&bp->blk_dva[0]) ==
1590                             DVA_GET_VDEV(&bp->blk_dva[2])) +
1591                             (DVA_GET_VDEV(&bp->blk_dva[1]) ==
1592                             DVA_GET_VDEV(&bp->blk_dva[2]));
1593                         if (equal == 1)
1594                                 zb->zb_ditto_2_of_3_samevdev++;
1595                         else if (equal == 3)
1596                                 zb->zb_ditto_3_of_3_samevdev++;
1597                         break;
1598                 }
1599         }
1600 }
1601
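/*
 * Done callback for scrub/resilver reads: free the data buffer, drop the
 * in-flight count (waking any throttled issuers), and count every error
 * except an expected checksum failure on a speculative (intent log) read.
 */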
1602 static void
1603 dsl_scan_scrub_done(zio_t *zio)
1604 {
1605         spa_t *spa = zio->io_spa;
1606
1607         zio_data_buf_free(zio->io_data, zio->io_size);
1608
1609         mutex_enter(&spa->spa_scrub_lock);
1610         spa->spa_scrub_inflight--;
1611         cv_broadcast(&spa->spa_scrub_io_cv);
1612
1613         if (zio->io_error && (zio->io_error != ECKSUM ||
1614             !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
1615                 spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
1616         }
1617         mutex_exit(&spa->spa_scrub_lock);
1618 }
1619
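/*
 * Per-block callback for scrub and resilver: record statistics, decide
 * whether the block actually needs to be read (always for a scrub; for a
 * resilver only when a copy may lie in some vdev's DTL), and issue a
 * throttled read if so.
 */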
1620 static int
1621 dsl_scan_scrub_cb(dsl_pool_t *dp,
1622     const blkptr_t *bp, const zbookmark_t *zb)
1623 {
1624         dsl_scan_t *scn = dp->dp_scan;
1625         size_t size = BP_GET_PSIZE(bp);
1626         spa_t *spa = dp->dp_spa;
1627         uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
1628         boolean_t needs_io;
1629         int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
1630         int scan_delay = 0;
1631
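        /* Blocks born outside the scan's txg window need no work. */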
1632         if (phys_birth <= scn->scn_phys.scn_min_txg ||
1633             phys_birth >= scn->scn_phys.scn_max_txg)
1634                 return (0);
1635
1636         count_block(dp->dp_blkstats, bp);
1637
1638         ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
1639         if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
1640                 zio_flags |= ZIO_FLAG_SCRUB;
1641                 needs_io = B_TRUE;
1642                 scan_delay = zfs_scrub_delay;
1643         } else {
1644                 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
1645                 zio_flags |= ZIO_FLAG_RESILVER;
1646                 needs_io = B_FALSE;
1647                 scan_delay = zfs_resilver_delay;
1648         }
1649
1650         /* If it's an intent log block, failure is expected. */
1651         if (zb->zb_level == ZB_ZIL_LEVEL)
1652                 zio_flags |= ZIO_FLAG_SPECULATIVE;
1653
1654         for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
1655                 vdev_t *vd = vdev_lookup_top(spa,
1656                     DVA_GET_VDEV(&bp->blk_dva[d]));
1657
1658                 /*
1659                  * Keep track of how much data we've examined so that
1660                  * zpool(1M) status can make useful progress reports.
1661                  */
1662                 scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
1663                 spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);
1664
1665                 /* if it's a resilver, this may not be in the target range */
1666                 if (!needs_io) {
1667                         if (DVA_GET_GANG(&bp->blk_dva[d])) {
1668                                 /*
1669                                  * Gang members may be spread across multiple
1670                                  * vdevs, so the best estimate we have is the
1671                                  * scrub range, which has already been checked.
1672                                  * XXX -- it would be better to change our
1673                                  * allocation policy to ensure that all
1674                                  * gang members reside on the same vdev.
1675                                  */
1676                                 needs_io = B_TRUE;
1677                         } else {
1678                                 needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
1679                                     phys_birth, 1);
1680                         }
1681                 }
1682         }
1683
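        /*
         * Issue the read, capping the number of concurrent scan I/Os at
         * zfs_top_maxinflight per top-level vdev.
         */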
1684         if (needs_io && !zfs_no_scrub_io) {
1685                 vdev_t *rvd = spa->spa_root_vdev;
1686                 uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
1687                 void *data = zio_data_buf_alloc(size);
1688
1689                 mutex_enter(&spa->spa_scrub_lock);
1690                 while (spa->spa_scrub_inflight >= maxinflight)
1691                         cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
1692                 spa->spa_scrub_inflight++;
1693                 mutex_exit(&spa->spa_scrub_lock);
1694
1695                 /*
1696                  * If non-scan I/O was seen within the last zfs_scan_idle
1697                  * ticks, delay this read to limit the scan's impact.
1698                  */
1699                 if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
1700                         delay(scan_delay);
1701
1702                 zio_nowait(zio_read(NULL, spa, bp, data, size,
1703                     dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
1704                     zio_flags, zb));
1705         }
1706
1707         /* do not relocate this block */
1708         return (0);
1709 }
1710
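/*
 * Entry point for starting a scrub or resilver on a pool.
 */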
1711 int
1712 dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
1713 {
1714         spa_t *spa = dp->dp_spa;
1715
1716         /*
1717          * Purge all vdev caches and probe all devices.  We do this here
1718          * rather than in sync context because this requires a writer lock
1719          * on the spa_config lock, which we can't do from sync context.  The
1720          * spa_scrub_reopen flag indicates that vdev_open() should not
1721          * attempt to start another scrub.
1722          */
1723         spa_vdev_state_enter(spa, SCL_NONE);
1724         spa->spa_scrub_reopen = B_TRUE;
1725         vdev_reopen(spa->spa_root_vdev);
1726         spa->spa_scrub_reopen = B_FALSE;
1727         (void) spa_vdev_state_exit(spa, NULL, 0);
1728
1729         return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
1730             dsl_scan_setup_sync, &func, 0));
1731 }