/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1;		/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;		/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

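/*
 * Scrub callbacks, indexed by enum scrub_func; SCRUB_FUNC_NONE has no
 * callback.
 */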
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
    NULL,
    dsl_pool_scrub_clean_cb
};

#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
    (zb)->zb_objset = objset; \
    (zb)->zb_object = object; \
    (zb)->zb_level = level; \
    (zb)->zb_blkid = blkid; \
}

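/*
 * Set up a new scrub or resilver pass in syncing context: cancel any pass
 * already in progress, record the txg range to examine, create the ZAP
 * object that queues datasets still to be visited, and persist the initial
 * scrub state in the MOS pool directory.
 */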
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
    dsl_pool_t *dp = arg1;
    enum scrub_func *funcp = arg2;
    dmu_object_type_t ot = 0;
    boolean_t complete = B_FALSE;

    dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

    ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
    ASSERT(*funcp > SCRUB_FUNC_NONE);
    ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

    dp->dp_scrub_min_txg = 0;
    dp->dp_scrub_max_txg = tx->tx_txg;

    if (*funcp == SCRUB_FUNC_CLEAN) {
        vdev_t *rvd = dp->dp_spa->spa_root_vdev;

        /* rewrite all disk labels */
        vdev_config_dirty(rvd);

        if (vdev_resilver_needed(rvd,
            &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
            spa_event_notify(dp->dp_spa, NULL,
                ESC_ZFS_RESILVER_START);
            dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                tx->tx_txg);
        }

        /* zero out the scrub stats in all vdev_stat_t's */
        vdev_scrub_stat_update(rvd,
            dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
            POOL_SCRUB_EVERYTHING, B_FALSE);

        dp->dp_spa->spa_scrub_started = B_TRUE;
    }

    /* back to the generic stuff */

    if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
        ot = DMU_OT_ZAP_OTHER;

    dp->dp_scrub_func = *funcp;
    dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
        ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
    bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
    dp->dp_scrub_restart = B_FALSE;
    dp->dp_spa->spa_scrub_errors = 0;

    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
        &dp->dp_scrub_func, tx));
    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
        &dp->dp_scrub_queue_obj, tx));
    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
        &dp->dp_scrub_min_txg, tx));
    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
        &dp->dp_scrub_max_txg, tx));
    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
        &dp->dp_scrub_bookmark, tx));
    VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
        &dp->dp_spa->spa_scrub_errors, tx));

    spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
        "func=%u mintxg=%llu maxtxg=%llu",
        *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
    return (dsl_sync_task_do(dp, NULL,
        dsl_pool_scrub_setup_sync, dp, &func, 0));
}

/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
    dsl_pool_t *dp = arg1;
    boolean_t *completep = arg2;

    if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
        return;

    mutex_enter(&dp->dp_scrub_cancel_lock);

    if (dp->dp_scrub_restart) {
        dp->dp_scrub_restart = B_FALSE;
        *completep = B_FALSE;
    }

    /* XXX this is scrub-clean specific */
    mutex_enter(&dp->dp_spa->spa_scrub_lock);
    while (dp->dp_spa->spa_scrub_inflight > 0) {
        cv_wait(&dp->dp_spa->spa_scrub_io_cv,
            &dp->dp_spa->spa_scrub_lock);
    }
    mutex_exit(&dp->dp_spa->spa_scrub_lock);
    dp->dp_spa->spa_scrub_started = B_FALSE;
    dp->dp_spa->spa_scrub_active = B_FALSE;

    dp->dp_scrub_func = SCRUB_FUNC_NONE;
    VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
        dp->dp_scrub_queue_obj, tx));
    dp->dp_scrub_queue_obj = 0;
    bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_QUEUE, tx));
    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_MIN_TXG, tx));
    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_MAX_TXG, tx));
    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_BOOKMARK, tx));
    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_FUNC, tx));
    VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_ERRORS, tx));

    spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
        "complete=%u", *completep);

    /* below is scrub-clean specific */
    vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
        *completep);
    /*
     * If the scrub/resilver completed, update all DTLs to reflect this.
     * Whether it succeeded or not, vacate all temporary scrub DTLs.
     */
    vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
        *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
    if (dp->dp_scrub_min_txg && *completep)
        spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH);
    spa_errlog_rotate(dp->dp_spa);

    /*
     * We may have finished replacing a device.
     * Let the async thread assess this and handle the detach.
     */
    spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

    dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
    mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
    boolean_t complete = B_FALSE;

    return (dsl_sync_task_do(dp, NULL,
        dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
    /*
     * This function will be used by bp-rewrite wad to intercept frees.
     */
    return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
        done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
    return (zb->zb_objset == 0 && zb->zb_object == 0 &&
        zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
    uint64_t zb1nextL0, zb2thisobj;

    ASSERT(zb1->zb_objset == zb2->zb_objset);
    ASSERT(zb1->zb_object != -1ULL);
    ASSERT(zb2->zb_level == 0);

    /*
     * A bookmark in the deadlist is considered to be after
     * everything else.
     */
    if (zb2->zb_object == -1ULL)
        return (B_TRUE);

    /* The objset_phys_t isn't before anything. */
    if (dnp == NULL)
        return (B_FALSE);

    zb1nextL0 = (zb1->zb_blkid + 1) <<
        ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

    zb2thisobj = zb2->zb_object ? zb2->zb_object :
        zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

    if (zb1->zb_object == 0) {
        uint64_t nextobj = zb1nextL0 *
            (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
        return (nextobj <= zb2thisobj);
    }

    if (zb1->zb_object < zb2thisobj)
        return (B_TRUE);
    if (zb1->zb_object > zb2thisobj)
        return (B_FALSE);
    if (zb2->zb_object == 0)
        return (B_FALSE);
    return (zb1nextL0 <= zb2->zb_blkid);
}

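/*
 * Decide whether the traversal should pause for this txg: once we have
 * scrubbed for zfs_txg_timeout seconds, or for the per-pass minimum
 * (zfs_scrub_min_time / zfs_resilver_min_time) while the txg sync is
 * waiting on us, we record the current bookmark and pause until the
 * next txg.
 */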
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
    int elapsed_ticks;
    int mintime;

    if (dp->dp_scrub_pausing)
        return (B_TRUE); /* we're already pausing */

    if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
        return (B_FALSE); /* we're resuming */

    /* We only know how to resume from level-0 blocks. */
    if (zb->zb_level != 0)
        return (B_FALSE);

    mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
        zfs_scrub_min_time;
    elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
    if (elapsed_ticks > hz * zfs_txg_timeout ||
        (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
        dprintf("pausing at %llx/%llx/%llx/%llx\n",
            (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
            (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
        dp->dp_scrub_pausing = B_TRUE;
        dp->dp_scrub_bookmark = *zb;
    }
    return (B_FALSE);
}

typedef struct zil_traverse_arg {
    dsl_pool_t *zta_dp;
    zil_header_t *zta_zh;
} zil_traverse_arg_t;

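/*
 * Intent log (ZIL) blocks are not referenced by the dataset's block tree,
 * so they are visited separately through zil_parse() using the two
 * callbacks below.
 */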
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
    zil_traverse_arg_t *zta = arg;
    dsl_pool_t *dp = zta->zta_dp;
    zil_header_t *zh = zta->zta_zh;
    zbookmark_t zb;

    if (bp->blk_birth <= dp->dp_scrub_min_txg)
        return;
    if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
        return;

    zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
    zb.zb_object = 0;
    zb.zb_level = -1;
    zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
    VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
    if (lrc->lrc_txtype == TX_WRITE) {
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        lr_write_t *lr = (lr_write_t *)lrc;
        blkptr_t *bp = &lr->lr_blkptr;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
            return;
        if (claim_txg == 0 || bp->blk_birth < claim_txg)
            return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = lr->lr_foid;
        zb.zb_level = BP_GET_LEVEL(bp);
        zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
    }
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
    uint64_t claim_txg = zh->zh_claim_txg;
    zil_traverse_arg_t zta = { dp, zh };
    zilog_t *zilog;

    /*
     * We only want to visit blocks that have been claimed but not yet
     * replayed (or, in read-only mode, blocks that *would* be claimed).
     */
    if (claim_txg == 0 && (spa_mode & FWRITE))
        return;

    zilog = zil_alloc(dp->dp_meta_objset, zh);
    (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
        claim_txg);
    zil_free(zilog);
}

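/*
 * Recursively visit a block pointer and everything beneath it.  Indirect
 * blocks, dnode arrays and objset blocks are read through the ARC and
 * their children visited in bookmark order; each block is then passed to
 * the active scrub callback.  The bookmark checks below implement
 * pause/resume across txgs.
 */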
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
    int err;
    arc_buf_t *buf = NULL;

    if (bp->blk_birth == 0)
        return;

    if (bp->blk_birth <= dp->dp_scrub_min_txg)
        return;

    if (scrub_pause(dp, zb))
        return;

    if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
        /*
         * If we already visited this bp & everything below (in
         * a prior txg), don't bother doing it again.
         */
        if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
            return;

        /*
         * If we found the block we're trying to resume from, or
         * we went past it to a different object, zero it out to
         * indicate that it's OK to start checking for pausing
         * again.
         */
        if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
            zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
            dprintf("resuming at %llx/%llx/%llx/%llx\n",
                (longlong_t)zb->zb_objset,
                (longlong_t)zb->zb_object,
                (longlong_t)zb->zb_level,
                (longlong_t)zb->zb_blkid);
            bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
        }
    }

    if (BP_GET_LEVEL(bp) > 0) {
        uint32_t flags = ARC_WAIT;
        int i;
        blkptr_t *cbp;
        int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

        err = arc_read(NULL, dp->dp_spa, bp, pbuf,
            arc_getbuf_func, &buf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
        if (err) {
            mutex_enter(&dp->dp_spa->spa_scrub_lock);
            dp->dp_spa->spa_scrub_errors++;
            mutex_exit(&dp->dp_spa->spa_scrub_lock);
            return;
        }
        cbp = buf->b_data;

        for (i = 0; i < epb; i++, cbp++) {
            zbookmark_t czb;

            SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                zb->zb_level - 1,
                zb->zb_blkid * epb + i);
            scrub_visitbp(dp, dnp, buf, cbp, &czb);
        }
    } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
        uint32_t flags = ARC_WAIT;
        dnode_phys_t *child_dnp;
        int i, j;
        int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

        err = arc_read(NULL, dp->dp_spa, bp, pbuf,
            arc_getbuf_func, &buf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
        if (err) {
            mutex_enter(&dp->dp_spa->spa_scrub_lock);
            dp->dp_spa->spa_scrub_errors++;
            mutex_exit(&dp->dp_spa->spa_scrub_lock);
            return;
        }
        child_dnp = buf->b_data;

        for (i = 0; i < epb; i++, child_dnp++) {
            for (j = 0; j < child_dnp->dn_nblkptr; j++) {
                zbookmark_t czb;

                SET_BOOKMARK(&czb, zb->zb_objset,
                    zb->zb_blkid * epb + i,
                    child_dnp->dn_nlevels - 1, j);
                scrub_visitbp(dp, child_dnp, buf,
                    &child_dnp->dn_blkptr[j], &czb);
            }
        }
    } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
        uint32_t flags = ARC_WAIT;
        objset_phys_t *osp;
        int j;

        err = arc_read_nolock(NULL, dp->dp_spa, bp,
            arc_getbuf_func, &buf,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
        if (err) {
            mutex_enter(&dp->dp_spa->spa_scrub_lock);
            dp->dp_spa->spa_scrub_errors++;
            mutex_exit(&dp->dp_spa->spa_scrub_lock);
            return;
        }

        osp = buf->b_data;

        traverse_zil(dp, &osp->os_zil_header);

        for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
            zbookmark_t czb;

            SET_BOOKMARK(&czb, zb->zb_objset, 0,
                osp->os_meta_dnode.dn_nlevels - 1, j);
            scrub_visitbp(dp, &osp->os_meta_dnode, buf,
                &osp->os_meta_dnode.dn_blkptr[j], &czb);
        }
    }

    (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
    if (buf)
        (void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
    zbookmark_t zb;

    SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
    scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

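/*
 * The next two hooks keep the scrub bookmark and the work queue consistent
 * when a dataset is destroyed or snapshotted while a scrub is in progress.
 */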
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
    dsl_pool_t *dp = ds->ds_dir->dd_pool;

    if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
        return;

    if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
        SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
    } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
        ds->ds_object, tx) != 0) {
        return;
    }

    if (ds->ds_phys->ds_next_snap_obj != 0) {
        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_phys->ds_next_snap_obj, tx) == 0);
    }
    ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
    dsl_pool_t *dp = ds->ds_dir->dd_pool;

    if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
        return;

    ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

    if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
        dp->dp_scrub_bookmark.zb_objset =
            ds->ds_phys->ds_prev_snap_obj;
    } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
        ds->ds_object, tx) == 0) {
        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_phys->ds_prev_snap_obj, tx) == 0);
    }
}

struct enqueue_clones_arg {
    dmu_tx_t *tx;
    uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
    struct enqueue_clones_arg *eca = arg;
    dsl_dataset_t *ds;
    int err;
    dsl_pool_t *dp;

    err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
    if (err)
        return (err);
    dp = ds->ds_dir->dd_pool;

    if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
        while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
            dsl_dataset_t *prev;
            err = dsl_dataset_hold_obj(dp,
                ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

            dsl_dataset_rele(ds, FTAG);
            if (err)
                return (err);
            ds = prev;
        }
        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, eca->tx) == 0);
    }
    dsl_dataset_rele(ds, FTAG);
    return (0);
}

static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
    dsl_dataset_t *ds;
    uint64_t min_txg_save;

    VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

    /*
     * Iterate over the bps in this ds.
     */
    min_txg_save = dp->dp_scrub_min_txg;
    dp->dp_scrub_min_txg =
        MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
    scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
    dp->dp_scrub_min_txg = min_txg_save;

    if (dp->dp_scrub_pausing)
        goto out;

    /*
     * Add descendent datasets to work queue.
     */
    if (ds->ds_phys->ds_next_snap_obj != 0) {
        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_phys->ds_next_snap_obj, tx) == 0);
    }
    if (ds->ds_phys->ds_num_children > 1) {
        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
            struct enqueue_clones_arg eca;
            eca.tx = tx;
            eca.originobj = ds->ds_object;

            (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
        } else {
            VERIFY(zap_join(dp->dp_meta_objset,
                ds->ds_phys->ds_next_clones_obj,
                dp->dp_scrub_queue_obj, tx) == 0);
        }
    }

out:
    dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
    dmu_tx_t *tx = arg;
    dsl_dataset_t *ds;
    int err;
    dsl_pool_t *dp;

    err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
    if (err)
        return (err);

    dp = ds->ds_dir->dd_pool;

    while (ds->ds_phys->ds_prev_snap_obj != 0) {
        dsl_dataset_t *prev;
        err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
            FTAG, &prev);
        if (err) {
            dsl_dataset_rele(ds, FTAG);
            return (err);
        }

        /*
         * If this is a clone, we don't need to worry about it for now.
         */
        if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
            dsl_dataset_rele(ds, FTAG);
            dsl_dataset_rele(prev, FTAG);
            return (0);
        }
        dsl_dataset_rele(ds, FTAG);
        ds = prev;
    }

    VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
        ds->ds_object, tx) == 0);
    dsl_dataset_rele(ds, FTAG);
    return (0);
}

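/*
 * Perform one txg's worth of scrub work in syncing context: restart the
 * pass if requested, resume from the saved bookmark, pull datasets off the
 * queue until we pause or run out of work, and persist the updated
 * bookmark and error count in the MOS.
 */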
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
    zap_cursor_t zc;
    zap_attribute_t za;
    boolean_t complete = B_TRUE;

    if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
        return;

    /* If the spa is not fully loaded, don't bother. */
    if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE)
        return;

    if (dp->dp_scrub_restart) {
        enum scrub_func func = dp->dp_scrub_func;
        dp->dp_scrub_restart = B_FALSE;
        dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
    }

    if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
        /*
         * We must have resumed after rebooting; reset the vdev
         * stats to know that we're doing a scrub (although it
         * will think we're just starting now).
         */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev,
            dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
            POOL_SCRUB_EVERYTHING, B_FALSE);
    }

    dp->dp_scrub_pausing = B_FALSE;
    dp->dp_scrub_start_time = lbolt64;
    dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
    dp->dp_spa->spa_scrub_active = B_TRUE;

    if (dp->dp_scrub_bookmark.zb_objset == 0) {
        /* First do the MOS & ORIGIN */
        scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
        if (dp->dp_scrub_pausing)
            goto out;

        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
            VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
                NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
        } else {
            scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
        }
        ASSERT(!dp->dp_scrub_pausing);
    } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
        /*
         * If we were paused, continue from here.  Note if the
         * ds we were paused on was deleted, the zb_objset will
         * be -1, so we will skip this and find a new objset
         * below.
         */
        scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
        if (dp->dp_scrub_pausing)
            goto out;
    }

    /*
     * In case we were paused right at the end of the ds, zero the
     * bookmark so we don't think that we're still trying to resume.
     */
    bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

    /* keep pulling things out of the zap-object-as-queue */
    while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
        zap_cursor_retrieve(&zc, &za) == 0) {
        VERIFY(0 == zap_remove(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, za.za_name, tx));
        scrub_visitds(dp, za.za_first_integer, tx);
        if (dp->dp_scrub_pausing)
            break;
        zap_cursor_fini(&zc);
    }
    zap_cursor_fini(&zc);
    if (dp->dp_scrub_pausing)
        goto out;

    /* done. */
    dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
    return;
out:
    VERIFY(0 == zap_update(dp->dp_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
        &dp->dp_scrub_bookmark, tx));
    VERIFY(0 == zap_update(dp->dp_meta_objset,
        DMU_POOL_DIRECTORY_OBJECT,
        DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
        &dp->dp_spa->spa_scrub_errors, tx));

    /* XXX this is scrub-clean specific */
    mutex_enter(&dp->dp_spa->spa_scrub_lock);
    while (dp->dp_spa->spa_scrub_inflight > 0) {
        cv_wait(&dp->dp_spa->spa_scrub_io_cv,
            &dp->dp_spa->spa_scrub_lock);
    }
    mutex_exit(&dp->dp_spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
    mutex_enter(&dp->dp_scrub_cancel_lock);
    dp->dp_scrub_restart = B_TRUE;
    mutex_exit(&dp->dp_scrub_cancel_lock);
}

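/*
 * scrub consumers: the "clean" callbacks below issue the actual scrub and
 * resilver reads and account for their completion.
 */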
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
    spa_t *spa = zio->io_spa;

    zio_data_buf_free(zio->io_data, zio->io_size);

    mutex_enter(&spa->spa_scrub_lock);
    spa->spa_scrub_inflight--;
    cv_broadcast(&spa->spa_scrub_io_cv);

    if (zio->io_error && (zio->io_error != ECKSUM ||
        !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
        spa->spa_scrub_errors++;
    mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
    size_t size = BP_GET_LSIZE(bp);
    int d;
    spa_t *spa = dp->dp_spa;
    boolean_t needs_io;
    int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
    int zio_priority;

    if (dp->dp_scrub_isresilver == 0) {
        /* It's a scrub */
        zio_flags |= ZIO_FLAG_SCRUB;
        zio_priority = ZIO_PRIORITY_SCRUB;
        needs_io = B_TRUE;
    } else {
        /* It's a resilver */
        zio_flags |= ZIO_FLAG_RESILVER;
        zio_priority = ZIO_PRIORITY_RESILVER;
        needs_io = B_FALSE;
    }

    /* If it's an intent log block, failure is expected. */
    if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
        zio_flags |= ZIO_FLAG_SPECULATIVE;

    for (d = 0; d < BP_GET_NDVAS(bp); d++) {
        vdev_t *vd = vdev_lookup_top(spa,
            DVA_GET_VDEV(&bp->blk_dva[d]));

        /*
         * Keep track of how much data we've examined so that
         * zpool(1M) status can make useful progress reports.
         */
        mutex_enter(&vd->vdev_stat_lock);
        vd->vdev_stat.vs_scrub_examined +=
            DVA_GET_ASIZE(&bp->blk_dva[d]);
        mutex_exit(&vd->vdev_stat_lock);

        /* if it's a resilver, this may not be in the target range */
        if (!needs_io) {
            if (DVA_GET_GANG(&bp->blk_dva[d])) {
                /*
                 * Gang members may be spread across multiple
                 * vdevs, so the best we can do is look at the
                 * pool-wide DTL.
                 * XXX -- it would be better to change our
                 * allocation policy to ensure that this can't
                 * happen.
                 */
                vd = spa->spa_root_vdev;
            }
            needs_io = vdev_dtl_contains(&vd->vdev_dtl_map,
                bp->blk_birth, 1);
        }
    }

    if (needs_io && !zfs_no_scrub_io) {
        void *data = zio_data_buf_alloc(size);

        mutex_enter(&spa->spa_scrub_lock);
        while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
            cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
        spa->spa_scrub_inflight++;
        mutex_exit(&spa->spa_scrub_lock);

        zio_nowait(zio_read(NULL, spa, bp, data, size,
            dsl_pool_scrub_clean_done, NULL, zio_priority,
            zio_flags, zb));
    }

    /* do not relocate this block */
    return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
    /*
     * Purge all vdev caches.  We do this here rather than in sync
     * context because this requires a writer lock on the spa_config
     * lock, which we can't do from sync context.  The
     * spa_scrub_reopen flag indicates that vdev_open() should not
     * attempt to start another scrub.
     */
    spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
    dp->dp_spa->spa_scrub_reopen = B_TRUE;
    vdev_reopen(dp->dp_spa->spa_root_vdev);
    dp->dp_spa->spa_scrub_reopen = B_FALSE;
    spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

    return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}