/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;
static void scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object);

int zfs_scrub_min_time = 1;		/* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3;		/* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */

extern int zfs_txg_timeout;

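/*
 * Per-scrub-type callbacks, indexed by enum scrub_func; the
 * SCRUB_FUNC_NONE slot is intentionally NULL.
 */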
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
	NULL,
	dsl_pool_scrub_clean_cb
};

#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = objset;			\
	(zb)->zb_object = object;			\
	(zb)->zb_level = level;				\
	(zb)->zb_blkid = blkid;				\
}

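/*
 * Set up a new scrub in syncing context: tear down any scrub already in
 * progress, record the scrub func and txg range in the MOS, and create
 * the ZAP object that serves as the per-dataset work queue.
 */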
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		} else {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_SCRUB_START);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_setup_sync, dp, &func, 0));
}

/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	boolean_t *completep = arg2;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	mutex_enter(&dp->dp_scrub_cancel_lock);

	if (dp->dp_scrub_restart) {
		dp->dp_scrub_restart = B_FALSE;
		*completep = B_FALSE;
	}

	/* XXX this is scrub-clean specific */
	mutex_enter(&dp->dp_spa->spa_scrub_lock);
	while (dp->dp_spa->spa_scrub_inflight > 0) {
		cv_wait(&dp->dp_spa->spa_scrub_io_cv,
		    &dp->dp_spa->spa_scrub_lock);
	}
	mutex_exit(&dp->dp_spa->spa_scrub_lock);
	dp->dp_spa->spa_scrub_started = B_FALSE;
	dp->dp_spa->spa_scrub_active = B_FALSE;

	dp->dp_scrub_func = SCRUB_FUNC_NONE;
	VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
	    dp->dp_scrub_queue_obj, tx));
	dp->dp_scrub_queue_obj = 0;
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, tx));
	VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, tx));

	spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
	    "complete=%u", *completep);

	/* below is scrub-clean specific */
	vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
	    *completep);
	/*
	 * If the scrub/resilver completed, update all DTLs to reflect this.
	 * Whether it succeeded or not, vacate all temporary scrub DTLs.
	 */
	vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
	    *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
	if (*completep)
		spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
		    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
	spa_errlog_rotate(dp->dp_spa);

	/*
	 * We may have finished replacing a device.
	 * Let the async thread assess this and handle the detach.
	 */
	spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

	dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
	boolean_t complete = B_FALSE;

	return (dsl_sync_task_do(dp, NULL,
	    dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	/*
	 * This function will be used by bp-rewrite wad to intercept frees.
	 */
	return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
	    done, private, arc_flags));
}

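/*
 * Bookmark helpers.  A zbookmark_t records how far the traversal got
 * before pausing, so the next txg's scrub work can resume at (or skip
 * everything before) that point.
 */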
static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
	return (zb->zb_objset == 0 && zb->zb_object == 0 &&
	    zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
	uint64_t zb1nextL0, zb2thisobj;

	ASSERT(zb1->zb_objset == zb2->zb_objset);
	ASSERT(zb1->zb_object != -1ULL);
	ASSERT(zb2->zb_level == 0);

	/*
	 * A bookmark in the deadlist is considered to be after
	 * everything else.
	 */
	if (zb2->zb_object == -1ULL)
		return (B_TRUE);

	/* The objset_phys_t isn't before anything. */
	if (dnp == NULL)
		return (B_FALSE);

	zb1nextL0 = (zb1->zb_blkid + 1) <<
	    ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

	zb2thisobj = zb2->zb_object ? zb2->zb_object :
	    zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

	if (zb1->zb_object == 0) {
		uint64_t nextobj = zb1nextL0 *
		    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
		return (nextobj <= zb2thisobj);
	}

	if (zb1->zb_object < zb2thisobj)
		return (B_TRUE);
	if (zb1->zb_object > zb2thisobj)
		return (B_FALSE);
	if (zb2->zb_object == 0)
		return (B_FALSE);
	return (zb1nextL0 <= zb2->zb_blkid);
}

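/*
 * Decide whether to suspend this txg's traversal: pause if we are over
 * the txg timeout, or if we have run for at least the minimum time and
 * the txg sync thread is waiting on us.
 */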
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
	uint64_t elapsed_ticks;
	int mintime;

	if (dp->dp_scrub_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb->zb_level != 0)
		return (B_FALSE);

	mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
	    zfs_scrub_min_time;
	elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
	if (elapsed_ticks > hz * zfs_txg_timeout ||
	    (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
		dprintf("pausing at %llx/%llx/%llx/%llx\n",
		    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
		    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
		dp->dp_scrub_pausing = B_TRUE;
		dp->dp_scrub_bookmark = *zb;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_traverse_arg {
	dsl_pool_t	*zta_dp;
	zil_header_t	*zta_zh;
} zil_traverse_arg_t;

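/*
 * ZIL blocks are reachable only through the zil_header, not through
 * dnode block pointers, so the objset visit walks the log chain (and
 * the bps embedded in TX_WRITE records) explicitly.
 */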
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_traverse_arg_t *zta = arg;
	dsl_pool_t *dp = zta->zta_dp;
	zil_header_t *zh = zta->zta_zh;
	zbookmark_t zb;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	/*
	 * One block ("stumpy") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * plain scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return;

	zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
	VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_traverse_arg_t *zta = arg;
		dsl_pool_t *dp = zta->zta_dp;
		zil_header_t *zh = zta->zta_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_t zb;

		if (bp->blk_birth <= dp->dp_scrub_min_txg)
			return;

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return;

		zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
		zb.zb_object = lr->lr_foid;
		zb.zb_level = BP_GET_LEVEL(bp);
		zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
		VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
	}
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_traverse_arg_t zta = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
	    claim_txg);

	zil_free(zilog);
}

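/*
 * Recursively visit bp and everything below it: indirect blocks descend
 * to their children, dnode blocks descend into each contained dnode,
 * and objset blocks descend into the meta/userused/groupused dnodes and
 * the ZIL.  The scrub callback is applied to bp itself last.
 */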
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
	int err;
	arc_buf_t *buf = NULL;

	if (bp->blk_birth <= dp->dp_scrub_min_txg)
		return;

	if (scrub_pause(dp, zb))
		return;

	if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg), don't bother doing it again.
		 */
		if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
			return;

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
		}
	}

	if (BP_GET_LEVEL(bp) > 0) {
		uint32_t flags = ARC_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		cbp = buf->b_data;

		for (i = 0; i < epb; i++, cbp++) {
			zbookmark_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			scrub_visitbp(dp, dnp, buf, cbp, &czb);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		uint32_t flags = ARC_WAIT;
		dnode_phys_t *child_dnp;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

		err = arc_read(NULL, dp->dp_spa, bp, pbuf,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}
		child_dnp = buf->b_data;

		for (i = 0; i < epb; i++, child_dnp++) {
			scrub_visitdnode(dp, child_dnp, buf, zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		uint32_t flags = ARC_WAIT;
		objset_phys_t *osp;

		err = arc_read_nolock(NULL, dp->dp_spa, bp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err) {
			mutex_enter(&dp->dp_spa->spa_scrub_lock);
			dp->dp_spa->spa_scrub_errors++;
			mutex_exit(&dp->dp_spa->spa_scrub_lock);
			return;
		}

		osp = buf->b_data;

		traverse_zil(dp, &osp->os_zil_header);

		scrub_visitdnode(dp, &osp->os_meta_dnode,
		    buf, zb->zb_objset, 0);
		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			scrub_visitdnode(dp, &osp->os_userused_dnode,
			    buf, zb->zb_objset, 0);
			scrub_visitdnode(dp, &osp->os_groupused_dnode,
			    buf, zb->zb_objset, 0);
		}
	}

	(void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_t czb;

		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		scrub_visitbp(dp, dnp, buf, &dnp->dn_blkptr[j], &czb);
	}
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
	zbookmark_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
	scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

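/*
 * The hooks below keep the in-core bookmark and the on-disk scrub queue
 * consistent when datasets are destroyed, snapshotted, or clone-swapped
 * out from under an in-progress scrub.
 */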
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) != 0) {
		return;
	}

	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

	if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
		dp->dp_scrub_bookmark.zb_objset =
		    ds->ds_phys->ds_prev_snap_obj;
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
	}
}

void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
	} else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
		dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
	}

	if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds1->ds_object, tx) == 0) {
		int err = zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int(dp->dp_meta_objset,
			    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
		}
	} else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds2->ds_object, tx) == 0) {
		VERIFY(0 == zap_add_int(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
	}
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);
	dp = ds->ds_dir->dd_pool;

	if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
		while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
			dsl_dataset_t *prev;

			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

			dsl_dataset_rele(ds, FTAG);
			if (err)
				return (err);
			ds = prev;
		}
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_object, eca->tx) == 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

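/*
 * Scrub one dataset, then enqueue its next snapshot and, at a branch
 * point, its clones, so the traversal eventually covers every dataset.
 */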
static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_dataset_t *ds;
	uint64_t min_txg_save;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	/*
	 * Iterate over the bps in this ds.
	 */
	min_txg_save = dp->dp_scrub_min_txg;
	dp->dp_scrub_min_txg =
	    MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
	scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
	dp->dp_scrub_min_txg = min_txg_save;

	if (dp->dp_scrub_pausing)
		goto out;

	/*
	 * Add descendent datasets to work queue.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
		    ds->ds_phys->ds_next_snap_obj, tx) == 0);
	}
	if (ds->ds_phys->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (ds->ds_phys->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == ds->ds_phys->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY(zap_join(dp->dp_meta_objset,
			    ds->ds_phys->ds_next_clones_obj,
			    dp->dp_scrub_queue_obj, tx) == 0);
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			(void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
			    NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

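/*
 * Queue the oldest snapshot of each filesystem's line of descent; used
 * to seed the work queue on pools older than SPA_VERSION_DSL_SCRUB.
 */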
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_pool_t *dp;

	err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
	if (err)
		return (err);

	dp = ds->ds_dir->dd_pool;

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;

		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
	    ds->ds_object, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

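/*
 * Called once per txg in syncing context while a scrub is in progress:
 * do a bounded amount of traversal work, then either persist the paused
 * bookmark or, if the traversal finished, tear the scrub down via
 * dsl_pool_scrub_cancel_sync().
 */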
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_t *spa = dp->dp_spa;
	zap_cursor_t zc;
	zap_attribute_t za;
	boolean_t complete = B_TRUE;

	if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
		return;

	/*
	 * If the pool is not loaded, or is trying to unload, leave it alone.
	 */
	if (spa->spa_load_state != SPA_LOAD_NONE || spa_shutting_down(spa))
		return;

	if (dp->dp_scrub_restart) {
		enum scrub_func func = dp->dp_scrub_func;
		dp->dp_scrub_restart = B_FALSE;
		dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
	}

	if (spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
		/*
		 * We must have resumed after rebooting; reset the vdev
		 * stats to know that we're doing a scrub (although it
		 * will think we're just starting now).
		 */
		vdev_scrub_stat_update(spa->spa_root_vdev,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);
	}

	dp->dp_scrub_pausing = B_FALSE;
	dp->dp_scrub_start_time = lbolt64;
	dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
	spa->spa_scrub_active = B_TRUE;

	if (dp->dp_scrub_bookmark.zb_objset == 0) {
		/* First do the MOS & ORIGIN */
		scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
		if (dp->dp_scrub_pausing)
			goto out;

		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY(0 == dmu_objset_find_spa(spa,
			    NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!dp->dp_scrub_pausing);
	} else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
		/*
		 * If we were paused, continue from here.  Note if the
		 * ds we were paused on was deleted, the zb_objset will
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
		if (dp->dp_scrub_pausing)
			goto out;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		VERIFY(0 == zap_remove(dp->dp_meta_objset,
		    dp->dp_scrub_queue_obj, za.za_name, tx));
		scrub_visitds(dp, za.za_first_integer, tx);
		if (dp->dp_scrub_pausing)
			break;
		zap_cursor_fini(&zc);
	}
	zap_cursor_fini(&zc);
	if (dp->dp_scrub_pausing)
		goto out;

	/* done. */
	dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
	return;
out:
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &spa->spa_scrub_errors, tx));

	/* XXX this is scrub-clean specific */
	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight > 0)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	mutex_exit(&spa->spa_scrub_lock);
}

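/*
 * Request that the scrub be restarted from the beginning; this takes
 * effect the next time dsl_pool_scrub_sync() runs.
 */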
void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
	mutex_enter(&dp->dp_scrub_cancel_lock);
	dp->dp_scrub_restart = B_TRUE;
	mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
		spa->spa_scrub_errors++;
	mutex_exit(&spa->spa_scrub_lock);
}

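/*
 * Per-block callback for SCRUB_FUNC_CLEAN: account for the block, then
 * issue a throttled read with the scrub/resilver flags set (always for
 * a scrub; for a resilver, only when a missing-data DTL covers the
 * block's birth txg or it is a gang block).
 */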
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int zio_priority;

	ASSERT(bp->blk_birth > dp->dp_scrub_min_txg);

	if (bp->blk_birth >= dp->dp_scrub_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (dp->dp_scrub_isresilver == 0) {
		/* It's a scrub */
		zio_flags |= ZIO_FLAG_SCRUB;
		zio_priority = ZIO_PRIORITY_SCRUB;
		needs_io = B_TRUE;
	} else {
		/* It's a resilver */
		zio_flags |= ZIO_FLAG_RESILVER;
		zio_priority = ZIO_PRIORITY_RESILVER;
		needs_io = B_FALSE;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		mutex_enter(&vd->vdev_stat_lock);
		vd->vdev_stat.vs_scrub_examined +=
		    DVA_GET_ASIZE(&bp->blk_dva[d]);
		mutex_exit(&vd->vdev_stat_lock);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    bp->blk_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		void *data = zio_data_buf_alloc(size);

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_pool_scrub_clean_done, NULL, zio_priority,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches.  We do this here rather than in sync
	 * context because this requires a writer lock on the spa_config
	 * lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}