/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1; /* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3; /* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */

extern int zfs_txg_timeout;

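/*
 * Per-function scrub callbacks, indexed by enum scrub_func; the
 * SCRUB_FUNC_NONE slot is intentionally NULL.
 */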
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};

#define SET_BOOKMARK(zb, objset, object, level, blkid)  \
{                                                       \
        (zb)->zb_objset = objset;                       \
        (zb)->zb_object = object;                       \
        (zb)->zb_level = level;                         \
        (zb)->zb_blkid = blkid;                         \
}

/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        enum scrub_func *funcp = arg2;
        dmu_object_type_t ot = 0;
        boolean_t complete = B_FALSE;

        dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

        ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
        ASSERT(*funcp > SCRUB_FUNC_NONE);
        ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

        dp->dp_scrub_min_txg = 0;
        dp->dp_scrub_max_txg = tx->tx_txg;

        if (*funcp == SCRUB_FUNC_CLEAN) {
                vdev_t *rvd = dp->dp_spa->spa_root_vdev;

                /* rewrite all disk labels */
                vdev_config_dirty(rvd);

                if (vdev_resilver_needed(rvd,
                    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_RESILVER_START);
                        dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                            tx->tx_txg);
                }

                /* zero out the scrub stats in all vdev_stat_t's */
                vdev_scrub_stat_update(rvd,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);

                dp->dp_spa->spa_scrub_started = B_TRUE;
        }

        /* back to the generic stuff */

        if (dp->dp_blkstats == NULL) {
                dp->dp_blkstats =
                    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
        }
        bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

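        /*
         * Pools that predate SPA_VERSION_DSL_SCRUB don't have the
         * DMU_OT_SCRUB_QUEUE object type, so fall back to a generic ZAP.
         */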
        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        dp->dp_scrub_func = *funcp;
        dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
        dp->dp_scrub_restart = B_FALSE;
        dp->dp_spa->spa_scrub_errors = 0;

        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
            &dp->dp_scrub_func, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
            &dp->dp_scrub_queue_obj, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_min_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_max_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_setup_sync, dp, &func, 0));
}

/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        boolean_t *completep = arg2;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        mutex_enter(&dp->dp_scrub_cancel_lock);

        if (dp->dp_scrub_restart) {
                dp->dp_scrub_restart = B_FALSE;
                *completep = B_FALSE;
        }

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
        dp->dp_spa->spa_scrub_started = B_FALSE;
        dp->dp_spa->spa_scrub_active = B_FALSE;

        dp->dp_scrub_func = SCRUB_FUNC_NONE;
        VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, tx));
        dp->dp_scrub_queue_obj = 0;
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, tx));

        spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
            "complete=%u", *completep);

        /* below is scrub-clean specific */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
            *completep);
        /*
         * If the scrub/resilver completed, update all DTLs to reflect this.
         * Whether it succeeded or not, vacate all temporary scrub DTLs.
         */
        vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
            *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
        if (dp->dp_scrub_min_txg && *completep)
                spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH);
        spa_errlog_rotate(dp->dp_spa);

        /*
         * We may have finished replacing a device.
         * Let the async thread assess this and handle the detach.
         */
        spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

        dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
        boolean_t complete = B_FALSE;

        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        /*
         * This function will be used by bp-rewrite wad to intercept frees.
         */
        return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
            done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
        return (zb->zb_objset == 0 && zb->zb_object == 0 &&
            zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
        uint64_t zb1nextL0, zb2thisobj;

        ASSERT(zb1->zb_objset == zb2->zb_objset);
        ASSERT(zb1->zb_object != -1ULL);
        ASSERT(zb2->zb_level == 0);

        /*
         * A bookmark in the deadlist is considered to be after
         * everything else.
         */
        if (zb2->zb_object == -1ULL)
                return (B_TRUE);

        /* The objset_phys_t isn't before anything. */
        if (dnp == NULL)
                return (B_FALSE);

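        /*
         * Compute the first level-0 blkid beyond zb1's block: a block at
         * level L covers 2^(L * epbs) level-0 blocks, where epbs is
         * dn_indblkshift - SPA_BLKPTRSHIFT.
         */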
        zb1nextL0 = (zb1->zb_blkid + 1) <<
            ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

        zb2thisobj = zb2->zb_object ? zb2->zb_object :
            zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

        if (zb1->zb_object == 0) {
                uint64_t nextobj = zb1nextL0 *
                    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
                return (nextobj <= zb2thisobj);
        }

        if (zb1->zb_object < zb2thisobj)
                return (B_TRUE);
        if (zb1->zb_object > zb2thisobj)
                return (B_FALSE);
        if (zb2->zb_object == 0)
                return (B_FALSE);
        return (zb1nextL0 <= zb2->zb_blkid);
}

static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
        int elapsed_ticks;
        int mintime;

        if (dp->dp_scrub_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb->zb_level != 0)
                return (B_FALSE);

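        /*
         * Pause if this txg has run past the full txg timeout, or past
         * the scrub/resilver minimum while the sync thread is waiting.
         */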
        mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
            zfs_scrub_min_time;
        elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
        if (elapsed_ticks > hz * zfs_txg_timeout ||
            (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
                dprintf("pausing at %llx/%llx/%llx/%llx\n",
                    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
                    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
                dp->dp_scrub_pausing = B_TRUE;
                dp->dp_scrub_bookmark = *zb;
                return (B_TRUE);
        }
        return (B_FALSE);
}

typedef struct zil_traverse_arg {
        dsl_pool_t      *zta_dp;
        zil_header_t    *zta_zh;
} zil_traverse_arg_t;

/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        /*
         * One block ("stubby") can be allocated a long time ago; we
         * want to visit that one because it has been allocated
         * (on-disk) even if it hasn't been claimed (even though for
         * plain scrub there's nothing to do to it).
         */
        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_traverse_arg_t *zta = arg;
                dsl_pool_t *dp = zta->zta_dp;
                zil_header_t *zh = zta->zta_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (bp->blk_birth <= dp->dp_scrub_min_txg)
                        return;

                /*
                 * birth can be < claim_txg if this record's txg is
                 * already txg sync'ed (but this log block contains
                 * other records that are not synced)
                 */
                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return;

                zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
                zb.zb_object = lr->lr_foid;
                zb.zb_level = BP_GET_LEVEL(bp);
                zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
                VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
        }
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_traverse_arg_t zta = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && (spa_mode & FWRITE))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
            claim_txg);

        zil_free(zilog);
}

static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
        int err;
        arc_buf_t *buf = NULL;

        if (bp->blk_birth == 0)
                return;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (scrub_pause(dp, zb))
                return;

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg), don't bother doing it again.
                 */
                if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
                        return;

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
                }
        }

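        /*
         * Recurse depth-first: indirect blocks visit their child block
         * pointers, dnode blocks visit each dnode's block pointers, and
         * objset blocks visit the ZIL and the meta-dnode.
         */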
        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                cbp = buf->b_data;

                for (i = 0; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        scrub_visitbp(dp, dnp, buf, cbp, &czb);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *child_dnp;
                int i, j;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                child_dnp = buf->b_data;

                for (i = 0; i < epb; i++, child_dnp++) {
                        for (j = 0; j < child_dnp->dn_nblkptr; j++) {
                                zbookmark_t czb;

                                SET_BOOKMARK(&czb, zb->zb_objset,
                                    zb->zb_blkid * epb + i,
                                    child_dnp->dn_nlevels - 1, j);
                                scrub_visitbp(dp, child_dnp, buf,
                                    &child_dnp->dn_blkptr[j], &czb);
                        }
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;
                int j;

                err = arc_read_nolock(NULL, dp->dp_spa, bp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }

                osp = buf->b_data;

                traverse_zil(dp, &osp->os_zil_header);

                for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, 0,
                            osp->os_meta_dnode.dn_nlevels - 1, j);
                        scrub_visitbp(dp, &osp->os_meta_dnode, buf,
                            &osp->os_meta_dnode.dn_blkptr[j], &czb);
                }
        }

        (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
        zbookmark_t zb;

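        /* A zb_level of -1 marks the dataset's (or the MOS's) root bp. */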
        SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
        scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) != 0) {
                return;
        }

        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                dp->dp_scrub_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
        }
}

void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds1->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
        } else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
        }

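        /*
         * Swap the datasets' scrub-queue membership too.  EEXIST from
         * the first zap_add_int() means both were already queued, so
         * ds1 is simply re-added and membership is unchanged.
         */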
        if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds1->ds_object, tx) == 0) {
                int err = zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
                VERIFY(err == 0 || err == EEXIST);
                if (err == EEXIST) {
                        /* Both were there to begin with */
                        VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                            dp->dp_scrub_queue_obj, ds1->ds_object, tx));
                }
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds2->ds_object, tx) == 0) {
                VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
        }
}

struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);
        dp = ds->ds_dir->dd_pool;

        if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
                while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                        dsl_dataset_t *prev;
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                        dsl_dataset_rele(ds, FTAG);
                        if (err)
                                return (err);
                        ds = prev;
                }
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_object, eca->tx) == 0);
        }
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_dataset_t *ds;
        uint64_t min_txg_save;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        /*
         * Iterate over the bps in this ds.  Raise dp_scrub_min_txg to
         * this dataset's previous-snapshot txg so that blocks born
         * before it, which belong to earlier snapshots, are skipped.
         */
        min_txg_save = dp->dp_scrub_min_txg;
        dp->dp_scrub_min_txg =
            MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
        scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
        dp->dp_scrub_min_txg = min_txg_save;

        if (dp->dp_scrub_pausing)
                goto out;

        /*
         * Add descendent datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                            NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
                } else {
                        VERIFY(zap_join(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            dp->dp_scrub_queue_obj, tx) == 0);
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);

        dp = ds->ds_dir->dd_pool;

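        /*
         * Walk back to the oldest snapshot in this dataset's lineage and
         * enqueue that one; later snapshots get queued as each dataset
         * is visited.
         */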
        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        boolean_t complete = B_TRUE;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        /* If the spa is not fully loaded, don't bother. */
        if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE)
                return;

        if (dp->dp_scrub_restart) {
                enum scrub_func func = dp->dp_scrub_func;
                dp->dp_scrub_restart = B_FALSE;
                dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
        }

        if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
                /*
                 * We must have resumed after rebooting; reset the vdev
                 * stats to know that we're doing a scrub (although it
                 * will think we're just starting now).
                 */
                vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);
        }

        dp->dp_scrub_pausing = B_FALSE;
        dp->dp_scrub_start_time = lbolt64;
        dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
        dp->dp_spa->spa_scrub_active = B_TRUE;

        if (dp->dp_scrub_bookmark.zb_objset == 0) {
                /* First do the MOS & ORIGIN */
                scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
                if (dp->dp_scrub_pausing)
                        goto out;

                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
                            NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!dp->dp_scrub_pausing);
        } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
                /*
                 * If we were paused, continue from here.  Note if the
                 * ds we were paused on was deleted, the zb_objset will
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
                if (dp->dp_scrub_pausing)
                        goto out;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
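        /*
         * The cursor is re-initialized on each pass because both the
         * zap_remove() below and scrub_visitds() modify the queue.
         */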
        while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                VERIFY(0 == zap_remove(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, za.za_name, tx));
                scrub_visitds(dp, za.za_first_integer, tx);
                if (dp->dp_scrub_pausing)
                        break;
                zap_cursor_fini(&zc);
        }
        zap_cursor_fini(&zc);
        if (dp->dp_scrub_pausing)
                goto out;

        /* done. */

        dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
        return;
out:
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_scrub_cancel_lock);
        dp->dp_scrub_restart = B_TRUE;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
        int i;

        /*
         * If we resume after a reboot, zab will be NULL; don't record
         * incomplete stats in that case.
         */
        if (zab == NULL)
                return;

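        /*
         * Each block is counted four ways: per-level/per-type, per-level
         * totals, all-level per-type, and the grand total; DN_MAX_LEVELS
         * and DMU_OT_TOTAL serve as the "all" row and column.
         */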
        for (i = 0; i < 4; i++) {
                int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
                int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
                zfs_blkstat_t *zb = &zab->zab_type[l][t];
                int equal;

                zb->zb_count++;
                zb->zb_asize += BP_GET_ASIZE(bp);
                zb->zb_lsize += BP_GET_LSIZE(bp);
                zb->zb_psize += BP_GET_PSIZE(bp);
                zb->zb_gangs += BP_COUNT_GANG(bp);

                switch (BP_GET_NDVAS(bp)) {
                case 2:
                        if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1]))
                                zb->zb_ditto_2_of_2_samevdev++;
                        break;
                case 3:
                        equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1])) +
                            (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2])) +
                            (DVA_GET_VDEV(&bp->blk_dva[1]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2]));
                        if (equal == 1)
                                zb->zb_ditto_2_of_3_samevdev++;
                        else if (equal == 3)
                                zb->zb_ditto_3_of_3_samevdev++;
                        break;
                }
        }
}

static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);

        if (zio->io_error && (zio->io_error != ECKSUM ||
            !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
                spa->spa_scrub_errors++;
        mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
        size_t size = BP_GET_LSIZE(bp);
        int d;
        spa_t *spa = dp->dp_spa;
        boolean_t needs_io;
        int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
        int zio_priority;

        count_block(dp->dp_blkstats, bp);

        if (dp->dp_scrub_isresilver == 0) {
                /* It's a scrub */
                zio_flags |= ZIO_FLAG_SCRUB;
                zio_priority = ZIO_PRIORITY_SCRUB;
                needs_io = B_TRUE;
        } else {
                /* It's a resilver */
                zio_flags |= ZIO_FLAG_RESILVER;
                zio_priority = ZIO_PRIORITY_RESILVER;
                needs_io = B_FALSE;
        }

        /* If it's an intent log block, failure is expected. */
        if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
                zio_flags |= ZIO_FLAG_SPECULATIVE;

        for (d = 0; d < BP_GET_NDVAS(bp); d++) {
                vdev_t *vd = vdev_lookup_top(spa,
                    DVA_GET_VDEV(&bp->blk_dva[d]));

                /*
                 * Keep track of how much data we've examined so that
                 * zpool(1M) status can make useful progress reports.
                 */
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_scrub_examined +=
                    DVA_GET_ASIZE(&bp->blk_dva[d]);
                mutex_exit(&vd->vdev_stat_lock);

                /* if it's a resilver, this may not be in the target range */
                if (!needs_io) {
                        if (DVA_GET_GANG(&bp->blk_dva[d])) {
                                /*
                                 * Gang members may be spread across multiple
                                 * vdevs, so the best we can do is look at the
                                 * pool-wide DTL.
                                 * XXX -- it would be better to change our
                                 * allocation policy to ensure that this can't
                                 * happen.
                                 */
                                vd = spa->spa_root_vdev;
                        }
                        needs_io = vdev_dtl_contains(&vd->vdev_dtl_map,
                            bp->blk_birth, 1);
                }
        }

        if (needs_io && !zfs_no_scrub_io) {
                void *data = zio_data_buf_alloc(size);

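                /*
                 * Throttle: block until the number of in-flight scrub
                 * reads drops below spa_scrub_maxinflight.
                 */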
                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
                        cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
                spa->spa_scrub_inflight++;
                mutex_exit(&spa->spa_scrub_lock);

                zio_nowait(zio_read(NULL, spa, bp, data, size,
                    dsl_pool_scrub_clean_done, NULL, zio_priority,
                    zio_flags, zb));
        }

        /* do not relocate this block */
        return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
        /*
         * Purge all vdev caches.  We do this here rather than in sync
         * context because this requires a writer lock on the spa_config
         * lock, which we can't do from sync context.  The
         * spa_scrub_reopen flag indicates that vdev_open() should not
         * attempt to start another scrub.
         */
        spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
        dp->dp_spa->spa_scrub_reopen = B_TRUE;
        vdev_reopen(dp->dp_spa->spa_root_vdev);
        dp->dp_spa->spa_scrub_reopen = B_FALSE;
        spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

        return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}