/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;

int zfs_scrub_min_time = 1; /* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3; /* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */

extern int zfs_txg_timeout;

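/* Per-block callbacks for each scrub type, indexed by enum scrub_func. */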
static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};

#define SET_BOOKMARK(zb, objset, object, level, blkid)  \
{                                                       \
        (zb)->zb_objset = objset;                       \
        (zb)->zb_object = object;                       \
        (zb)->zb_level = level;                         \
        (zb)->zb_blkid = blkid;                         \
}

/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        enum scrub_func *funcp = arg2;
        dmu_object_type_t ot = 0;
        boolean_t complete = B_FALSE;

        dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

        ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
        ASSERT(*funcp > SCRUB_FUNC_NONE);
        ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

        dp->dp_scrub_min_txg = 0;
        dp->dp_scrub_max_txg = tx->tx_txg;

        if (*funcp == SCRUB_FUNC_CLEAN) {
                vdev_t *rvd = dp->dp_spa->spa_root_vdev;

                /* rewrite all disk labels */
                vdev_config_dirty(rvd);

                if (vdev_resilver_needed(rvd,
                    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_RESILVER_START);
                        dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                            tx->tx_txg);
                }

                /* zero out the scrub stats in all vdev_stat_t's */
                vdev_scrub_stat_update(rvd,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);

                dp->dp_spa->spa_scrub_started = B_TRUE;
        }

        /* back to the generic stuff */

        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        dp->dp_scrub_func = *funcp;
        dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
        dp->dp_scrub_restart = B_FALSE;
        dp->dp_spa->spa_scrub_errors = 0;

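        /*
         * Persist the scrub state in the MOS pool directory so that an
         * in-progress scrub survives pool export/import and reboot.
         */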
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
            &dp->dp_scrub_func, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
            &dp->dp_scrub_queue_obj, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_min_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_max_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_setup_sync, dp, &func, 0));
}

/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        boolean_t *completep = arg2;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        mutex_enter(&dp->dp_scrub_cancel_lock);

        if (dp->dp_scrub_restart) {
                dp->dp_scrub_restart = B_FALSE;
                *completep = B_FALSE;
        }

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
        dp->dp_spa->spa_scrub_started = B_FALSE;
        dp->dp_spa->spa_scrub_active = B_FALSE;

        dp->dp_scrub_func = SCRUB_FUNC_NONE;
        VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, tx));
        dp->dp_scrub_queue_obj = 0;
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, tx));

        spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
            "complete=%u", *completep);

        /* below is scrub-clean specific */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
            *completep);
        /*
         * If the scrub/resilver completed, update all DTLs to reflect this.
         * Whether it succeeded or not, vacate all temporary scrub DTLs.
         */
        vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
            *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
        if (dp->dp_scrub_min_txg && *completep)
                spa_event_notify(dp->dp_spa, NULL, ESC_ZFS_RESILVER_FINISH);
        spa_errlog_rotate(dp->dp_spa);

        /*
         * We may have finished replacing a device.
         * Let the async thread assess this and handle the detach.
         */
        spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

        dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
        boolean_t complete = B_FALSE;

        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        /*
         * This function will be used by bp-rewrite wad to intercept frees.
         */
        return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
            done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
        return (zb->zb_objset == 0 && zb->zb_object == 0 &&
            zb->zb_level == 0 && zb->zb_blkid == 0);
}

/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
        uint64_t zb1nextL0, zb2thisobj;

        ASSERT(zb1->zb_objset == zb2->zb_objset);
        ASSERT(zb1->zb_object != -1ULL);
        ASSERT(zb2->zb_level == 0);

        /*
         * A bookmark in the deadlist is considered to be after
         * everything else.
         */
        if (zb2->zb_object == -1ULL)
                return (B_TRUE);

        /* The objset_phys_t isn't before anything. */
        if (dnp == NULL)
                return (B_FALSE);

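        /*
         * Compute the first level-0 blkid beyond the subtree rooted at
         * zb1: each indirect level fans out into
         * 2^(dn_indblkshift - SPA_BLKPTRSHIFT) block pointers.
         */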
        zb1nextL0 = (zb1->zb_blkid + 1) <<
            ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

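        /*
         * If zb2 is in the meta-dnode (object 0), convert its blkid to
         * the first object number stored in that dnode block.
         */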
        zb2thisobj = zb2->zb_object ? zb2->zb_object :
            zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

        if (zb1->zb_object == 0) {
                uint64_t nextobj = zb1nextL0 *
                    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
                return (nextobj <= zb2thisobj);
        }

        if (zb1->zb_object < zb2thisobj)
                return (B_TRUE);
        if (zb1->zb_object > zb2thisobj)
                return (B_FALSE);
        if (zb2->zb_object == 0)
                return (B_FALSE);
        return (zb1nextL0 <= zb2->zb_blkid);
}

static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
        int elapsed_ticks;
        int mintime;

        if (dp->dp_scrub_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb->zb_level != 0)
                return (B_FALSE);

        mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
            zfs_scrub_min_time;
        elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
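        /*
         * Pause if we have overrun the txg sync deadline, or if we have
         * used up our minimum time slice and the txg sync thread is
         * waiting on us to finish.
         */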
        if (elapsed_ticks > hz * zfs_txg_timeout ||
            (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
                dprintf("pausing at %llx/%llx/%llx/%llx\n",
                    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
                    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
                dp->dp_scrub_pausing = B_TRUE;
                dp->dp_scrub_bookmark = *zb;
                return (B_TRUE);
        }
        return (B_FALSE);
}

typedef struct zil_traverse_arg {
        dsl_pool_t      *zta_dp;
        zil_header_t    *zta_zh;
} zil_traverse_arg_t;

/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

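        /*
         * One block ("stubby") can be allocated a long time ago; we
         * want to visit that one because it has been allocated
         * (on-disk) even if it hasn't been claimed (even though for
         * plain scrub there's nothing to do to it).
         */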
        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_traverse_arg_t *zta = arg;
                dsl_pool_t *dp = zta->zta_dp;
                zil_header_t *zh = zta->zta_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (bp->blk_birth <= dp->dp_scrub_min_txg)
                        return;

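                /*
                 * birth can be < claim_txg if this record's txg is
                 * already txg sync'ed (but this log block contains
                 * other records that are not synced)
                 */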
                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return;

                zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
                zb.zb_object = lr->lr_foid;
                zb.zb_level = BP_GET_LEVEL(bp);
                zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
                VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
        }
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_traverse_arg_t zta = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && (spa_mode & FWRITE))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
            claim_txg);

        zil_free(zilog);
}

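/*
 * Recursively visit bp and everything beneath it: an indirect block is
 * expanded into its child block pointers, a dnode block into the block
 * pointers of the dnodes it contains, and an objset block into its ZIL
 * and meta-dnode.  The per-scrub-type callback is invoked on every
 * block visited, and traversal may pause partway through, recording a
 * bookmark to resume from in a later txg.
 */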
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
        int err;
        arc_buf_t *buf = NULL;

        if (bp->blk_birth == 0)
                return;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (scrub_pause(dp, zb))
                return;

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg), don't bother doing it again.
                 */
                if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
                        return;

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
                }
        }

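        /* Indirect block: read it and recurse into each child blkptr. */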
        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                cbp = buf->b_data;

                for (i = 0; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        scrub_visitbp(dp, dnp, buf, cbp, &czb);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
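                /* Dnode block: recurse into each contained dnode's blkptrs. */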
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *child_dnp;
                int i, j;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                child_dnp = buf->b_data;

                for (i = 0; i < epb; i++, child_dnp++) {
                        for (j = 0; j < child_dnp->dn_nblkptr; j++) {
                                zbookmark_t czb;

                                SET_BOOKMARK(&czb, zb->zb_objset,
                                    zb->zb_blkid * epb + i,
                                    child_dnp->dn_nlevels - 1, j);
                                scrub_visitbp(dp, child_dnp, buf,
                                    &child_dnp->dn_blkptr[j], &czb);
                        }
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
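                /* Objset block: traverse its ZIL, then its meta-dnode. */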
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;
                int j;

                err = arc_read_nolock(NULL, dp->dp_spa, bp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }

                osp = buf->b_data;

                traverse_zil(dp, &osp->os_zil_header);

                for (j = 0; j < osp->os_meta_dnode.dn_nblkptr; j++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, 0,
                            osp->os_meta_dnode.dn_nlevels - 1, j);
                        scrub_visitbp(dp, &osp->os_meta_dnode, buf,
                            &osp->os_meta_dnode.dn_blkptr[j], &czb);
                }
        }

        (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
        zbookmark_t zb;

        SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
        scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

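/*
 * Called when a dataset is destroyed mid-scrub.  If it was the dataset
 * we were paused on, invalidate the bookmark (zb_objset = -1) so the
 * sync code will pick a new dataset from the queue; either way,
 * traversal continues with its next snapshot, if any.
 */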
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) != 0) {
                return;
        }

        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

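/*
 * Called when a dataset is snapshotted mid-scrub.  The blocks we were
 * traversing now belong to the new snapshot (the dataset's new
 * prev_snap), so retarget the bookmark or queue entry accordingly.
 */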
void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                dp->dp_scrub_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
        }
}

struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

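/*
 * dmu_objset_find_spa() callback: if this dataset is a clone of
 * eca->originobj, walk back to its earliest snapshot past the origin
 * and add that snapshot to the scrub queue.
 */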
/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);
        dp = ds->ds_dir->dd_pool;

        if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
                while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                        dsl_dataset_t *prev;
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                        dsl_dataset_rele(ds, FTAG);
                        if (err)
                                return (err);
                        ds = prev;
                }
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_object, eca->tx) == 0);
        }
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_dataset_t *ds;
        uint64_t min_txg_save;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        /*
         * Iterate over the bps in this ds.
         */
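        /*
         * Temporarily raise dp_scrub_min_txg to this dataset's
         * prev_snap_txg so we only visit blocks born after the previous
         * snapshot; older blocks are covered by that snapshot's own
         * traversal.
         */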
        min_txg_save = dp->dp_scrub_min_txg;
        dp->dp_scrub_min_txg =
            MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
        scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
        dp->dp_scrub_min_txg = min_txg_save;

        if (dp->dp_scrub_pausing)
                goto out;

        /*
         * Add descendant datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                            NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
                } else {
                        VERIFY(zap_join(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            dp->dp_scrub_queue_obj, tx) == 0);
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);

        dp = ds->ds_dir->dd_pool;

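        /*
         * Walk back to this dataset's oldest non-clone snapshot;
         * traversal of the lineage must begin there.
         */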
        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

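/*
 * Per-txg scrub driver, called from the pool sync path while a scrub is
 * active.  Does a bounded amount of traversal work (see scrub_pause()),
 * then persists the bookmark and error count so the scrub can resume in
 * the next txg, or after an export or reboot.
 */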
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        boolean_t complete = B_TRUE;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        /* If the spa is not fully loaded, don't bother. */
        if (dp->dp_spa->spa_load_state != SPA_LOAD_NONE)
                return;

        if (dp->dp_scrub_restart) {
                enum scrub_func func = dp->dp_scrub_func;
                dp->dp_scrub_restart = B_FALSE;
                dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
        }

        if (dp->dp_spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
                /*
                 * We must have resumed after rebooting; reset the vdev
                 * stats to know that we're doing a scrub (although it
                 * will think we're just starting now).
                 */
                vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);
        }

        dp->dp_scrub_pausing = B_FALSE;
        dp->dp_scrub_start_time = lbolt64;
        dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
        dp->dp_spa->spa_scrub_active = B_TRUE;

        if (dp->dp_scrub_bookmark.zb_objset == 0) {
                /* First do the MOS & ORIGIN */
                scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
                if (dp->dp_scrub_pausing)
                        goto out;

                if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY(0 == dmu_objset_find_spa(dp->dp_spa,
                            NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!dp->dp_scrub_pausing);
        } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
                /*
                 * If we were paused, continue from here.  Note if the
                 * ds we were paused on was deleted, the zb_objset will
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
                if (dp->dp_scrub_pausing)
                        goto out;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
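        /*
         * Note: the cursor is re-initialized on every pass (via the
         * comma expression) so that, after the zap_remove() below, the
         * next iteration fetches the queue's new first entry.
         */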
        while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                VERIFY(0 == zap_remove(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, za.za_name, tx));
                scrub_visitds(dp, za.za_first_integer, tx);
                if (dp->dp_scrub_pausing)
                        break;
                zap_cursor_fini(&zc);
        }
        zap_cursor_fini(&zc);
        if (dp->dp_scrub_pausing)
                goto out;

        /* done. */

        dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
        return;
out:
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_scrub_cancel_lock);
        dp->dp_scrub_restart = B_TRUE;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);

        if (zio->io_error && (zio->io_error != ECKSUM ||
            !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
                spa->spa_scrub_errors++;
        mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
        size_t size = BP_GET_LSIZE(bp);
        int d;
        spa_t *spa = dp->dp_spa;
        boolean_t needs_io;
        int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_CANFAIL;
        int zio_priority;

        if (dp->dp_scrub_isresilver == 0) {
                /* It's a scrub */
                zio_flags |= ZIO_FLAG_SCRUB;
                zio_priority = ZIO_PRIORITY_SCRUB;
                needs_io = B_TRUE;
        } else {
                /* It's a resilver */
                zio_flags |= ZIO_FLAG_RESILVER;
                zio_priority = ZIO_PRIORITY_RESILVER;
                needs_io = B_FALSE;
        }

        /* If it's an intent log block, failure is expected. */
        if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
                zio_flags |= ZIO_FLAG_SPECULATIVE;

        for (d = 0; d < BP_GET_NDVAS(bp); d++) {
                vdev_t *vd = vdev_lookup_top(spa,
                    DVA_GET_VDEV(&bp->blk_dva[d]));

                /*
                 * Keep track of how much data we've examined so that
                 * zpool(1M) status can make useful progress reports.
                 */
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_scrub_examined +=
                    DVA_GET_ASIZE(&bp->blk_dva[d]);
                mutex_exit(&vd->vdev_stat_lock);

                /* if it's a resilver, this may not be in the target range */
                if (!needs_io) {
                        if (DVA_GET_GANG(&bp->blk_dva[d])) {
                                /*
                                 * Gang members may be spread across multiple
                                 * vdevs, so the best we can do is look at the
                                 * pool-wide DTL.
                                 * XXX -- it would be better to change our
                                 * allocation policy to ensure that this can't
                                 * happen.
                                 */
                                vd = spa->spa_root_vdev;
                        }
                        needs_io = vdev_dtl_contains(&vd->vdev_dtl_map,
                            bp->blk_birth, 1);
                }
        }

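        /*
         * Throttle the scrub: allow at most spa_scrub_maxinflight
         * concurrent scrub i/os; dsl_pool_scrub_clean_done() signals
         * spa_scrub_io_cv as each one completes.
         */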
        if (needs_io && !zfs_no_scrub_io) {
                void *data = zio_data_buf_alloc(size);

                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
                        cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
                spa->spa_scrub_inflight++;
                mutex_exit(&spa->spa_scrub_lock);

                zio_nowait(zio_read(NULL, spa, bp, data, size,
                    dsl_pool_scrub_clean_done, NULL, zio_priority,
                    zio_flags, zb));
        }

        /* do not relocate this block */
        return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
        /*
         * Purge all vdev caches.  We do this here rather than in sync
         * context because this requires a writer lock on the spa_config
         * lock, which we can't do from sync context.  The
         * spa_scrub_reopen flag indicates that vdev_open() should not
         * attempt to start another scrub.
         */
        spa_config_enter(dp->dp_spa, SCL_ALL, FTAG, RW_WRITER);
        dp->dp_spa->spa_scrub_reopen = B_TRUE;
        vdev_reopen(dp->dp_spa->spa_root_vdev);
        dp->dp_spa->spa_scrub_reopen = B_FALSE;
        spa_config_exit(dp->dp_spa, SCL_ALL, FTAG);

        return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}