/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>

typedef int (scrub_cb_t)(dsl_pool_t *, const blkptr_t *, const zbookmark_t *);

static scrub_cb_t dsl_pool_scrub_clean_cb;
static dsl_syncfunc_t dsl_pool_scrub_cancel_sync;
static void scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object);

int zfs_scrub_min_time = 1; /* scrub for at least 1 sec each txg */
int zfs_resilver_min_time = 3; /* resilver for at least 3 sec each txg */
boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */

extern int zfs_txg_timeout;

static scrub_cb_t *scrub_funcs[SCRUB_FUNC_NUMFUNCS] = {
        NULL,
        dsl_pool_scrub_clean_cb
};

#define SET_BOOKMARK(zb, objset, object, level, blkid)  \
{                                                       \
        (zb)->zb_objset = objset;                       \
        (zb)->zb_object = object;                       \
        (zb)->zb_level = level;                         \
        (zb)->zb_blkid = blkid;                         \
}

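/*
 * Sync-task callback that starts a new scrub or resilver: it cancels any
 * scrub already in progress, computes the txg range to examine
 * (vdev_resilver_needed() narrows it for a resilver), creates the ZAP
 * work queue, and persists the scrub state in the MOS pool directory.
 */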
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        enum scrub_func *funcp = arg2;
        dmu_object_type_t ot = 0;
        boolean_t complete = B_FALSE;

        dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

        ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
        ASSERT(*funcp > SCRUB_FUNC_NONE);
        ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

        dp->dp_scrub_min_txg = 0;
        dp->dp_scrub_max_txg = tx->tx_txg;

        if (*funcp == SCRUB_FUNC_CLEAN) {
                vdev_t *rvd = dp->dp_spa->spa_root_vdev;

                /* rewrite all disk labels */
                vdev_config_dirty(rvd);

                if (vdev_resilver_needed(rvd,
                    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_RESILVER_START);
                        dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
                            tx->tx_txg);
                } else {
                        spa_event_notify(dp->dp_spa, NULL,
                            ESC_ZFS_SCRUB_START);
                }

                /* zero out the scrub stats in all vdev_stat_t's */
                vdev_scrub_stat_update(rvd,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);

                dp->dp_spa->spa_scrub_started = B_TRUE;
        }

        /* back to the generic stuff */

        if (dp->dp_blkstats == NULL) {
                dp->dp_blkstats =
                    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
        }
        bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

        if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
                ot = DMU_OT_ZAP_OTHER;

        dp->dp_scrub_func = *funcp;
        dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
            ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
        dp->dp_scrub_restart = B_FALSE;
        dp->dp_spa->spa_scrub_errors = 0;

        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
            &dp->dp_scrub_func, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
            &dp->dp_scrub_queue_obj, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_min_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
            &dp->dp_scrub_max_txg, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &dp->dp_spa->spa_scrub_errors, tx));

        spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
            "func=%u mintxg=%llu maxtxg=%llu",
            *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}

int
dsl_pool_scrub_setup(dsl_pool_t *dp, enum scrub_func func)
{
        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_setup_sync, dp, &func, 0));
}

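/*
 * Sync-task callback that tears down the current scrub: it drains
 * in-flight scrub I/O, frees the work-queue object, removes the
 * persistent scrub state from the MOS, and (if the scrub completed)
 * updates the DTLs and posts the finish event.
 */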
/* ARGSUSED */
static void
dsl_pool_scrub_cancel_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
        dsl_pool_t *dp = arg1;
        boolean_t *completep = arg2;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        mutex_enter(&dp->dp_scrub_cancel_lock);

        if (dp->dp_scrub_restart) {
                dp->dp_scrub_restart = B_FALSE;
                *completep = B_FALSE;
        }

        /* XXX this is scrub-clean specific */
        mutex_enter(&dp->dp_spa->spa_scrub_lock);
        while (dp->dp_spa->spa_scrub_inflight > 0) {
                cv_wait(&dp->dp_spa->spa_scrub_io_cv,
                    &dp->dp_spa->spa_scrub_lock);
        }
        mutex_exit(&dp->dp_spa->spa_scrub_lock);
        dp->dp_spa->spa_scrub_started = B_FALSE;
        dp->dp_spa->spa_scrub_active = B_FALSE;

        dp->dp_scrub_func = SCRUB_FUNC_NONE;
        VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
            dp->dp_scrub_queue_obj, tx));
        dp->dp_scrub_queue_obj = 0;
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_QUEUE, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MIN_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_MAX_TXG, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_FUNC, tx));
        VERIFY(0 == zap_remove(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, tx));

        spa_history_internal_log(LOG_POOL_SCRUB_DONE, dp->dp_spa, tx, cr,
            "complete=%u", *completep);

        /* below is scrub-clean specific */
        vdev_scrub_stat_update(dp->dp_spa->spa_root_vdev, POOL_SCRUB_NONE,
            *completep);
        /*
         * If the scrub/resilver completed, update all DTLs to reflect this.
         * Whether it succeeded or not, vacate all temporary scrub DTLs.
         */
        vdev_dtl_reassess(dp->dp_spa->spa_root_vdev, tx->tx_txg,
            *completep ? dp->dp_scrub_max_txg : 0, B_TRUE);
        if (*completep)
                spa_event_notify(dp->dp_spa, NULL, dp->dp_scrub_min_txg ?
                    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
        spa_errlog_rotate(dp->dp_spa);

        /*
         * We may have finished replacing a device.
         * Let the async thread assess this and handle the detach.
         */
        spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER_DONE);

        dp->dp_scrub_min_txg = dp->dp_scrub_max_txg = 0;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

int
dsl_pool_scrub_cancel(dsl_pool_t *dp)
{
        boolean_t complete = B_FALSE;

        return (dsl_sync_task_do(dp, NULL,
            dsl_pool_scrub_cancel_sync, dp, &complete, 3));
}

int
dsl_free(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        /*
         * This function will be used by bp-rewrite wad to intercept frees.
         */
        return (arc_free(pio, dp->dp_spa, txg, (blkptr_t *)bpp,
            done, private, arc_flags));
}

static boolean_t
bookmark_is_zero(const zbookmark_t *zb)
{
        return (zb->zb_objset == 0 && zb->zb_object == 0 &&
            zb->zb_level == 0 && zb->zb_blkid == 0);
}

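/*
 * Compare two bookmarks within the same objset: returns B_TRUE if
 * everything at or below zb1 is visited before the level-0 bookmark zb2
 * in traversal order, i.e. the subtree rooted at zb1 was already fully
 * covered when we paused at zb2.
 */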
/* dnp is the dnode for zb1->zb_object */
static boolean_t
bookmark_is_before(dnode_phys_t *dnp, const zbookmark_t *zb1,
    const zbookmark_t *zb2)
{
        uint64_t zb1nextL0, zb2thisobj;

        ASSERT(zb1->zb_objset == zb2->zb_objset);
        ASSERT(zb1->zb_object != -1ULL);
        ASSERT(zb2->zb_level == 0);

        /*
         * A bookmark in the deadlist is considered to be after
         * everything else.
         */
        if (zb2->zb_object == -1ULL)
                return (B_TRUE);

        /* The objset_phys_t isn't before anything. */
        if (dnp == NULL)
                return (B_FALSE);

        zb1nextL0 = (zb1->zb_blkid + 1) <<
            ((zb1->zb_level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));

        zb2thisobj = zb2->zb_object ? zb2->zb_object :
            zb2->zb_blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT);

        if (zb1->zb_object == 0) {
                uint64_t nextobj = zb1nextL0 *
                    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT) >> DNODE_SHIFT;
                return (nextobj <= zb2thisobj);
        }

        if (zb1->zb_object < zb2thisobj)
                return (B_TRUE);
        if (zb1->zb_object > zb2thisobj)
                return (B_FALSE);
        if (zb2->zb_object == 0)
                return (B_FALSE);
        return (zb1nextL0 <= zb2->zb_blkid);
}

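/*
 * Decide whether to pause the scrub at bookmark zb.  We pause once we
 * have run for at least zfs_scrub_min_time (or zfs_resilver_min_time)
 * seconds and the txg sync is waiting on us, or unconditionally after
 * zfs_txg_timeout seconds; the bookmark is saved so the next txg can
 * resume from the same level-0 block.
 */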
static boolean_t
scrub_pause(dsl_pool_t *dp, const zbookmark_t *zb)
{
        int elapsed_ticks;
        int mintime;

        if (dp->dp_scrub_pausing)
                return (B_TRUE); /* we're already pausing */

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark))
                return (B_FALSE); /* we're resuming */

        /* We only know how to resume from level-0 blocks. */
        if (zb->zb_level != 0)
                return (B_FALSE);

        mintime = dp->dp_scrub_isresilver ? zfs_resilver_min_time :
            zfs_scrub_min_time;
        elapsed_ticks = lbolt64 - dp->dp_scrub_start_time;
        if (elapsed_ticks > hz * zfs_txg_timeout ||
            (elapsed_ticks > hz * mintime && txg_sync_waiting(dp))) {
                dprintf("pausing at %llx/%llx/%llx/%llx\n",
                    (longlong_t)zb->zb_objset, (longlong_t)zb->zb_object,
                    (longlong_t)zb->zb_level, (longlong_t)zb->zb_blkid);
                dp->dp_scrub_pausing = B_TRUE;
                dp->dp_scrub_bookmark = *zb;
                return (B_TRUE);
        }
        return (B_FALSE);
}

typedef struct zil_traverse_arg {
        dsl_pool_t      *zta_dp;
        zil_header_t    *zta_zh;
} zil_traverse_arg_t;

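/*
 * zil_parse() callbacks: visit the log blocks and the block pointers
 * embedded in TX_WRITE records of a claimed intent log, handing each
 * qualifying bp to the current scrub function.
 */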
/* ARGSUSED */
static void
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
        zil_traverse_arg_t *zta = arg;
        dsl_pool_t *dp = zta->zta_dp;
        zil_header_t *zh = zta->zta_zh;
        zbookmark_t zb;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        /*
         * One block ("stubby") can be allocated a long time ago; we
         * want to visit that one because it has been allocated
         * (on-disk) even if it hasn't been claimed (even though for
         * plain scrub there's nothing to do to it).
         */
        if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
                return;

        zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
        zb.zb_object = 0;
        zb.zb_level = -1;
        zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
        VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
}

/* ARGSUSED */
static void
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
        if (lrc->lrc_txtype == TX_WRITE) {
                zil_traverse_arg_t *zta = arg;
                dsl_pool_t *dp = zta->zta_dp;
                zil_header_t *zh = zta->zta_zh;
                lr_write_t *lr = (lr_write_t *)lrc;
                blkptr_t *bp = &lr->lr_blkptr;
                zbookmark_t zb;

                if (bp->blk_birth <= dp->dp_scrub_min_txg)
                        return;

                /*
                 * birth can be < claim_txg if this record's txg has
                 * already been synced (but this log block contains
                 * other records that are not synced)
                 */
                if (claim_txg == 0 || bp->blk_birth < claim_txg)
                        return;

                zb.zb_objset = zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET];
                zb.zb_object = lr->lr_foid;
                zb.zb_level = BP_GET_LEVEL(bp);
                zb.zb_blkid = lr->lr_offset / BP_GET_LSIZE(bp);
                VERIFY(0 == scrub_funcs[dp->dp_scrub_func](dp, bp, &zb));
        }
}

static void
traverse_zil(dsl_pool_t *dp, zil_header_t *zh)
{
        uint64_t claim_txg = zh->zh_claim_txg;
        zil_traverse_arg_t zta = { dp, zh };
        zilog_t *zilog;

        /*
         * We only want to visit blocks that have been claimed but not yet
         * replayed (or, in read-only mode, blocks that *would* be claimed).
         */
        if (claim_txg == 0 && spa_writeable(dp->dp_spa))
                return;

        zilog = zil_alloc(dp->dp_meta_objset, zh);

        (void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, &zta,
            claim_txg);

        zil_free(zilog);
}

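/*
 * Recursively visit bp and everything it points to: indirect blocks are
 * read and their children visited, dnode blocks descend into each dnode's
 * block pointers, and objset blocks descend into the metadnode (plus the
 * user/group-used dnodes, when present) after walking the ZIL.  The bp
 * itself is then handed to the current scrub function.  Read errors are
 * counted in spa_scrub_errors rather than aborting the traversal.
 */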
static void
scrub_visitbp(dsl_pool_t *dp, dnode_phys_t *dnp,
    arc_buf_t *pbuf, blkptr_t *bp, const zbookmark_t *zb)
{
        int err;
        arc_buf_t *buf = NULL;

        if (bp->blk_birth <= dp->dp_scrub_min_txg)
                return;

        if (scrub_pause(dp, zb))
                return;

        if (!bookmark_is_zero(&dp->dp_scrub_bookmark)) {
                /*
                 * If we already visited this bp & everything below (in
                 * a prior txg), don't bother doing it again.
                 */
                if (bookmark_is_before(dnp, zb, &dp->dp_scrub_bookmark))
                        return;

                /*
                 * If we found the block we're trying to resume from, or
                 * we went past it to a different object, zero it out to
                 * indicate that it's OK to start checking for pausing
                 * again.
                 */
                if (bcmp(zb, &dp->dp_scrub_bookmark, sizeof (*zb)) == 0 ||
                    zb->zb_object > dp->dp_scrub_bookmark.zb_object) {
                        dprintf("resuming at %llx/%llx/%llx/%llx\n",
                            (longlong_t)zb->zb_objset,
                            (longlong_t)zb->zb_object,
                            (longlong_t)zb->zb_level,
                            (longlong_t)zb->zb_blkid);
                        bzero(&dp->dp_scrub_bookmark, sizeof (*zb));
                }
        }

        if (BP_GET_LEVEL(bp) > 0) {
                uint32_t flags = ARC_WAIT;
                int i;
                blkptr_t *cbp;
                int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                cbp = buf->b_data;

                for (i = 0; i < epb; i++, cbp++) {
                        zbookmark_t czb;

                        SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
                            zb->zb_level - 1,
                            zb->zb_blkid * epb + i);
                        scrub_visitbp(dp, dnp, buf, cbp, &czb);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
                uint32_t flags = ARC_WAIT;
                dnode_phys_t *child_dnp;
                int i;
                int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;

                err = arc_read(NULL, dp->dp_spa, bp, pbuf,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }
                child_dnp = buf->b_data;

                for (i = 0; i < epb; i++, child_dnp++) {
                        scrub_visitdnode(dp, child_dnp, buf, zb->zb_objset,
                            zb->zb_blkid * epb + i);
                }
        } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
                uint32_t flags = ARC_WAIT;
                objset_phys_t *osp;

                err = arc_read_nolock(NULL, dp->dp_spa, bp,
                    arc_getbuf_func, &buf,
                    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
                if (err) {
                        mutex_enter(&dp->dp_spa->spa_scrub_lock);
                        dp->dp_spa->spa_scrub_errors++;
                        mutex_exit(&dp->dp_spa->spa_scrub_lock);
                        return;
                }

                osp = buf->b_data;

                traverse_zil(dp, &osp->os_zil_header);

                scrub_visitdnode(dp, &osp->os_meta_dnode,
                    buf, zb->zb_objset, 0);
                if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
                        scrub_visitdnode(dp, &osp->os_userused_dnode,
                            buf, zb->zb_objset, 0);
                        scrub_visitdnode(dp, &osp->os_groupused_dnode,
                            buf, zb->zb_objset, 0);
                }
        }

        (void) scrub_funcs[dp->dp_scrub_func](dp, bp, zb);
        if (buf)
                (void) arc_buf_remove_ref(buf, &buf);
}

static void
scrub_visitdnode(dsl_pool_t *dp, dnode_phys_t *dnp, arc_buf_t *buf,
    uint64_t objset, uint64_t object)
{
        int j;

        for (j = 0; j < dnp->dn_nblkptr; j++) {
                zbookmark_t czb;

                SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
                scrub_visitbp(dp, dnp, buf, &dnp->dn_blkptr[j], &czb);
        }
}

static void
scrub_visit_rootbp(dsl_pool_t *dp, dsl_dataset_t *ds, blkptr_t *bp)
{
        zbookmark_t zb;

        SET_BOOKMARK(&zb, ds ? ds->ds_object : 0, 0, -1, 0);
        scrub_visitbp(dp, NULL, NULL, bp, &zb);
}

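/*
 * Called (in sync context) when a dataset is destroyed, to keep the
 * scrub state from referencing a dead object: if the bookmark points at
 * the dataset, its objset is set to -1 so the sync code will pick a new
 * dataset from the queue; a work-queue entry, if any, is replaced by the
 * dataset's next snapshot.
 */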
void
dsl_pool_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                SET_BOOKMARK(&dp->dp_scrub_bookmark, -1, 0, 0, 0);
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) != 0) {
                return;
        }

        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
}

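/*
 * Called (in sync context) when a dataset is snapshotted: the new
 * snapshot inherits the dataset's place in the scrub bookmark or work
 * queue, since the blocks still to be visited now belong to it.
 */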
void
dsl_pool_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        ASSERT(ds->ds_phys->ds_prev_snap_obj != 0);

        if (dp->dp_scrub_bookmark.zb_objset == ds->ds_object) {
                dp->dp_scrub_bookmark.zb_objset =
                    ds->ds_phys->ds_prev_snap_obj;
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_prev_snap_obj, tx) == 0);
        }
}

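/*
 * Called (in sync context) when two datasets have their contents
 * swapped by a clone swap: any scrub bookmark or work-queue references
 * to the two datasets are exchanged so the scrub keeps tracking the
 * same data.
 */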
void
dsl_pool_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds1->ds_dir->dd_pool;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        if (dp->dp_scrub_bookmark.zb_objset == ds1->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds2->ds_object;
        } else if (dp->dp_scrub_bookmark.zb_objset == ds2->ds_object) {
                dp->dp_scrub_bookmark.zb_objset = ds1->ds_object;
        }

        if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds1->ds_object, tx) == 0) {
                int err = zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds2->ds_object, tx);
                VERIFY(err == 0 || err == EEXIST);
                if (err == EEXIST) {
                        /* Both were there to begin with */
                        VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                            dp->dp_scrub_queue_obj, ds1->ds_object, tx));
                }
        } else if (zap_remove_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds2->ds_object, tx) == 0) {
                VERIFY(0 == zap_add_int(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, ds1->ds_object, tx));
        }
}

struct enqueue_clones_arg {
        dmu_tx_t *tx;
        uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        struct enqueue_clones_arg *eca = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);
        dp = ds->ds_dir->dd_pool;

        if (ds->ds_dir->dd_phys->dd_origin_obj == eca->originobj) {
                while (ds->ds_phys->ds_prev_snap_obj != eca->originobj) {
                        dsl_dataset_t *prev;
                        err = dsl_dataset_hold_obj(dp,
                            ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);

                        dsl_dataset_rele(ds, FTAG);
                        if (err)
                                return (err);
                        ds = prev;
                }
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_object, eca->tx) == 0);
        }
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

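/*
 * Scrub one dataset: visit every block born after its previous snapshot,
 * then (unless we paused) enqueue its next snapshot and, at a branch
 * point, all of its clones for later visits.
 */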
static void
scrub_visitds(dsl_pool_t *dp, uint64_t dsobj, dmu_tx_t *tx)
{
        dsl_dataset_t *ds;
        uint64_t min_txg_save;

        VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

        /*
         * Iterate over the bps in this ds.
         */
        min_txg_save = dp->dp_scrub_min_txg;
        dp->dp_scrub_min_txg =
            MAX(dp->dp_scrub_min_txg, ds->ds_phys->ds_prev_snap_txg);
        scrub_visit_rootbp(dp, ds, &ds->ds_phys->ds_bp);
        dp->dp_scrub_min_txg = min_txg_save;

        if (dp->dp_scrub_pausing)
                goto out;

        /*
         * Add descendant datasets to work queue.
         */
        if (ds->ds_phys->ds_next_snap_obj != 0) {
                VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
                    ds->ds_phys->ds_next_snap_obj, tx) == 0);
        }
        if (ds->ds_phys->ds_num_children > 1) {
                boolean_t usenext = B_FALSE;
                if (ds->ds_phys->ds_next_clones_obj != 0) {
                        uint64_t count;
                        /*
                         * A bug in a previous version of the code could
                         * cause upgrade_clones_cb() to not set
                         * ds_next_snap_obj when it should, leading to a
                         * missing entry.  Therefore we can only use the
                         * next_clones_obj when its count is correct.
                         */
                        int err = zap_count(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj, &count);
                        if (err == 0 &&
                            count == ds->ds_phys->ds_num_children - 1)
                                usenext = B_TRUE;
                }

                if (usenext) {
                        VERIFY(zap_join(dp->dp_meta_objset,
                            ds->ds_phys->ds_next_clones_obj,
                            dp->dp_scrub_queue_obj, tx) == 0);
                } else {
                        struct enqueue_clones_arg eca;
                        eca.tx = tx;
                        eca.originobj = ds->ds_object;

                        (void) dmu_objset_find_spa(ds->ds_dir->dd_pool->dp_spa,
                            NULL, enqueue_clones_cb, &eca, DS_FIND_CHILDREN);
                }
        }

out:
        dsl_dataset_rele(ds, FTAG);
}

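/*
 * dmu_objset_find_spa() callback used on pools that predate
 * SPA_VERSION_DSL_SCRUB (no ORIGIN snapshot): walk each filesystem back
 * to its oldest snapshot and enqueue that, so the whole snapshot chain
 * is scrubbed in order; clones are skipped here.
 */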
/* ARGSUSED */
static int
enqueue_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        dmu_tx_t *tx = arg;
        dsl_dataset_t *ds;
        int err;
        dsl_pool_t *dp;

        err = dsl_dataset_hold_obj(spa->spa_dsl_pool, dsobj, FTAG, &ds);
        if (err)
                return (err);

        dp = ds->ds_dir->dd_pool;

        while (ds->ds_phys->ds_prev_snap_obj != 0) {
                dsl_dataset_t *prev;
                err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
                    FTAG, &prev);
                if (err) {
                        dsl_dataset_rele(ds, FTAG);
                        return (err);
                }

                /*
                 * If this is a clone, we don't need to worry about it for now.
                 */
                if (prev->ds_phys->ds_next_snap_obj != ds->ds_object) {
                        dsl_dataset_rele(ds, FTAG);
                        dsl_dataset_rele(prev, FTAG);
                        return (0);
                }
                dsl_dataset_rele(ds, FTAG);
                ds = prev;
        }

        VERIFY(zap_add_int(dp->dp_meta_objset, dp->dp_scrub_queue_obj,
            ds->ds_object, tx) == 0);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}

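/*
 * Called from spa_sync() each txg while a scrub is active: performs a
 * bounded amount of traversal work (pausing via scrub_pause()), saves
 * the bookmark and error count if paused, and finishes the scrub by
 * calling dsl_pool_scrub_cancel_sync() once the work queue is empty.
 */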
void
dsl_pool_scrub_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
        spa_t *spa = dp->dp_spa;
        zap_cursor_t zc;
        zap_attribute_t za;
        boolean_t complete = B_TRUE;

        if (dp->dp_scrub_func == SCRUB_FUNC_NONE)
                return;

        /*
         * If the pool is not loaded, or is trying to unload, leave it alone.
         */
        if (spa->spa_load_state != SPA_LOAD_NONE || spa_shutting_down(spa))
                return;

        if (dp->dp_scrub_restart) {
                enum scrub_func func = dp->dp_scrub_func;
                dp->dp_scrub_restart = B_FALSE;
                dsl_pool_scrub_setup_sync(dp, &func, kcred, tx);
        }

        if (spa->spa_root_vdev->vdev_stat.vs_scrub_type == 0) {
                /*
                 * We must have resumed after rebooting; reset the vdev
                 * stats to know that we're doing a scrub (although it
                 * will think we're just starting now).
                 */
                vdev_scrub_stat_update(spa->spa_root_vdev,
                    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
                    POOL_SCRUB_EVERYTHING, B_FALSE);
        }

        dp->dp_scrub_pausing = B_FALSE;
        dp->dp_scrub_start_time = lbolt64;
        dp->dp_scrub_isresilver = (dp->dp_scrub_min_txg != 0);
        spa->spa_scrub_active = B_TRUE;

        if (dp->dp_scrub_bookmark.zb_objset == 0) {
                /* First do the MOS & ORIGIN */
                scrub_visit_rootbp(dp, NULL, &dp->dp_meta_rootbp);
                if (dp->dp_scrub_pausing)
                        goto out;

                if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
                        VERIFY(0 == dmu_objset_find_spa(spa,
                            NULL, enqueue_cb, tx, DS_FIND_CHILDREN));
                } else {
                        scrub_visitds(dp, dp->dp_origin_snap->ds_object, tx);
                }
                ASSERT(!dp->dp_scrub_pausing);
        } else if (dp->dp_scrub_bookmark.zb_objset != -1ULL) {
                /*
                 * If we were paused, continue from here.  Note if the
                 * ds we were paused on was deleted, the zb_objset will
                 * be -1, so we will skip this and find a new objset
                 * below.
                 */
                scrub_visitds(dp, dp->dp_scrub_bookmark.zb_objset, tx);
                if (dp->dp_scrub_pausing)
                        goto out;
        }

        /*
         * In case we were paused right at the end of the ds, zero the
         * bookmark so we don't think that we're still trying to resume.
         */
        bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));

        /* keep pulling things out of the zap-object-as-queue */
        while (zap_cursor_init(&zc, dp->dp_meta_objset, dp->dp_scrub_queue_obj),
            zap_cursor_retrieve(&zc, &za) == 0) {
                VERIFY(0 == zap_remove(dp->dp_meta_objset,
                    dp->dp_scrub_queue_obj, za.za_name, tx));
                scrub_visitds(dp, za.za_first_integer, tx);
                if (dp->dp_scrub_pausing)
                        break;
                zap_cursor_fini(&zc);
        }
        zap_cursor_fini(&zc);
        if (dp->dp_scrub_pausing)
                goto out;

        /* done. */

        dsl_pool_scrub_cancel_sync(dp, &complete, kcred, tx);
        return;
out:
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
            &dp->dp_scrub_bookmark, tx));
        VERIFY(0 == zap_update(dp->dp_meta_objset,
            DMU_POOL_DIRECTORY_OBJECT,
            DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
            &spa->spa_scrub_errors, tx));

        /* XXX this is scrub-clean specific */
        mutex_enter(&spa->spa_scrub_lock);
        while (spa->spa_scrub_inflight > 0)
                cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
        mutex_exit(&spa->spa_scrub_lock);
}

void
dsl_pool_scrub_restart(dsl_pool_t *dp)
{
        mutex_enter(&dp->dp_scrub_cancel_lock);
        dp->dp_scrub_restart = B_TRUE;
        mutex_exit(&dp->dp_scrub_cancel_lock);
}

/*
 * scrub consumers
 */

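/*
 * Accumulate block statistics: each block is counted in its own
 * level/type bucket, the per-level total, the per-type total, and the
 * grand total, along with ditto-block same-vdev counts.
 */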
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
        int i;

        /*
         * If we resume after a reboot, zab will be NULL; don't record
         * incomplete stats in that case.
         */
        if (zab == NULL)
                return;

        for (i = 0; i < 4; i++) {
                int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
                int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
                zfs_blkstat_t *zb = &zab->zab_type[l][t];
                int equal;

                zb->zb_count++;
                zb->zb_asize += BP_GET_ASIZE(bp);
                zb->zb_lsize += BP_GET_LSIZE(bp);
                zb->zb_psize += BP_GET_PSIZE(bp);
                zb->zb_gangs += BP_COUNT_GANG(bp);

                switch (BP_GET_NDVAS(bp)) {
                case 2:
                        if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1]))
                                zb->zb_ditto_2_of_2_samevdev++;
                        break;
                case 3:
                        equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[1])) +
                            (DVA_GET_VDEV(&bp->blk_dva[0]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2])) +
                            (DVA_GET_VDEV(&bp->blk_dva[1]) ==
                            DVA_GET_VDEV(&bp->blk_dva[2]));
                        if (equal == 1)
                                zb->zb_ditto_2_of_3_samevdev++;
                        else if (equal == 3)
                                zb->zb_ditto_3_of_3_samevdev++;
                        break;
                }
        }
}

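/*
 * zio completion callback for scrub reads: free the verification buffer,
 * drop the in-flight count (waking anyone throttled on it), and record
 * the error unless it is a checksum error on a speculative (intent log)
 * read.
 */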
static void
dsl_pool_scrub_clean_done(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);

        if (zio->io_error && (zio->io_error != ECKSUM ||
            !(zio->io_flags & ZIO_FLAG_SPECULATIVE)))
                spa->spa_scrub_errors++;
        mutex_exit(&spa->spa_scrub_lock);
}

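/*
 * The SCRUB_FUNC_CLEAN callback: account for the block, then issue a
 * self-healing scrub read of it if any copy needs one.  A scrub reads
 * every block in range; a resilver only reads blocks that a DTL says
 * are missing (gang blocks are always read, since their members may
 * span vdevs).  I/O is throttled to spa_scrub_maxinflight outstanding
 * zios.
 */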
static int
dsl_pool_scrub_clean_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_t *zb)
{
        size_t size = BP_GET_PSIZE(bp);
        spa_t *spa = dp->dp_spa;
        boolean_t needs_io;
        int zio_flags = ZIO_FLAG_SCRUB_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
        int zio_priority;

        ASSERT(bp->blk_birth > dp->dp_scrub_min_txg);

        if (bp->blk_birth >= dp->dp_scrub_max_txg)
                return (0);

        count_block(dp->dp_blkstats, bp);

        if (dp->dp_scrub_isresilver == 0) {
                /* It's a scrub */
                zio_flags |= ZIO_FLAG_SCRUB;
                zio_priority = ZIO_PRIORITY_SCRUB;
                needs_io = B_TRUE;
        } else {
                /* It's a resilver */
                zio_flags |= ZIO_FLAG_RESILVER;
                zio_priority = ZIO_PRIORITY_RESILVER;
                needs_io = B_FALSE;
        }

        /* If it's an intent log block, failure is expected. */
        if (zb->zb_level == -1 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
                zio_flags |= ZIO_FLAG_SPECULATIVE;

        for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
                vdev_t *vd = vdev_lookup_top(spa,
                    DVA_GET_VDEV(&bp->blk_dva[d]));

                /*
                 * Keep track of how much data we've examined so that
                 * zpool(1M) status can make useful progress reports.
                 */
                mutex_enter(&vd->vdev_stat_lock);
                vd->vdev_stat.vs_scrub_examined +=
                    DVA_GET_ASIZE(&bp->blk_dva[d]);
                mutex_exit(&vd->vdev_stat_lock);

                /* if it's a resilver, this may not be in the target range */
                if (!needs_io) {
                        if (DVA_GET_GANG(&bp->blk_dva[d])) {
                                /*
                                 * Gang members may be spread across multiple
                                 * vdevs, so the best estimate we have is the
                                 * scrub range, which has already been checked.
                                 * XXX -- it would be better to change our
                                 * allocation policy to ensure that all
                                 * gang members reside on the same vdev.
                                 */
                                needs_io = B_TRUE;
                        } else {
                                needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
                                    bp->blk_birth, 1);
                        }
                }
        }

        if (needs_io && !zfs_no_scrub_io) {
                void *data = zio_data_buf_alloc(size);

                mutex_enter(&spa->spa_scrub_lock);
                while (spa->spa_scrub_inflight >= spa->spa_scrub_maxinflight)
                        cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
                spa->spa_scrub_inflight++;
                mutex_exit(&spa->spa_scrub_lock);

                zio_nowait(zio_read(NULL, spa, bp, data, size,
                    dsl_pool_scrub_clean_done, NULL, zio_priority,
                    zio_flags, zb));
        }

        /* do not relocate this block */
        return (0);
}

int
dsl_pool_scrub_clean(dsl_pool_t *dp)
{
        spa_t *spa = dp->dp_spa;

        /*
         * Purge all vdev caches.  We do this here rather than in sync
         * context because this requires a writer lock on the spa_config
         * lock, which we can't do from sync context.  The
         * spa_scrub_reopen flag indicates that vdev_open() should not
         * attempt to start another scrub.
         */
        spa_vdev_state_enter(spa);
        spa->spa_scrub_reopen = B_TRUE;
        vdev_reopen(spa->spa_root_vdev);
        spa->spa_scrub_reopen = B_FALSE;
        (void) spa_vdev_state_exit(spa, NULL, 0);

        return (dsl_pool_scrub_setup(dp, SCRUB_FUNC_CLEAN));
}