4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2018, Intel Corporation.
24 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
27 #include <sys/vdev_impl.h>
28 #include <sys/dsl_scan.h>
29 #include <sys/spa_impl.h>
30 #include <sys/metaslab_impl.h>
31 #include <sys/vdev_rebuild.h>
33 #include <sys/dmu_tx.h>
38 * This file contains the sequential reconstruction implementation for
39 * resilvering. This form of resilvering is internally referred to as device
40 * rebuild to avoid conflating it with the traditional healing reconstruction
41 * performed by the dsl scan code.
43 * When replacing a device, or scrubbing the pool, ZFS has historically used
44 * a process called resilvering which is a form of healing reconstruction.
45 * This approach has the advantage that as blocks are read from disk their
46 * checksums can be immediately verified and the data repaired. Unfortunately,
47 * it also results in a random IO pattern to the disk even when extra care
48 * is taken to sequentialize the IO as much as possible. This substantially
49 * increases the time required to resilver the pool and restore redundancy.
51 * For mirrored devices it's possible to implement an alternate sequential
52 * reconstruction strategy when resilvering. Sequential reconstruction
53 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
54 * order without verifying the checksum. After this phase completes a second
55 * scrub phase is started to verify all of the checksums. This two phase
56 * process will take longer than the healing reconstruction described above.
57 * However, it has that advantage that after the reconstruction first phase
58 * completes redundancy has been restored. At this point the pool can incur
59 * another device failure without risking data loss.
61 * There are a few noteworthy limitations and other advantages of resilvering
62 * using sequential reconstruction vs healing reconstruction.
66 * - Only supported for mirror vdev types. Due to the variable stripe
67 * width used by raidz sequential reconstruction is not possible.
69 * - Block checksums are not verified during sequential reconstuction.
70 * Similar to traditional RAID the parity/mirror data is reconstructed
71 * but cannot be immediately double checked. For this reason when the
72 * last active resilver completes the pool is automatically scrubbed.
74 * - Deferred resilvers using sequential reconstruction are not currently
75 * supported. When adding another vdev to an active top-level resilver
76 * it must be restarted.
80 * - Sequential reconstuction is performed in LBA order which may be faster
81 * than healing reconstuction particularly when using using HDDs (or
82 * especially with SMR devices). Only allocated capacity is resilvered.
84 * - Sequential reconstruction is not constrained by ZFS block boundaries.
85 * This allows it to issue larger IOs to disk which span multiple blocks
86 * allowing all of these logical blocks to be repaired with a single IO.
88 * - Unlike a healing resilver or scrub which are pool wide operations,
89 * sequential reconstruction is handled by the top-level mirror vdevs.
90 * This allows for it to be started or canceled on a top-level vdev
91 * without impacting any other top-level vdevs in the pool.
93 * - Data only referenced by a pool checkpoint will be repaired because
94 * that space is reflected in the space maps. This differs for a
95 * healing resilver or scrub which will not repair that data.
100 * Maximum number of queued rebuild I/Os top-level vdev. The number of
101 * concurrent rebuild I/Os issued to the device is controlled by the
102 * zfs_vdev_rebuild_min_active and zfs_vdev_rebuild_max_active module
105 unsigned int zfs_rebuild_queue_limit = 20;
108 * Size of rebuild reads; defaults to 1MiB and is capped at SPA_MAXBLOCKSIZE.
110 unsigned long zfs_rebuild_max_segment = 1024 * 1024;
113 * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
115 static void vdev_rebuild_thread(void *arg);
118 * Clear the per-vdev rebuild bytes value for a vdev tree.
121 clear_rebuild_bytes(vdev_t *vd)
123 vdev_stat_t *vs = &vd->vdev_stat;
125 for (uint64_t i = 0; i < vd->vdev_children; i++)
126 clear_rebuild_bytes(vd->vdev_child[i]);
128 mutex_enter(&vd->vdev_stat_lock);
129 vs->vs_rebuild_processed = 0;
130 mutex_exit(&vd->vdev_stat_lock);
134 * Determines whether a vdev_rebuild_thread() should be stopped.
137 vdev_rebuild_should_stop(vdev_t *vd)
139 return (!vdev_writeable(vd) || vd->vdev_removing ||
140 vd->vdev_rebuild_exit_wanted ||
141 vd->vdev_rebuild_cancel_wanted ||
142 vd->vdev_rebuild_reset_wanted);
146 * Determine if the rebuild should be canceled. This may happen when all
147 * vdevs with MISSING DTLs are detached.
150 vdev_rebuild_should_cancel(vdev_t *vd)
152 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
153 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
155 if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
162 * The sync task for updating the on-disk state of a rebuild. This is
163 * scheduled by vdev_rebuild_range().
166 vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
168 int vdev_id = (uintptr_t)arg;
169 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
170 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
171 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
172 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
173 uint64_t txg = dmu_tx_get_txg(tx);
175 mutex_enter(&vd->vdev_rebuild_lock);
177 if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
178 vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
179 vr->vr_scan_offset[txg & TXG_MASK] = 0;
182 vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
183 NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);
185 VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
186 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
187 REBUILD_PHYS_ENTRIES, vrp, tx));
189 mutex_exit(&vd->vdev_rebuild_lock);
193 * Initialize the on-disk state for a new rebuild, start the rebuild thread.
196 vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
198 int vdev_id = (uintptr_t)arg;
199 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
200 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
201 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
202 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
204 ASSERT(vd->vdev_rebuilding);
206 spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
208 mutex_enter(&vd->vdev_rebuild_lock);
209 bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
210 vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
211 vrp->vrp_min_txg = 0;
212 vrp->vrp_max_txg = dmu_tx_get_txg(tx);
213 vrp->vrp_start_time = gethrestime_sec();
214 vrp->vrp_scan_time_ms = 0;
215 vr->vr_prev_scan_time_ms = 0;
218 * Rebuilds are currently only used when replacing a device, in which
219 * case there must be DTL_MISSING entries. In the future, we could
220 * allow rebuilds to be used in a way similar to a scrub. This would
221 * be useful because it would allow us to rebuild the space used by
224 VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));
226 VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
227 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
228 REBUILD_PHYS_ENTRIES, vrp, tx));
230 spa_history_log_internal(spa, "rebuild", tx,
231 "vdev_id=%llu vdev_guid=%llu started",
232 (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
234 ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
235 vd->vdev_rebuild_thread = thread_create(NULL, 0,
236 vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
238 mutex_exit(&vd->vdev_rebuild_lock);
242 vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, char *name)
244 nvlist_t *aux = fnvlist_alloc();
246 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
247 spa_event_notify(spa, vd, aux, name);
252 * Called to request that a new rebuild be started. The feature will remain
253 * active for the duration of the rebuild, then revert to the enabled state.
256 vdev_rebuild_initiate(vdev_t *vd)
258 spa_t *spa = vd->vdev_spa;
260 ASSERT(vd->vdev_top == vd);
261 ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
262 ASSERT(!vd->vdev_rebuilding);
264 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
265 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
267 vd->vdev_rebuilding = B_TRUE;
269 dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
270 (void *)(uintptr_t)vd->vdev_id, tx);
273 vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
277 * Update the on-disk state to completed when a rebuild finishes.
280 vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
282 int vdev_id = (uintptr_t)arg;
283 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
284 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
285 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
286 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
288 mutex_enter(&vd->vdev_rebuild_lock);
289 vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
290 vrp->vrp_end_time = gethrestime_sec();
292 VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
293 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
294 REBUILD_PHYS_ENTRIES, vrp, tx));
296 vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
297 spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
299 spa_history_log_internal(spa, "rebuild", tx,
300 "vdev_id=%llu vdev_guid=%llu complete",
301 (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
302 vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);
304 /* Handles detaching of spares */
305 spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
306 vd->vdev_rebuilding = B_FALSE;
307 mutex_exit(&vd->vdev_rebuild_lock);
309 spa_notify_waiters(spa);
310 cv_broadcast(&vd->vdev_rebuild_cv);
314 * Update the on-disk state to canceled when a rebuild finishes.
317 vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
319 int vdev_id = (uintptr_t)arg;
320 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
321 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
322 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
323 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
325 mutex_enter(&vd->vdev_rebuild_lock);
326 vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
327 vrp->vrp_end_time = gethrestime_sec();
329 VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
330 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
331 REBUILD_PHYS_ENTRIES, vrp, tx));
333 spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);
335 spa_history_log_internal(spa, "rebuild", tx,
336 "vdev_id=%llu vdev_guid=%llu canceled",
337 (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
338 vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);
340 vd->vdev_rebuild_cancel_wanted = B_FALSE;
341 vd->vdev_rebuilding = B_FALSE;
342 mutex_exit(&vd->vdev_rebuild_lock);
344 spa_notify_waiters(spa);
345 cv_broadcast(&vd->vdev_rebuild_cv);
349 * Resets the progress of a running rebuild. This will occur when a new
350 * vdev is added to rebuild.
353 vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
355 int vdev_id = (uintptr_t)arg;
356 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
357 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
358 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
359 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
361 mutex_enter(&vd->vdev_rebuild_lock);
363 ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
364 ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
366 vrp->vrp_last_offset = 0;
367 vrp->vrp_min_txg = 0;
368 vrp->vrp_max_txg = dmu_tx_get_txg(tx);
369 vrp->vrp_bytes_scanned = 0;
370 vrp->vrp_bytes_issued = 0;
371 vrp->vrp_bytes_rebuilt = 0;
372 vrp->vrp_bytes_est = 0;
373 vrp->vrp_scan_time_ms = 0;
374 vr->vr_prev_scan_time_ms = 0;
376 /* See vdev_rebuild_initiate_sync comment */
377 VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));
379 VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
380 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
381 REBUILD_PHYS_ENTRIES, vrp, tx));
383 spa_history_log_internal(spa, "rebuild", tx,
384 "vdev_id=%llu vdev_guid=%llu reset",
385 (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
387 vd->vdev_rebuild_reset_wanted = B_FALSE;
388 ASSERT(vd->vdev_rebuilding);
390 vd->vdev_rebuild_thread = thread_create(NULL, 0,
391 vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
393 mutex_exit(&vd->vdev_rebuild_lock);
397 * Clear the last rebuild status.
400 vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
402 int vdev_id = (uintptr_t)arg;
403 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
404 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
405 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
406 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
407 objset_t *mos = spa_meta_objset(spa);
409 mutex_enter(&vd->vdev_rebuild_lock);
411 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
412 vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
413 mutex_exit(&vd->vdev_rebuild_lock);
417 clear_rebuild_bytes(vd);
418 bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
420 if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
421 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
422 VERIFY0(zap_update(mos, vd->vdev_top_zap,
423 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
424 REBUILD_PHYS_ENTRIES, vrp, tx));
427 mutex_exit(&vd->vdev_rebuild_lock);
431 * The zio_done_func_t callback for each rebuild I/O issued. It's responsible
432 * for updating the rebuild stats and limiting the number of in flight I/Os.
435 vdev_rebuild_cb(zio_t *zio)
437 vdev_rebuild_t *vr = zio->io_private;
438 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
439 vdev_t *vd = vr->vr_top_vdev;
441 mutex_enter(&vd->vdev_rebuild_io_lock);
442 if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
444 * The I/O failed because the top-level vdev was unavailable.
445 * Attempt to roll back to the last completed offset, in order
446 * resume from the correct location if the pool is resumed.
447 * (This works because spa_sync waits on spa_txg_zio before
448 * it runs sync tasks.)
450 uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
451 *off = MIN(*off, zio->io_offset);
452 } else if (zio->io_error) {
456 abd_free(zio->io_abd);
458 ASSERT3U(vd->vdev_rebuild_inflight, >, 0);
459 vd->vdev_rebuild_inflight--;
460 cv_broadcast(&vd->vdev_rebuild_io_cv);
461 mutex_exit(&vd->vdev_rebuild_io_lock);
463 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
467 * Rebuild the data in this range by constructing a special dummy block
468 * pointer for the given range. It has no relation to any existing blocks
469 * in the pool. But by disabling checksum verification and issuing a scrub
470 * I/O mirrored vdevs will replicate the block using any available mirror
474 vdev_rebuild_rebuild_block(vdev_rebuild_t *vr, uint64_t start, uint64_t asize,
477 vdev_t *vd = vr->vr_top_vdev;
478 spa_t *spa = vd->vdev_spa;
479 uint64_t psize = asize;
481 ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
482 vd->vdev_ops == &vdev_replacing_ops ||
483 vd->vdev_ops == &vdev_spare_ops);
485 blkptr_t blk, *bp = &blk;
488 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
489 DVA_SET_OFFSET(&bp->blk_dva[0], start);
490 DVA_SET_GANG(&bp->blk_dva[0], 0);
491 DVA_SET_ASIZE(&bp->blk_dva[0], asize);
493 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
494 BP_SET_LSIZE(bp, psize);
495 BP_SET_PSIZE(bp, psize);
496 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
497 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
498 BP_SET_TYPE(bp, DMU_OT_NONE);
501 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
504 * We increment the issued bytes by the asize rather than the psize
505 * so the scanned and issued bytes may be directly compared. This
506 * is consistent with the scrub/resilver issued reporting.
508 vr->vr_pass_bytes_issued += asize;
509 vr->vr_rebuild_phys.vrp_bytes_issued += asize;
511 zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, bp,
512 abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
513 ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
514 ZIO_FLAG_RESILVER, NULL));
518 * Issues a rebuild I/O and takes care of rate limiting the number of queued
519 * rebuild I/Os. The provided start and size must be properly aligned for the
520 * top-level vdev type being rebuilt.
523 vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
525 uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
526 vdev_t *vd = vr->vr_top_vdev;
527 spa_t *spa = vd->vdev_spa;
529 ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
530 ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);
532 vr->vr_pass_bytes_scanned += size;
533 vr->vr_rebuild_phys.vrp_bytes_scanned += size;
535 mutex_enter(&vd->vdev_rebuild_io_lock);
537 /* Limit in flight rebuild I/Os */
538 while (vd->vdev_rebuild_inflight >= zfs_rebuild_queue_limit)
539 cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);
541 vd->vdev_rebuild_inflight++;
542 mutex_exit(&vd->vdev_rebuild_io_lock);
544 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
545 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
546 uint64_t txg = dmu_tx_get_txg(tx);
548 spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
549 mutex_enter(&vd->vdev_rebuild_lock);
551 /* This is the first I/O for this txg. */
552 if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
553 vr->vr_scan_offset[txg & TXG_MASK] = start;
554 dsl_sync_task_nowait(spa_get_dsl(spa),
555 vdev_rebuild_update_sync,
556 (void *)(uintptr_t)vd->vdev_id, tx);
559 /* When exiting write out our progress. */
560 if (vdev_rebuild_should_stop(vd)) {
561 mutex_enter(&vd->vdev_rebuild_io_lock);
562 vd->vdev_rebuild_inflight--;
563 mutex_exit(&vd->vdev_rebuild_io_lock);
564 spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
565 mutex_exit(&vd->vdev_rebuild_lock);
567 return (SET_ERROR(EINTR));
569 mutex_exit(&vd->vdev_rebuild_lock);
571 vr->vr_scan_offset[txg & TXG_MASK] = start + size;
572 vdev_rebuild_rebuild_block(vr, start, size, txg);
580 * Split range into legally-sized logical chunks given the constraints of the
581 * top-level mirror vdev type.
584 vdev_rebuild_chunk_size(vdev_t *vd, uint64_t start, uint64_t size)
586 uint64_t chunk_size, max_asize, max_segment;
588 ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
589 vd->vdev_ops == &vdev_replacing_ops ||
590 vd->vdev_ops == &vdev_spare_ops);
592 max_segment = MIN(P2ROUNDUP(zfs_rebuild_max_segment,
593 1 << vd->vdev_ashift), SPA_MAXBLOCKSIZE);
594 max_asize = vdev_psize_to_asize(vd, max_segment);
595 chunk_size = MIN(size, max_asize);
601 * Issues rebuild I/Os for all ranges in the provided vr->vr_tree range tree.
604 vdev_rebuild_ranges(vdev_rebuild_t *vr)
606 vdev_t *vd = vr->vr_top_vdev;
607 zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
608 zfs_btree_index_t idx;
611 for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
612 rs = zfs_btree_next(t, &idx, &idx)) {
613 uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
614 uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;
617 * zfs_scan_suspend_progress can be set to disable rebuild
618 * progress for testing. See comment in dsl_scan_sync().
620 while (zfs_scan_suspend_progress &&
621 !vdev_rebuild_should_stop(vd)) {
628 chunk_size = vdev_rebuild_chunk_size(vd, start, size);
630 error = vdev_rebuild_range(vr, start, chunk_size);
643 * Calculates the estimated capacity which remains to be scanned. Since
644 * we traverse the pool in metaslab order only allocated capacity beyond
645 * the vrp_last_offset need be considered. All lower offsets must have
646 * already been rebuilt and are thus already included in vrp_bytes_scanned.
649 vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
651 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
652 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
653 uint64_t bytes_est = vrp->vrp_bytes_scanned;
655 if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
658 for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
659 metaslab_t *msp = vd->vdev_ms[i];
661 mutex_enter(&msp->ms_lock);
662 bytes_est += metaslab_allocated_space(msp);
663 mutex_exit(&msp->ms_lock);
666 vrp->vrp_bytes_est = bytes_est;
670 * Load from disk the top-level vdev's rebuild information.
673 vdev_rebuild_load(vdev_t *vd)
675 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
676 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
677 spa_t *spa = vd->vdev_spa;
680 mutex_enter(&vd->vdev_rebuild_lock);
681 vd->vdev_rebuilding = B_FALSE;
683 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
684 bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
685 mutex_exit(&vd->vdev_rebuild_lock);
686 return (SET_ERROR(ENOTSUP));
689 ASSERT(vd->vdev_top == vd);
691 err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
692 VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
693 REBUILD_PHYS_ENTRIES, vrp);
696 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
697 * not prevent a pool from being imported. Clear the rebuild
698 * status allowing a new resilver/rebuild to be started.
700 if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
701 bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
703 mutex_exit(&vd->vdev_rebuild_lock);
707 vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
708 vr->vr_top_vdev = vd;
710 mutex_exit(&vd->vdev_rebuild_lock);
716 * Each scan thread is responsible for rebuilding a top-level vdev. The
717 * rebuild progress in tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
720 vdev_rebuild_thread(void *arg)
723 spa_t *spa = vd->vdev_spa;
727 * If there's a scrub in process request that it be stopped. This
728 * is not required for a correct rebuild, but we do want rebuilds to
729 * emulate the resilver behavior as much as possible.
731 dsl_pool_t *dsl = spa_get_dsl(spa);
732 if (dsl_scan_scrubbing(dsl))
733 dsl_scan_cancel(dsl);
735 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
736 mutex_enter(&vd->vdev_rebuild_lock);
738 ASSERT3P(vd->vdev_top, ==, vd);
739 ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
740 ASSERT(vd->vdev_rebuilding);
741 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
742 ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);
743 ASSERT3B(vd->vdev_rebuild_reset_wanted, ==, B_FALSE);
745 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
746 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
747 vr->vr_top_vdev = vd;
748 vr->vr_scan_msp = NULL;
749 vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
750 vr->vr_pass_start_time = gethrtime();
751 vr->vr_pass_bytes_scanned = 0;
752 vr->vr_pass_bytes_issued = 0;
754 uint64_t update_est_time = gethrtime();
755 vdev_rebuild_update_bytes_est(vd, 0);
757 clear_rebuild_bytes(vr->vr_top_vdev);
759 mutex_exit(&vd->vdev_rebuild_lock);
762 * Systematically walk the metaslabs and issue rebuild I/Os for
763 * all ranges in the allocated space map.
765 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
766 metaslab_t *msp = vd->vdev_ms[i];
767 vr->vr_scan_msp = msp;
770 * Removal of vdevs from the vdev tree may eliminate the need
771 * for the rebuild, in which case it should be canceled. The
772 * vdev_rebuild_cancel_wanted flag is set until the sync task
773 * completes. This may be after the rebuild thread exits.
775 if (vdev_rebuild_should_cancel(vd)) {
776 vd->vdev_rebuild_cancel_wanted = B_TRUE;
781 ASSERT0(range_tree_space(vr->vr_scan_tree));
784 * Disable any new allocations to this metaslab and wait
785 * for any writes inflight to complete. This is needed to
786 * ensure all allocated ranges are rebuilt.
788 metaslab_disable(msp);
789 spa_config_exit(spa, SCL_CONFIG, FTAG);
790 txg_wait_synced(dsl, 0);
792 mutex_enter(&msp->ms_sync_lock);
793 mutex_enter(&msp->ms_lock);
796 * When a metaslab has been allocated from read its allocated
797 * ranges from the space map object in to the vr_scan_tree.
798 * Then add inflight / unflushed ranges and remove inflight /
799 * unflushed frees. This is the minimum range to be rebuilt.
801 if (msp->ms_sm != NULL) {
802 VERIFY0(space_map_load(msp->ms_sm,
803 vr->vr_scan_tree, SM_ALLOC));
805 for (int i = 0; i < TXG_SIZE; i++) {
806 ASSERT0(range_tree_space(
807 msp->ms_allocating[i]));
810 range_tree_walk(msp->ms_unflushed_allocs,
811 range_tree_add, vr->vr_scan_tree);
812 range_tree_walk(msp->ms_unflushed_frees,
813 range_tree_remove, vr->vr_scan_tree);
816 * Remove ranges which have already been rebuilt based
817 * on the last offset. This can happen when restarting
818 * a scan after exporting and re-importing the pool.
820 range_tree_clear(vr->vr_scan_tree, 0,
821 vrp->vrp_last_offset);
824 mutex_exit(&msp->ms_lock);
825 mutex_exit(&msp->ms_sync_lock);
828 * To provide an accurate estimate re-calculate the estimated
829 * size every 5 minutes to account for recent allocations and
830 * frees made space maps which have not yet been rebuilt.
832 if (gethrtime() > update_est_time + SEC2NSEC(300)) {
833 update_est_time = gethrtime();
834 vdev_rebuild_update_bytes_est(vd, i);
838 * Walk the allocated space map and issue the rebuild I/O.
840 error = vdev_rebuild_ranges(vr);
841 range_tree_vacate(vr->vr_scan_tree, NULL, NULL);
843 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
844 metaslab_enable(msp, B_FALSE, B_FALSE);
850 range_tree_destroy(vr->vr_scan_tree);
851 spa_config_exit(spa, SCL_CONFIG, FTAG);
853 /* Wait for any remaining rebuild I/O to complete */
854 mutex_enter(&vd->vdev_rebuild_io_lock);
855 while (vd->vdev_rebuild_inflight > 0)
856 cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);
858 mutex_exit(&vd->vdev_rebuild_io_lock);
860 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
862 dsl_pool_t *dp = spa_get_dsl(spa);
863 dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
864 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
866 mutex_enter(&vd->vdev_rebuild_lock);
869 * After a successful rebuild clear the DTLs of all ranges
870 * which were missing when the rebuild was started. These
871 * ranges must have been rebuilt as a consequence of rebuilding
872 * all allocated space. Note that unlike a scrub or resilver
873 * the rebuild operation will reconstruct data only referenced
874 * by a pool checkpoint. See the dsl_scan_done() comments.
876 dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
877 (void *)(uintptr_t)vd->vdev_id, tx);
878 } else if (vd->vdev_rebuild_cancel_wanted) {
880 * The rebuild operation was canceled. This will occur when
881 * a device participating in the rebuild is detached.
883 dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
884 (void *)(uintptr_t)vd->vdev_id, tx);
885 } else if (vd->vdev_rebuild_reset_wanted) {
887 * Reset the running rebuild without canceling and restarting
888 * it. This will occur when a new device is attached and must
889 * participate in the rebuild.
891 dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
892 (void *)(uintptr_t)vd->vdev_id, tx);
895 * The rebuild operation should be suspended. This may occur
896 * when detaching a child vdev or when exporting the pool. The
897 * rebuild is left in the active state so it will be resumed.
899 ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
900 vd->vdev_rebuilding = B_FALSE;
905 vd->vdev_rebuild_thread = NULL;
906 mutex_exit(&vd->vdev_rebuild_lock);
907 spa_config_exit(spa, SCL_CONFIG, FTAG);
909 cv_broadcast(&vd->vdev_rebuild_cv);
915 * Returns B_TRUE if any top-level vdev are rebuilding.
918 vdev_rebuild_active(vdev_t *vd)
920 spa_t *spa = vd->vdev_spa;
921 boolean_t ret = B_FALSE;
923 if (vd == spa->spa_root_vdev) {
924 for (uint64_t i = 0; i < vd->vdev_children; i++) {
925 ret = vdev_rebuild_active(vd->vdev_child[i]);
929 } else if (vd->vdev_top_zap != 0) {
930 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
931 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
933 mutex_enter(&vd->vdev_rebuild_lock);
934 ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
935 mutex_exit(&vd->vdev_rebuild_lock);
942 * Start a rebuild operation. The rebuild may be restarted when the
943 * top-level vdev is currently actively rebuilding.
946 vdev_rebuild(vdev_t *vd)
948 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
949 vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;
951 ASSERT(vd->vdev_top == vd);
952 ASSERT(vdev_is_concrete(vd));
953 ASSERT(!vd->vdev_removing);
954 ASSERT(spa_feature_is_enabled(vd->vdev_spa,
955 SPA_FEATURE_DEVICE_REBUILD));
957 mutex_enter(&vd->vdev_rebuild_lock);
958 if (vd->vdev_rebuilding) {
959 ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);
962 * Signal a running rebuild operation that it should restart
963 * from the beginning because a new device was attached. The
964 * vdev_rebuild_reset_wanted flag is set until the sync task
965 * completes. This may be after the rebuild thread exits.
967 if (!vd->vdev_rebuild_reset_wanted)
968 vd->vdev_rebuild_reset_wanted = B_TRUE;
970 vdev_rebuild_initiate(vd);
972 mutex_exit(&vd->vdev_rebuild_lock);
976 vdev_rebuild_restart_impl(vdev_t *vd)
978 spa_t *spa = vd->vdev_spa;
980 if (vd == spa->spa_root_vdev) {
981 for (uint64_t i = 0; i < vd->vdev_children; i++)
982 vdev_rebuild_restart_impl(vd->vdev_child[i]);
984 } else if (vd->vdev_top_zap != 0) {
985 vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
986 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
988 mutex_enter(&vd->vdev_rebuild_lock);
989 if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
990 vdev_writeable(vd) && !vd->vdev_rebuilding) {
991 ASSERT(spa_feature_is_active(spa,
992 SPA_FEATURE_DEVICE_REBUILD));
993 vd->vdev_rebuilding = B_TRUE;
994 vd->vdev_rebuild_thread = thread_create(NULL, 0,
995 vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
998 mutex_exit(&vd->vdev_rebuild_lock);
1003 * Conditionally restart all of the vdev_rebuild_thread's for a pool. The
1004 * feature flag must be active and the rebuild in the active state. This
1005 * cannot be used to start a new rebuild.
1008 vdev_rebuild_restart(spa_t *spa)
1010 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1012 vdev_rebuild_restart_impl(spa->spa_root_vdev);
1016 * Stop and wait for all of the vdev_rebuild_thread's associated with the
1017 * vdev tree provide to be terminated (canceled or stopped).
1020 vdev_rebuild_stop_wait(vdev_t *vd)
1022 spa_t *spa = vd->vdev_spa;
1024 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1026 if (vd == spa->spa_root_vdev) {
1027 for (uint64_t i = 0; i < vd->vdev_children; i++)
1028 vdev_rebuild_stop_wait(vd->vdev_child[i]);
1030 } else if (vd->vdev_top_zap != 0) {
1031 ASSERT(vd == vd->vdev_top);
1033 mutex_enter(&vd->vdev_rebuild_lock);
1034 if (vd->vdev_rebuild_thread != NULL) {
1035 vd->vdev_rebuild_exit_wanted = B_TRUE;
1036 while (vd->vdev_rebuilding) {
1037 cv_wait(&vd->vdev_rebuild_cv,
1038 &vd->vdev_rebuild_lock);
1040 vd->vdev_rebuild_exit_wanted = B_FALSE;
1042 mutex_exit(&vd->vdev_rebuild_lock);
1047 * Stop all rebuild operations but leave them in the active state so they
1048 * will be resumed when importing the pool.
1051 vdev_rebuild_stop_all(spa_t *spa)
1053 vdev_rebuild_stop_wait(spa->spa_root_vdev);
1057 * Rebuild statistics reported per top-level vdev.
1060 vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
1062 spa_t *spa = tvd->vdev_spa;
1064 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
1065 return (SET_ERROR(ENOTSUP));
1067 if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
1068 return (SET_ERROR(EINVAL));
1070 int error = zap_contains(spa_meta_objset(spa),
1071 tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);
1073 if (error == ENOENT) {
1074 bzero(vrs, sizeof (vdev_rebuild_stat_t));
1075 vrs->vrs_state = VDEV_REBUILD_NONE;
1077 } else if (error == 0) {
1078 vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
1079 vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
1081 mutex_enter(&tvd->vdev_rebuild_lock);
1082 vrs->vrs_state = vrp->vrp_rebuild_state;
1083 vrs->vrs_start_time = vrp->vrp_start_time;
1084 vrs->vrs_end_time = vrp->vrp_end_time;
1085 vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
1086 vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
1087 vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
1088 vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
1089 vrs->vrs_bytes_est = vrp->vrp_bytes_est;
1090 vrs->vrs_errors = vrp->vrp_errors;
1091 vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
1092 vr->vr_pass_start_time);
1093 vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
1094 vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
1095 mutex_exit(&tvd->vdev_rebuild_lock);
1102 ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, ULONG, ZMOD_RW,
1103 "Max segment size in bytes of rebuild reads");