4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
27 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
30 #include <sys/dmu_tx.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/metaslab.h>
34 #include <sys/metaslab_impl.h>
35 #include <sys/uberblock_impl.h>
38 #include <sys/bpobj.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/dsl_dir.h>
43 #include <sys/zfeature.h>
44 #include <sys/vdev_indirect_births.h>
45 #include <sys/vdev_indirect_mapping.h>
49 * This file contains the necessary logic to remove vdevs from a
50 * storage pool. Currently, the only devices that can be removed
51 * are log, cache, and spare devices; and top-level vdevs from a pool
52 * without raidz. (Note that members of a mirror can also be removed
53 * by the detach operation.)
55 * Log vdevs are removed by evacuating them and then turning the vdev
56 * into a hole vdev while holding spa config locks.
58 * Top level vdevs are removed and converted into an indirect vdev via
59 * a multi-step process:
61 * - Disable allocations from this device (spa_vdev_remove_top).
63 * - From a new thread (spa_vdev_remove_thread), copy data from
64 * the removing vdev to a different vdev. The copy happens in open
65 * context (spa_vdev_copy_impl) and issues a sync task
66 * (vdev_mapping_sync) so the sync thread can update the partial
67 * indirect mappings in core and on disk.
69 * - If a free happens during a removal, it is freed from the
70 * removing vdev, and if it has already been copied, from the new
71 * location as well (free_from_removing_vdev).
73 * - After the removal is completed, the copy thread converts the vdev
74 * into an indirect vdev (vdev_remove_complete) before instructing
75 * the sync thread to destroy the space maps and finish the removal
76 * (spa_finish_removal).
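/*
 * For orientation (an illustrative summary, not part of the original flow
 * description): an administrator-initiated removal typically reaches this
 * file through spa_vdev_remove() near the bottom of the file, which
 * dispatches to spa_vdev_remove_aux() for spare and cache devices,
 * spa_vdev_remove_log() for log devices, and spa_vdev_remove_top() for
 * top-level vdevs.
 */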
79 typedef struct vdev_copy_arg {
81 uint64_t vca_outstanding_bytes;
86 typedef struct vdev_copy_seg_arg {
87 vdev_copy_arg_t *vcsa_copy_arg;
90 blkptr_t *vcsa_dest_bp;
91 } vdev_copy_seg_arg_t;
94 * The maximum amount of data we're allowed to copy from a device
95 * at a time when removing it.
97 int zfs_remove_max_copy_bytes = 8 * 1024 * 1024;
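/*
 * How this limit is enforced (see spa_vdev_copy_segment() and
 * spa_vdev_remove_thread() below): each issued copy adds its size to
 * vca_outstanding_bytes, and the removal thread cv_wait()s on vca_cv while
 * vca_outstanding_bytes exceeds zfs_remove_max_copy_bytes; each write
 * completion subtracts its size and signals the cv.
 */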
100 * The largest contiguous segment that we will attempt to allocate when
101 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
102 * there is a performance problem with attempting to allocate large blocks,
103 * consider decreasing this.
105 * Note: we will issue I/Os of up to this size. The mpt driver does not
106 * respond well to I/Os larger than 1MB, so we set this to 1MB. (When
107 * mpt processes an I/O larger than 1MB, it needs to do an allocation of
108 * 2 physically contiguous pages; if this allocation fails, mpt will drop
109 * the I/O and hang the device.)
111 int zfs_remove_max_segment = 1024 * 1024;
114 * This is used by the test suite so that it can ensure that certain
115 * actions happen while in the middle of a removal.
117 uint64_t zfs_remove_max_bytes_pause = UINT64_MAX;
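/*
 * How the pause takes effect (see the pause loop in
 * spa_vdev_remove_thread()): the removal thread sums sr_copied and the
 * per-txg svr_bytes_done[] counters and, while that total is at or above
 * zfs_remove_max_bytes_pause, it stalls without exiting until the tunable
 * is raised again or the thread is asked to exit.
 */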
119 #define VDEV_REMOVAL_ZAP_OBJS "lzap"
121 static void spa_vdev_remove_thread(void *arg);
124 spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
126 VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
127 DMU_POOL_DIRECTORY_OBJECT,
128 DMU_POOL_REMOVING, sizeof (uint64_t),
129 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
130 &spa->spa_removing_phys, tx));
134 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
136 for (int i = 0; i < count; i++) {
138 fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
140 if (guid == target_guid)
148 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
149 nvlist_t *dev_to_remove)
151 nvlist_t **newdev = NULL;
154 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
156 for (int i = 0, j = 0; i < count; i++) {
157 if (dev[i] == dev_to_remove)
159 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
162 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
163 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
165 for (int i = 0; i < count - 1; i++)
166 nvlist_free(newdev[i]);
169 kmem_free(newdev, (count - 1) * sizeof (void *));
172 static spa_vdev_removal_t *
173 spa_vdev_removal_create(vdev_t *vd)
175 spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
176 mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
177 cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
178 svr->svr_allocd_segs = range_tree_create(NULL, NULL);
181 for (int i = 0; i < TXG_SIZE; i++) {
182 svr->svr_frees[i] = range_tree_create(NULL, NULL);
183 list_create(&svr->svr_new_segments[i],
184 sizeof (vdev_indirect_mapping_entry_t),
185 offsetof(vdev_indirect_mapping_entry_t, vime_node));
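/*
 * Note on the per-txg arrays above (svr_frees[] and svr_new_segments[],
 * and the svr_bytes_done[] / svr_max_offset_to_sync[] counters used
 * later): they are TXG_SIZE-entry rings indexed by (txg & TXG_MASK), so a
 * slot is reused only after the txg that last used it has synced. For
 * example, with TXG_SIZE == 4, txg 100 and txg 104 map to the same slot.
 */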
192 spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
194 for (int i = 0; i < TXG_SIZE; i++) {
195 ASSERT0(svr->svr_bytes_done[i]);
196 ASSERT0(svr->svr_max_offset_to_sync[i]);
197 range_tree_destroy(svr->svr_frees[i]);
198 list_destroy(&svr->svr_new_segments[i]);
201 range_tree_destroy(svr->svr_allocd_segs);
202 mutex_destroy(&svr->svr_lock);
203 cv_destroy(&svr->svr_cv);
204 kmem_free(svr, sizeof (*svr));
208 * This is called as a synctask in the txg in which we will mark this vdev
209 * as removing (in the config stored in the MOS).
211 * It begins the evacuation of a toplevel vdev by:
212 * - initializing the spa_removing_phys which tracks this removal
213 * - computing the amount of space to remove for accounting purposes
214 * - dirtying all dbufs in the spa_config_object
215 * - creating the spa_vdev_removal
216 * - starting the spa_vdev_remove_thread
219 vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
222 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
223 spa_t *spa = vd->vdev_spa;
224 objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
225 spa_vdev_removal_t *svr = NULL;
226 uint64_t txg = dmu_tx_get_txg(tx);
228 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
229 svr = spa_vdev_removal_create(vd);
231 ASSERT(vd->vdev_removing);
232 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
234 spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
235 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
237 * By activating the OBSOLETE_COUNTS feature, we prevent
238 * the pool from being downgraded and ensure that the
239 * refcounts are precise.
241 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
243 VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
244 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
246 ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
249 vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
250 vd->vdev_indirect_mapping =
251 vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
252 vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
253 vd->vdev_indirect_births =
254 vdev_indirect_births_open(mos, vic->vic_births_object);
255 spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
256 spa->spa_removing_phys.sr_start_time = gethrestime_sec();
257 spa->spa_removing_phys.sr_end_time = 0;
258 spa->spa_removing_phys.sr_state = DSS_SCANNING;
259 spa->spa_removing_phys.sr_to_copy = 0;
260 spa->spa_removing_phys.sr_copied = 0;
263 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
264 * there may be space in the defer tree, which is free, but still
265 * counted in vs_alloc.
267 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
268 metaslab_t *ms = vd->vdev_ms[i];
269 if (ms->ms_sm == NULL)
273 * Sync tasks happen before metaslab_sync(), therefore
274 * smp_alloc and sm_alloc must be the same.
276 ASSERT3U(space_map_allocated(ms->ms_sm), ==,
277 ms->ms_sm->sm_phys->smp_alloc);
279 spa->spa_removing_phys.sr_to_copy +=
280 space_map_allocated(ms->ms_sm);
283 * Space which we are freeing this txg does not need to be copied.
286 spa->spa_removing_phys.sr_to_copy -=
287 range_tree_space(ms->ms_freeing);
289 ASSERT0(range_tree_space(ms->ms_freed));
290 for (int t = 0; t < TXG_SIZE; t++)
291 ASSERT0(range_tree_space(ms->ms_allocating[t]));
295 * Sync tasks are called before metaslab_sync(), so there should
296 * be no already-synced metaslabs in the TXG_CLEAN list.
298 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
300 spa_sync_removing_state(spa, tx);
303 * All blocks that we need to read the most recent mapping must be
304 * stored on concrete vdevs. Therefore, we must dirty anything that
305 * is read before spa_remove_init(). Specifically, the
306 * spa_config_object. (Note that although we already modified the
307 * spa_config_object in spa_sync_removing_state, that may not have
308 * modified all blocks of the object.)
310 dmu_object_info_t doi;
311 VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
312 for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
314 VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
315 offset, FTAG, &dbuf, 0));
316 dmu_buf_will_dirty(dbuf, tx);
317 offset += dbuf->db_size;
318 dmu_buf_rele(dbuf, FTAG);
322 * Now that we've allocated the im_object, dirty the vdev to ensure
323 * that the object gets written to the config on disk.
325 vdev_config_dirty(vd);
327 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
328 "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
329 vic->vic_mapping_object);
331 spa_history_log_internal(spa, "vdev remove started", tx,
332 "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
333 (vd->vdev_path != NULL) ? vd->vdev_path : "-");
335 * Setting spa_vdev_removal causes subsequent frees to call
336 * free_from_removing_vdev(). Note that we don't need any locking
337 * because we are the sync thread, and metaslab_free_impl() is only
338 * called from syncing context (potentially from a zio taskq thread,
339 * but in any case only when there are outstanding free i/os, which there are not).
342 ASSERT3P(spa->spa_vdev_removal, ==, NULL);
343 spa->spa_vdev_removal = svr;
344 svr->svr_thread = thread_create(NULL, 0,
345 spa_vdev_remove_thread, vd, 0, &p0, TS_RUN, minclsyspri);
349 * When we are opening a pool, we must read the mapping for each
350 * indirect vdev in order from most recently removed to least
351 * recently removed. We do this because the blocks for the mapping
352 * of older indirect vdevs may be stored on more recently removed vdevs.
353 * In order to read each indirect mapping object, we must have
354 * initialized all more recently removed vdevs.
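/*
 * Worked example of the ordering requirement (illustrative vdev ids):
 * suppose vdev 3 was removed first and vdev 5 was removed later. Blocks
 * holding vdev 3's mapping object may since have been copied onto vdev 5
 * and remapped, so reading vdev 3's mapping requires vdev 5's mapping to
 * already be loaded. The loop in this function therefore follows the
 * sr_prev_indirect_vdev / vic_prev_indirect_vdev chain from the newest
 * removal backwards.
 */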
357 spa_remove_init(spa_t *spa)
361 error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
362 DMU_POOL_DIRECTORY_OBJECT,
363 DMU_POOL_REMOVING, sizeof (uint64_t),
364 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
365 &spa->spa_removing_phys);
367 if (error == ENOENT) {
368 spa->spa_removing_phys.sr_state = DSS_NONE;
369 spa->spa_removing_phys.sr_removing_vdev = -1;
370 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
371 spa->spa_indirect_vdevs_loaded = B_TRUE;
373 } else if (error != 0) {
377 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
379 * We are currently removing a vdev. Create and
380 * initialize a spa_vdev_removal_t from the bonus
381 * buffer of the removing vdev's vdev_im_object, and
382 * initialize its partial mapping.
384 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
385 vdev_t *vd = vdev_lookup_top(spa,
386 spa->spa_removing_phys.sr_removing_vdev);
387 spa_config_exit(spa, SCL_STATE, FTAG);
392 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
394 ASSERT(vdev_is_concrete(vd));
395 spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
396 ASSERT(svr->svr_vdev->vdev_removing);
398 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
399 spa->spa_meta_objset, vic->vic_mapping_object);
400 vd->vdev_indirect_births = vdev_indirect_births_open(
401 spa->spa_meta_objset, vic->vic_births_object);
403 spa->spa_vdev_removal = svr;
406 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
407 uint64_t indirect_vdev_id =
408 spa->spa_removing_phys.sr_prev_indirect_vdev;
409 while (indirect_vdev_id != UINT64_MAX) {
410 vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
411 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
413 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
414 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
415 spa->spa_meta_objset, vic->vic_mapping_object);
416 vd->vdev_indirect_births = vdev_indirect_births_open(
417 spa->spa_meta_objset, vic->vic_births_object);
419 indirect_vdev_id = vic->vic_prev_indirect_vdev;
421 spa_config_exit(spa, SCL_STATE, FTAG);
424 * Now that we've loaded all the indirect mappings, we can allow
425 * reads from other blocks (e.g. via predictive prefetch).
427 spa->spa_indirect_vdevs_loaded = B_TRUE;
432 spa_restart_removal(spa_t *spa)
434 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
440 * In general when this function is called there is no
441 * removal thread running. The only scenario where this
442 * is not true is during spa_import() where this function
443 * is called twice [once from spa_import_impl() and
444 * spa_async_resume()]. Thus, in the scenario where we
445 * import a pool that has an ongoing removal we don't
446 * want to spawn a second thread.
448 if (svr->svr_thread != NULL)
451 if (!spa_writeable(spa))
454 vdev_t *vd = svr->svr_vdev;
455 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
457 ASSERT3P(vd, !=, NULL);
458 ASSERT(vd->vdev_removing);
460 zfs_dbgmsg("restarting removal of %llu at count=%llu",
461 vd->vdev_id, vdev_indirect_mapping_num_entries(vim));
462 svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, vd,
463 0, &p0, TS_RUN, minclsyspri);
467 * Process freeing from a device which is in the middle of being removed.
468 * We must handle this carefully so that we do not attempt to copy freed data,
469 * and we correctly free already-copied data.
472 free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
474 spa_t *spa = vd->vdev_spa;
475 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
476 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
477 uint64_t txg = spa_syncing_txg(spa);
478 uint64_t max_offset_yet = 0;
480 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
481 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
482 vdev_indirect_mapping_object(vim));
483 ASSERT3P(vd, ==, svr->svr_vdev);
485 mutex_enter(&svr->svr_lock);
488 * Remove the segment from the removing vdev's spacemap. This
489 * ensures that we will not attempt to copy this space (if the
490 * removal thread has not yet visited it), and also ensures
491 * that we know what is actually allocated on the new vdevs
492 * (needed if we cancel the removal).
494 * Note: we must do the metaslab_free_concrete() with the svr_lock
495 * held, so that the remove_thread can not load this metaslab and then
496 * visit this offset between the time that we metaslab_free_concrete()
497 * and when we check to see if it has been visited.
499 * Note: The checkpoint flag is set to false as having/taking
500 * a checkpoint and removing a device can't happen at the same time.
503 ASSERT(!spa_has_checkpoint(spa));
504 metaslab_free_concrete(vd, offset, size, B_FALSE);
506 uint64_t synced_size = 0;
507 uint64_t synced_offset = 0;
508 uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
509 if (offset < max_offset_synced) {
511 * The mapping for this offset is already on disk.
512 * Free from the new location.
514 * Note that we use svr_max_synced_offset because it is
515 * updated atomically with respect to the in-core mapping.
516 * By contrast, vim_max_offset is not.
518 * This block may be split between a synced entry and an
519 * in-flight or unvisited entry. Only process the synced
520 * portion of it here.
522 synced_size = MIN(size, max_offset_synced - offset);
523 synced_offset = offset;
525 ASSERT3U(max_offset_yet, <=, max_offset_synced);
526 max_offset_yet = max_offset_synced;
528 DTRACE_PROBE3(remove__free__synced,
531 uint64_t, synced_size);
534 offset += synced_size;
538 * Look at all in-flight txgs starting from the currently syncing one
539 * and see if a section of this free is being copied. By starting from
540 * this txg and iterating forward, we might find that this region
541 * was copied in two different txgs and handle it appropriately.
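/*
 * Context for the loop below: TXG_CONCURRENT_STATES is 3, so iterating
 * i = 0, 1, 2 from the syncing txg covers the syncing, quiescing, and open
 * txgs -- every txg whose copies could still be in flight and whose
 * mapping entries have not yet been synced to disk.
 */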
543 for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
544 int txgoff = (txg + i) & TXG_MASK;
545 if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
547 * The mapping for this offset is in flight, and
548 * will be synced in txg+i.
550 uint64_t inflight_size = MIN(size,
551 svr->svr_max_offset_to_sync[txgoff] - offset);
553 DTRACE_PROBE4(remove__free__inflight,
556 uint64_t, inflight_size,
560 * We copy data in order of increasing offset.
561 * Therefore the max_offset_to_sync[] must increase
562 * (or be zero, indicating that nothing is being
563 * copied in that txg).
565 if (svr->svr_max_offset_to_sync[txgoff] != 0) {
566 ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
569 svr->svr_max_offset_to_sync[txgoff];
573 * We've already committed to copying this segment:
574 * we have allocated space elsewhere in the pool for
575 * it and have an IO outstanding to copy the data. We
576 * cannot free the space before the copy has
577 * completed, or else the copy IO might overwrite any
578 * new data. To free that space, we record the
579 * segment in the appropriate svr_frees tree and free
580 * the mapped space later, in the txg where we have
581 * completed the copy and synced the mapping (see
582 * vdev_mapping_sync).
584 range_tree_add(svr->svr_frees[txgoff],
585 offset, inflight_size);
586 size -= inflight_size;
587 offset += inflight_size;
590 * This space is already accounted for as being
591 * done, because it is being copied in txg+i.
592 * However, if i!=0, then it is being copied in
593 * a future txg. If we crash after this txg
594 * syncs but before txg+i syncs, then the space
595 * will be free. Therefore we must account
596 * for the space being done in *this* txg
597 * (when it is freed) rather than the future txg
598 * (when it will be copied).
600 ASSERT3U(svr->svr_bytes_done[txgoff], >=,
602 svr->svr_bytes_done[txgoff] -= inflight_size;
603 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
606 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
610 * The copy thread has not yet visited this offset. Ensure that it doesn't.
614 DTRACE_PROBE3(remove__free__unvisited,
619 if (svr->svr_allocd_segs != NULL)
620 range_tree_clear(svr->svr_allocd_segs, offset, size);
623 * Since we now do not need to copy this data, for
624 * accounting purposes we have done our job and can count it as done.
627 svr->svr_bytes_done[txg & TXG_MASK] += size;
629 mutex_exit(&svr->svr_lock);
632 * Now that we have dropped svr_lock, process the synced portion of this free.
635 if (synced_size > 0) {
636 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
639 * Note: this can only be called from syncing context,
640 * and the vdev_indirect_mapping is only changed from the
641 * sync thread, so we don't need svr_lock while doing
642 * metaslab_free_impl_cb.
644 boolean_t checkpoint = B_FALSE;
645 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
646 metaslab_free_impl_cb, &checkpoint);
651 * Stop an active removal and update the spa_removing_phys.
654 spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
656 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
657 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
659 /* Ensure the removal thread has completed before we free the svr. */
660 spa_vdev_remove_suspend(spa);
662 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
664 if (state == DSS_FINISHED) {
665 spa_removing_phys_t *srp = &spa->spa_removing_phys;
666 vdev_t *vd = svr->svr_vdev;
667 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
669 if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
670 vdev_t *pvd = vdev_lookup_top(spa,
671 srp->sr_prev_indirect_vdev);
672 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
675 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
676 srp->sr_prev_indirect_vdev = vd->vdev_id;
678 spa->spa_removing_phys.sr_state = state;
679 spa->spa_removing_phys.sr_end_time = gethrestime_sec();
681 spa->spa_vdev_removal = NULL;
682 spa_vdev_removal_destroy(svr);
684 spa_sync_removing_state(spa, tx);
686 vdev_config_dirty(spa->spa_root_vdev);
690 free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
693 vdev_indirect_mark_obsolete(vd, offset, size);
694 boolean_t checkpoint = B_FALSE;
695 vdev_indirect_ops.vdev_op_remap(vd, offset, size,
696 metaslab_free_impl_cb, &checkpoint);
700 * On behalf of the removal thread, syncs an incremental bit more of
701 * the indirect mapping to disk and updates the in-memory mapping.
702 * Called as a sync task in every txg that the removal thread makes progress.
705 vdev_mapping_sync(void *arg, dmu_tx_t *tx)
707 spa_vdev_removal_t *svr = arg;
708 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
709 vdev_t *vd = svr->svr_vdev;
710 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
711 uint64_t txg = dmu_tx_get_txg(tx);
712 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
714 ASSERT(vic->vic_mapping_object != 0);
715 ASSERT3U(txg, ==, spa_syncing_txg(spa));
717 vdev_indirect_mapping_add_entries(vim,
718 &svr->svr_new_segments[txg & TXG_MASK], tx);
719 vdev_indirect_births_add_entry(vd->vdev_indirect_births,
720 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
723 * Free the copied data for anything that was freed while the
724 * mapping entries were in flight.
726 mutex_enter(&svr->svr_lock);
727 range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
728 free_mapped_segment_cb, vd);
729 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
730 vdev_indirect_mapping_max_offset(vim));
731 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
732 mutex_exit(&svr->svr_lock);
734 spa_sync_removing_state(spa, tx);
738 spa_vdev_copy_segment_write_done(zio_t *zio)
740 vdev_copy_seg_arg_t *vcsa = zio->io_private;
741 vdev_copy_arg_t *vca = vcsa->vcsa_copy_arg;
742 spa_config_exit(zio->io_spa, SCL_STATE, FTAG);
743 abd_free(zio->io_abd);
745 mutex_enter(&vca->vca_lock);
746 vca->vca_outstanding_bytes -= zio->io_size;
747 cv_signal(&vca->vca_cv);
748 mutex_exit(&vca->vca_lock);
750 ASSERT0(zio->io_error);
751 kmem_free(vcsa->vcsa_dest_bp, sizeof (blkptr_t));
752 kmem_free(vcsa, sizeof (vdev_copy_seg_arg_t));
756 spa_vdev_copy_segment_read_done(zio_t *zio)
758 vdev_copy_seg_arg_t *vcsa = zio->io_private;
759 dva_t *dest_dva = vcsa->vcsa_dest_dva;
760 uint64_t txg = vcsa->vcsa_txg;
761 spa_t *spa = zio->io_spa;
762 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(dest_dva));
765 uint64_t size = zio->io_size;
767 ASSERT3P(dest_vd, !=, NULL);
768 ASSERT0(zio->io_error);
770 vcsa->vcsa_dest_bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
771 bp = vcsa->vcsa_dest_bp;
776 /* initialize with dest_dva */
777 bcopy(dest_dva, dva, sizeof (dva_t));
778 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
780 BP_SET_LSIZE(bp, size);
781 BP_SET_PSIZE(bp, size);
782 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
783 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
784 BP_SET_TYPE(bp, DMU_OT_NONE);
787 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
789 zio_nowait(zio_rewrite(spa->spa_txg_zio[txg & TXG_MASK], spa,
790 txg, bp, zio->io_abd, size,
791 spa_vdev_copy_segment_write_done, vcsa,
792 ZIO_PRIORITY_REMOVAL, 0, NULL));
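/*
 * Summary of the copy pipeline for one segment: spa_vdev_copy_segment()
 * (below) issues a zio_read() of the old location; this read-done callback
 * builds a block pointer for the destination DVA and issues a zio_rewrite()
 * to the new location; spa_vdev_copy_segment_write_done() (above) then
 * drops the SCL_STATE hold taken in spa_vdev_copy_segment() and decrements
 * vca_outstanding_bytes.
 */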
796 spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
797 vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
799 metaslab_group_t *mg = vd->vdev_mg;
800 spa_t *spa = vd->vdev_spa;
801 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
802 vdev_indirect_mapping_entry_t *entry;
803 vdev_copy_seg_arg_t *private;
805 blkptr_t blk, *bp = &blk;
806 dva_t *dva = bp->blk_dva;
808 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
810 int error = metaslab_alloc_dva(spa, mg->mg_class, size,
811 &dst, 0, NULL, txg, 0, zal);
816 * We can't have any padding of the allocated size, otherwise we will
817 * misunderstand what's allocated, and the size of the mapping.
818 * The caller ensures this will be true by passing in a size that is
819 * aligned to the worst (highest) ashift in the pool.
821 ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);
823 mutex_enter(&vca->vca_lock);
824 vca->vca_outstanding_bytes += size;
825 mutex_exit(&vca->vca_lock);
827 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
828 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
829 entry->vime_mapping.vimep_dst = dst;
831 private = kmem_alloc(sizeof (vdev_copy_seg_arg_t), KM_SLEEP);
832 private->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
833 private->vcsa_txg = txg;
834 private->vcsa_copy_arg = vca;
837 * This lock is eventually released by the donefunc for the
838 * zio_write_phys that finishes copying the data.
840 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
843 * Do logical I/O, letting the redundancy vdevs (like mirror)
844 * handle their own I/O instead of duplicating that code here.
848 DVA_SET_VDEV(&dva[0], vd->vdev_id);
849 DVA_SET_OFFSET(&dva[0], start);
850 DVA_SET_GANG(&dva[0], 0);
851 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, size));
853 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
855 BP_SET_LSIZE(bp, size);
856 BP_SET_PSIZE(bp, size);
857 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
858 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
859 BP_SET_TYPE(bp, DMU_OT_NONE);
862 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
864 zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa,
865 bp, abd_alloc_for_io(size, B_FALSE), size,
866 spa_vdev_copy_segment_read_done, private,
867 ZIO_PRIORITY_REMOVAL, 0, NULL));
869 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
870 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
871 vdev_dirty(vd, 0, NULL, txg);
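/*
 * Note on the block pointer constructed above: it is never written to disk
 * as a block pointer. It exists only so the copy can ride the normal zio
 * pipeline -- checksum and compression are off, the type is DMU_OT_NONE,
 * and the single DVA names the region being copied -- which lets redundant
 * vdevs (e.g. mirrors) handle their own children, per the comment above.
 */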
877 * Complete the removal of a toplevel vdev. This is called as a
878 * synctask in the same txg that we will sync out the new config (to the
879 * MOS object) which indicates that this vdev is indirect.
882 vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
884 spa_vdev_removal_t *svr = arg;
885 vdev_t *vd = svr->svr_vdev;
886 spa_t *spa = vd->vdev_spa;
888 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
890 for (int i = 0; i < TXG_SIZE; i++) {
891 ASSERT0(svr->svr_bytes_done[i]);
894 ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
895 spa->spa_removing_phys.sr_to_copy);
897 vdev_destroy_spacemaps(vd, tx);
899 /* destroy leaf zaps, if any */
900 ASSERT3P(svr->svr_zaplist, !=, NULL);
901 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
903 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
904 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
906 fnvlist_free(svr->svr_zaplist);
908 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
909 /* vd->vdev_path is not available here */
910 spa_history_log_internal(spa, "vdev remove completed", tx,
911 "%s vdev %llu", spa_name(spa), vd->vdev_id);
915 vdev_indirect_state_transfer(vdev_t *ivd, vdev_t *vd)
917 ivd->vdev_indirect_config = vd->vdev_indirect_config;
919 ASSERT3P(ivd->vdev_indirect_mapping, ==, NULL);
920 ASSERT(vd->vdev_indirect_mapping != NULL);
921 ivd->vdev_indirect_mapping = vd->vdev_indirect_mapping;
922 vd->vdev_indirect_mapping = NULL;
924 ASSERT3P(ivd->vdev_indirect_births, ==, NULL);
925 ASSERT(vd->vdev_indirect_births != NULL);
926 ivd->vdev_indirect_births = vd->vdev_indirect_births;
927 vd->vdev_indirect_births = NULL;
929 ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
930 ASSERT0(range_tree_space(ivd->vdev_obsolete_segments));
932 if (vd->vdev_obsolete_sm != NULL) {
933 ASSERT3U(ivd->vdev_asize, ==, vd->vdev_asize);
936 * We cannot use space_map_{open,close} because we hold all
937 * the config locks as writer.
939 ASSERT3P(ivd->vdev_obsolete_sm, ==, NULL);
940 ivd->vdev_obsolete_sm = vd->vdev_obsolete_sm;
941 vd->vdev_obsolete_sm = NULL;
946 vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
948 ASSERT3P(zlist, !=, NULL);
949 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
951 if (vd->vdev_leaf_zap != 0) {
953 (void) snprintf(zkey, sizeof (zkey), "%s-%ju",
954 VDEV_REMOVAL_ZAP_OBJS, (uintmax_t)vd->vdev_leaf_zap);
955 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
958 for (uint64_t id = 0; id < vd->vdev_children; id++) {
959 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
964 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
968 spa_t *spa = vd->vdev_spa;
969 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
972 * First, build a list of leaf zaps to be destroyed.
973 * This is passed to the sync context thread,
974 * which does the actual unlinking.
976 svr->svr_zaplist = fnvlist_alloc();
977 vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
979 ivd = vdev_add_parent(vd, &vdev_indirect_ops);
981 vd->vdev_leaf_zap = 0;
983 vdev_remove_child(ivd, vd);
984 vdev_compact_children(ivd);
986 vdev_indirect_state_transfer(ivd, vd);
990 ASSERT(!ivd->vdev_removing);
991 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
993 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
994 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
995 0, ZFS_SPACE_CHECK_NONE, tx);
999 * Indicate that this thread has exited.
1000 * After this, we can not use svr.
1002 mutex_enter(&svr->svr_lock);
1003 svr->svr_thread = NULL;
1004 cv_broadcast(&svr->svr_cv);
1005 mutex_exit(&svr->svr_lock);
1009 * Complete the removal of a toplevel vdev. This is called in open
1010 * context by the removal thread after we have copied all of the vdev's data.
1013 vdev_remove_complete(vdev_t *vd)
1015 spa_t *spa = vd->vdev_spa;
1019 * Wait for any deferred frees to be synced before we call
1020 * vdev_metaslab_fini()
1022 txg_wait_synced(spa->spa_dsl_pool, 0);
1024 txg = spa_vdev_enter(spa);
1025 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
1029 * Discard allocation state.
1031 if (vd->vdev_mg != NULL) {
1032 vdev_metaslab_fini(vd);
1033 metaslab_group_destroy(vd->vdev_mg);
1036 ASSERT0(vd->vdev_stat.vs_space);
1037 ASSERT0(vd->vdev_stat.vs_dspace);
1039 vdev_remove_replace_with_indirect(vd, txg);
1042 * We now release the locks, allowing spa_sync to run and finish the
1043 * removal via vdev_remove_complete_sync in syncing context.
1045 (void) spa_vdev_exit(spa, NULL, txg, 0);
1048 * Top ZAP should have been transferred to the indirect vdev in
1049 * vdev_remove_replace_with_indirect.
1051 ASSERT0(vd->vdev_top_zap);
1054 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
1056 ASSERT0(vd->vdev_leaf_zap);
1058 txg = spa_vdev_enter(spa);
1059 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1061 * Request to update the config and the config cachefile.
1063 vdev_config_dirty(spa->spa_root_vdev);
1064 (void) spa_vdev_exit(spa, vd, txg, 0);
1068 * Evacuates a segment of size at most max_alloc from the vdev
1069 * via repeated calls to spa_vdev_copy_segment. If an allocation
1070 * fails, the pool is probably too fragmented to handle such a
1071 * large size, so decrease max_alloc so that the caller will not try
1072 * this size again this txg.
1075 spa_vdev_copy_impl(spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
1076 uint64_t *max_alloc, dmu_tx_t *tx)
1078 uint64_t txg = dmu_tx_get_txg(tx);
1079 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1081 mutex_enter(&svr->svr_lock);
1083 range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
1085 mutex_exit(&svr->svr_lock);
1088 uint64_t offset = rs->rs_start;
1089 uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc);
1091 range_tree_remove(svr->svr_allocd_segs, offset, length);
1093 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
1094 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
1095 svr, 0, ZFS_SPACE_CHECK_NONE, tx);
1098 svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length;
1101 * Note: this is the amount of *allocated* space
1102 * that we are taking care of each txg.
1104 svr->svr_bytes_done[txg & TXG_MASK] += length;
1106 mutex_exit(&svr->svr_lock);
1108 zio_alloc_list_t zal;
1109 metaslab_trace_init(&zal);
1110 uint64_t thismax = *max_alloc;
1111 while (length > 0) {
1112 uint64_t mylen = MIN(length, thismax);
1114 int error = spa_vdev_copy_segment(svr->svr_vdev,
1115 offset, mylen, txg, vca, &zal);
1117 if (error == ENOSPC) {
1119 * Cut our segment in half, and don't try this
1120 * segment size again this txg. Note that the
1121 * allocation size must be aligned to the highest
1122 * ashift in the pool, so that the allocation will
1123 * not be padded out to a multiple of the ashift,
1124 * which could cause us to think that this mapping
1125 * is larger than we intended.
1127 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
1128 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
1129 thismax = P2ROUNDUP(mylen / 2,
1130 1 << spa->spa_max_ashift);
1131 ASSERT3U(thismax, <, mylen);
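/*
 * Illustrative numbers (hypothetical): with spa_max_ashift == 12 (4KB
 * sectors) and mylen == 1MB, thismax becomes P2ROUNDUP(512KB, 4KB) ==
 * 512KB, so the next attempt in this txg allocates at most half the
 * failed size, still aligned to the pool's largest ashift.
 */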
1133 * The minimum-size allocation can not fail.
1135 ASSERT3U(mylen, >, 1 << spa->spa_max_ashift);
1136 *max_alloc = mylen - (1 << spa->spa_max_ashift);
1143 * We've performed an allocation, so reset the allocation trace list.
1146 metaslab_trace_fini(&zal);
1147 metaslab_trace_init(&zal);
1150 metaslab_trace_fini(&zal);
1154 * The removal thread operates in open context. It iterates over all
1155 * allocated space in the vdev, by loading each metaslab's spacemap.
1156 * For each contiguous segment of allocated space (capping the segment
1157 * size at SPA_MAXBLOCKSIZE), we:
1158 * - Allocate space for it on another vdev.
1159 * - Create a new mapping from the old location to the new location
1160 * (as a record in svr_new_segments).
1161 * - Initiate a logical read zio to get the data off the removing disk.
1162 * - In the read zio's done callback, initiate a logical write zio to
1163 * write it to the new vdev.
1164 * Note that all of this will take effect when a particular TXG syncs.
1165 * The sync thread ensures that all the phys reads and writes for the syncing
1166 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
1167 * (see vdev_mapping_sync()).
1170 spa_vdev_remove_thread(void *arg)
1173 spa_t *spa = vd->vdev_spa;
1174 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1175 vdev_copy_arg_t vca;
1176 uint64_t max_alloc = zfs_remove_max_segment;
1177 uint64_t last_txg = 0;
1178 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1179 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
1181 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
1182 ASSERT(vdev_is_concrete(vd));
1183 ASSERT(vd->vdev_removing);
1184 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
1185 ASSERT3P(svr->svr_vdev, ==, vd);
1186 ASSERT(vim != NULL);
1188 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
1189 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
1190 vca.vca_outstanding_bytes = 0;
1192 mutex_enter(&svr->svr_lock);
1195 * Start from vim_max_offset so we pick up where we left off
1196 * if we are restarting the removal after opening the pool.
1199 for (msi = start_offset >> vd->vdev_ms_shift;
1200 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
1201 metaslab_t *msp = vd->vdev_ms[msi];
1202 ASSERT3U(msi, <=, vd->vdev_ms_count);
1204 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1206 mutex_enter(&msp->ms_sync_lock);
1207 mutex_enter(&msp->ms_lock);
1210 * Assert nothing in flight -- ms_*tree is empty.
1212 for (int i = 0; i < TXG_SIZE; i++) {
1213 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1217 * If the metaslab has ever been allocated from (ms_sm!=NULL),
1218 * read the allocated segments from the space map object
1219 * into svr_allocd_segs. Since we do this while holding
1220 * svr_lock and ms_sync_lock, concurrent frees (which
1221 * would have modified the space map) will wait for us
1222 * to finish loading the spacemap, and then take the
1223 * appropriate action (see free_from_removing_vdev()).
1225 if (msp->ms_sm != NULL) {
1226 space_map_t *sm = NULL;
1229 * We have to open a new space map here, because
1230 * ms_sm's sm_length and sm_alloc may not reflect
1231 * what's in the object contents, if we are in between
1232 * metaslab_sync() and metaslab_sync_done().
1234 VERIFY0(space_map_open(&sm,
1235 spa->spa_dsl_pool->dp_meta_objset,
1236 msp->ms_sm->sm_object, msp->ms_sm->sm_start,
1237 msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
1238 space_map_update(sm);
1239 VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
1241 space_map_close(sm);
1243 range_tree_walk(msp->ms_freeing,
1244 range_tree_remove, svr->svr_allocd_segs);
1247 * When we are resuming from a paused removal (i.e.
1248 * when importing a pool with a removal in progress),
1249 * discard any state that we have already processed.
1251 range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
1253 mutex_exit(&msp->ms_lock);
1254 mutex_exit(&msp->ms_sync_lock);
1257 zfs_dbgmsg("copying %llu segments for metaslab %llu",
1258 avl_numnodes(&svr->svr_allocd_segs->rt_root),
1261 while (!svr->svr_thread_exit &&
1262 !range_tree_is_empty(svr->svr_allocd_segs)) {
1264 mutex_exit(&svr->svr_lock);
1267 * This delay will pause the removal around the point
1268 * specified by zfs_remove_max_bytes_pause. We do this
1269 * solely from the test suite or during debugging.
1271 uint64_t bytes_copied =
1272 spa->spa_removing_phys.sr_copied;
1273 for (int i = 0; i < TXG_SIZE; i++)
1274 bytes_copied += svr->svr_bytes_done[i];
1275 while (zfs_remove_max_bytes_pause <= bytes_copied &&
1276 !svr->svr_thread_exit)
1279 mutex_enter(&vca.vca_lock);
1280 while (vca.vca_outstanding_bytes >
1281 zfs_remove_max_copy_bytes) {
1282 cv_wait(&vca.vca_cv, &vca.vca_lock);
1284 mutex_exit(&vca.vca_lock);
1287 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1289 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1290 uint64_t txg = dmu_tx_get_txg(tx);
1292 if (txg != last_txg)
1293 max_alloc = zfs_remove_max_segment;
1296 spa_vdev_copy_impl(svr, &vca, &max_alloc, tx);
1299 mutex_enter(&svr->svr_lock);
1303 mutex_exit(&svr->svr_lock);
1305 * Wait for all copies to finish before cleaning up the vca.
1307 txg_wait_synced(spa->spa_dsl_pool, 0);
1308 ASSERT0(vca.vca_outstanding_bytes);
1310 mutex_destroy(&vca.vca_lock);
1311 cv_destroy(&vca.vca_cv);
1313 if (svr->svr_thread_exit) {
1314 mutex_enter(&svr->svr_lock);
1315 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
1316 svr->svr_thread = NULL;
1317 cv_broadcast(&svr->svr_cv);
1318 mutex_exit(&svr->svr_lock);
1320 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1321 vdev_remove_complete(vd);
1327 spa_vdev_remove_suspend(spa_t *spa)
1329 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1334 mutex_enter(&svr->svr_lock);
1335 svr->svr_thread_exit = B_TRUE;
1336 while (svr->svr_thread != NULL)
1337 cv_wait(&svr->svr_cv, &svr->svr_lock);
1338 svr->svr_thread_exit = B_FALSE;
1339 mutex_exit(&svr->svr_lock);
1344 spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
1346 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1348 if (spa->spa_vdev_removal == NULL)
1354 * Cancel a removal by freeing all entries from the partial mapping
1355 * and marking the vdev as no longer being removed.
1359 spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
1361 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1362 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1363 vdev_t *vd = svr->svr_vdev;
1364 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
1365 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1366 objset_t *mos = spa->spa_meta_objset;
1368 ASSERT3P(svr->svr_thread, ==, NULL);
1370 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
1371 if (vdev_obsolete_counts_are_precise(vd)) {
1372 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1373 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1374 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
1377 if (vdev_obsolete_sm_object(vd) != 0) {
1378 ASSERT(vd->vdev_obsolete_sm != NULL);
1379 ASSERT3U(vdev_obsolete_sm_object(vd), ==,
1380 space_map_object(vd->vdev_obsolete_sm));
1382 space_map_free(vd->vdev_obsolete_sm, tx);
1383 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1384 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
1385 space_map_close(vd->vdev_obsolete_sm);
1386 vd->vdev_obsolete_sm = NULL;
1387 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1389 for (int i = 0; i < TXG_SIZE; i++) {
1390 ASSERT(list_is_empty(&svr->svr_new_segments[i]));
1391 ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
1392 vdev_indirect_mapping_max_offset(vim));
1395 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
1396 metaslab_t *msp = vd->vdev_ms[msi];
1398 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
1401 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1403 mutex_enter(&msp->ms_lock);
1406 * Assert nothing in flight -- ms_*tree is empty.
1408 for (int i = 0; i < TXG_SIZE; i++)
1409 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1410 for (int i = 0; i < TXG_DEFER_SIZE; i++)
1411 ASSERT0(range_tree_space(msp->ms_defer[i]));
1412 ASSERT0(range_tree_space(msp->ms_freed));
1414 if (msp->ms_sm != NULL) {
1416 * Assert that the in-core spacemap has the same
1417 * length as the on-disk one, so we can use the
1418 * existing in-core spacemap to load it from disk.
1420 ASSERT3U(msp->ms_sm->sm_alloc, ==,
1421 msp->ms_sm->sm_phys->smp_alloc);
1422 ASSERT3U(msp->ms_sm->sm_length, ==,
1423 msp->ms_sm->sm_phys->smp_objsize);
1425 mutex_enter(&svr->svr_lock);
1426 VERIFY0(space_map_load(msp->ms_sm,
1427 svr->svr_allocd_segs, SM_ALLOC));
1428 range_tree_walk(msp->ms_freeing,
1429 range_tree_remove, svr->svr_allocd_segs);
1432 * Clear everything past what has been synced,
1433 * because we have not allocated mappings for it yet.
1435 uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
1436 range_tree_clear(svr->svr_allocd_segs, syncd,
1437 msp->ms_sm->sm_start + msp->ms_sm->sm_size - syncd);
1439 mutex_exit(&svr->svr_lock);
1441 mutex_exit(&msp->ms_lock);
1443 mutex_enter(&svr->svr_lock);
1444 range_tree_vacate(svr->svr_allocd_segs,
1445 free_mapped_segment_cb, vd);
1446 mutex_exit(&svr->svr_lock);
1450 * Note: this must happen after we invoke free_mapped_segment_cb,
1451 * because it adds to the obsolete_segments.
1453 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
1455 ASSERT3U(vic->vic_mapping_object, ==,
1456 vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
1457 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1458 vd->vdev_indirect_mapping = NULL;
1459 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
1460 vic->vic_mapping_object = 0;
1462 ASSERT3U(vic->vic_births_object, ==,
1463 vdev_indirect_births_object(vd->vdev_indirect_births));
1464 vdev_indirect_births_close(vd->vdev_indirect_births);
1465 vd->vdev_indirect_births = NULL;
1466 vdev_indirect_births_free(mos, vic->vic_births_object, tx);
1467 vic->vic_births_object = 0;
1470 * We may have processed some frees from the removing vdev in this
1471 * txg, thus increasing svr_bytes_done; discard that here to
1472 * satisfy the assertions in spa_vdev_removal_destroy().
1473 * Note that future txgs cannot have any bytes_done, because
1474 * future txgs are only modified from open context, and we have
1475 * already shut down the copying thread.
1477 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
1478 spa_finish_removal(spa, DSS_CANCELED, tx);
1480 vd->vdev_removing = B_FALSE;
1481 vdev_config_dirty(vd);
1483 zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
1484 vd->vdev_id, dmu_tx_get_txg(tx));
1485 spa_history_log_internal(spa, "vdev remove canceled", tx,
1486 "%s vdev %llu %s", spa_name(spa),
1487 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
1491 spa_vdev_remove_cancel(spa_t *spa)
1493 spa_vdev_remove_suspend(spa);
1495 if (spa->spa_vdev_removal == NULL)
1498 uint64_t vdid = spa->spa_vdev_removal->svr_vdev->vdev_id;
1500 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
1501 spa_vdev_remove_cancel_sync, NULL, 0,
1502 ZFS_SPACE_CHECK_EXTRA_RESERVED);
1505 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
1506 vdev_t *vd = vdev_lookup_top(spa, vdid);
1507 metaslab_group_activate(vd->vdev_mg);
1508 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
1515 * Called every sync pass of every txg if there's a svr.
1518 svr_sync(spa_t *spa, dmu_tx_t *tx)
1520 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1521 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1524 * This check is necessary so that we do not dirty the
1525 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
1526 * is nothing to do. Dirtying it every time would prevent us
1527 * from syncing-to-convergence.
1529 if (svr->svr_bytes_done[txgoff] == 0)
1533 * Update progress accounting.
1535 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
1536 svr->svr_bytes_done[txgoff] = 0;
1538 spa_sync_removing_state(spa, tx);
1542 vdev_remove_make_hole_and_free(vdev_t *vd)
1544 uint64_t id = vd->vdev_id;
1545 spa_t *spa = vd->vdev_spa;
1546 vdev_t *rvd = spa->spa_root_vdev;
1547 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
1549 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1550 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1555 vdev_compact_children(rvd);
1557 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
1558 vdev_add_child(rvd, vd);
1560 vdev_config_dirty(rvd);
1563 * Reassess the health of our root vdev.
1569 * Remove a log device. The config lock is held for the specified TXG.
1572 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
1574 metaslab_group_t *mg = vd->vdev_mg;
1575 spa_t *spa = vd->vdev_spa;
1578 ASSERT(vd->vdev_islog);
1579 ASSERT(vd == vd->vdev_top);
1582 * Stop allocating from this vdev.
1584 metaslab_group_passivate(mg);
1587 * Wait for the youngest allocations and frees to sync,
1588 * and then wait for the deferral of those frees to finish.
1590 spa_vdev_config_exit(spa, NULL,
1591 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
1594 * Evacuate the device. We don't hold the config lock as writer
1595 * since we need to do I/O but we do keep the
1596 * spa_namespace_lock held. Once this completes the device
1597 * should no longer have any blocks allocated on it.
1599 if (vd->vdev_islog) {
1600 if (vd->vdev_stat.vs_alloc != 0)
1601 error = spa_reset_logs(spa);
1604 *txg = spa_vdev_config_enter(spa);
1607 metaslab_group_activate(mg);
1610 ASSERT0(vd->vdev_stat.vs_alloc);
1613 * The evacuation succeeded. Remove any remaining MOS metadata
1614 * associated with this vdev, and wait for these changes to sync.
1616 vd->vdev_removing = B_TRUE;
1618 vdev_dirty_leaves(vd, VDD_DTL, *txg);
1619 vdev_config_dirty(vd);
1621 spa_history_log_internal(spa, "vdev remove", NULL,
1622 "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
1623 (vd->vdev_path != NULL) ? vd->vdev_path : "-");
1625 /* Make sure these changes are sync'ed */
1626 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
1628 *txg = spa_vdev_config_enter(spa);
1630 sysevent_t *ev = spa_event_create(spa, vd, NULL,
1631 ESC_ZFS_VDEV_REMOVE_DEV);
1632 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1633 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1635 /* The top ZAP should have been destroyed by vdev_remove_empty. */
1636 ASSERT0(vd->vdev_top_zap);
1637 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
1638 ASSERT0(vd->vdev_leaf_zap);
1640 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1642 if (list_link_active(&vd->vdev_state_dirty_node))
1643 vdev_state_clean(vd);
1644 if (list_link_active(&vd->vdev_config_dirty_node))
1645 vdev_config_clean(vd);
1648 * Clean up the vdev namespace.
1650 vdev_remove_make_hole_and_free(vd);
1659 spa_vdev_remove_top_check(vdev_t *vd)
1661 spa_t *spa = vd->vdev_spa;
1663 if (vd != vd->vdev_top)
1664 return (SET_ERROR(ENOTSUP));
1666 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
1667 return (SET_ERROR(ENOTSUP));
1670 * There has to be enough free space to remove the
1671 * device and leave double the "slop" space (i.e. we
1672 * must leave at least 3% of the pool free, in addition to
1673 * the normal slop space).
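/*
 * Illustrative numbers (assuming the default spa_slop_shift of 5, i.e.
 * slop of roughly 1/32 of pool capacity): removing a vdev with 100GB
 * allocated from a 1TB pool requires about 100GB + 32GB of space to be
 * available on the remaining vdevs, or the check below fails with ENOSPC.
 */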
1675 if (dsl_dir_space_available(spa->spa_dsl_pool->dp_root_dir,
1677 vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
1678 return (SET_ERROR(ENOSPC));
1682 * There can not be a removal in progress.
1684 if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
1685 return (SET_ERROR(EBUSY));
1688 * The device must have all its data.
1690 if (!vdev_dtl_empty(vd, DTL_MISSING) ||
1691 !vdev_dtl_empty(vd, DTL_OUTAGE))
1692 return (SET_ERROR(EBUSY));
1695 * The device must be healthy.
1697 if (!vdev_readable(vd))
1698 return (SET_ERROR(EIO));
1701 * All vdevs in normal class must have the same ashift.
1703 if (spa->spa_max_ashift != spa->spa_min_ashift) {
1704 return (SET_ERROR(EINVAL));
1708 * All vdevs in normal class must have the same ashift and not be raidz.
1711 vdev_t *rvd = spa->spa_root_vdev;
1712 int num_indirect = 0;
1713 for (uint64_t id = 0; id < rvd->vdev_children; id++) {
1714 vdev_t *cvd = rvd->vdev_child[id];
1715 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
1716 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
1717 if (cvd->vdev_ops == &vdev_indirect_ops)
1719 if (!vdev_is_concrete(cvd))
1721 if (cvd->vdev_ops == &vdev_raidz_ops)
1722 return (SET_ERROR(EINVAL));
1724 * A mirror vdev must be composed of leaf vdevs only.
1726 if (cvd->vdev_ops == &vdev_mirror_ops) {
1727 for (uint64_t cid = 0;
1728 cid < cvd->vdev_children; cid++) {
1729 vdev_t *tmp = cvd->vdev_child[cid];
1730 if (!tmp->vdev_ops->vdev_op_leaf)
1731 return (SET_ERROR(EINVAL));
1740 * Initiate removal of a top-level vdev, reducing the total space in the pool.
1741 * The config lock is held for the specified TXG. Once initiated,
1742 * evacuation of all allocated space (copying it to other vdevs) happens
1743 * in the background (see spa_vdev_remove_thread()), and can be canceled
1744 * (see spa_vdev_remove_cancel()). If successful, the vdev will
1745 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
1748 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
1750 spa_t *spa = vd->vdev_spa;
1754 * Check for errors up-front, so that we don't waste time
1755 * passivating the metaslab group and clearing the ZIL if there are errors.
1758 error = spa_vdev_remove_top_check(vd);
1763 * Stop allocating from this vdev. Note that we must check
1764 * that this is not the only device in the pool before
1765 * passivating, otherwise we will not be able to make
1766 * progress because we can't allocate from any vdevs.
1767 * The above check for sufficient free space serves this purpose.
1770 metaslab_group_t *mg = vd->vdev_mg;
1771 metaslab_group_passivate(mg);
1774 * Wait for the youngest allocations and frees to sync,
1775 * and then wait for the deferral of those frees to finish.
1777 spa_vdev_config_exit(spa, NULL,
1778 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
1781 * We must ensure that no "stubby" log blocks are allocated
1782 * on the device to be removed. These blocks could be
1783 * written at any time, including while we are in the middle of copying them.
1786 error = spa_reset_logs(spa);
1788 *txg = spa_vdev_config_enter(spa);
1791 * Things might have changed while the config lock was dropped
1792 * (e.g. space usage). Check for errors again.
1795 error = spa_vdev_remove_top_check(vd);
1798 metaslab_group_activate(mg);
1802 vd->vdev_removing = B_TRUE;
1804 vdev_dirty_leaves(vd, VDD_DTL, *txg);
1805 vdev_config_dirty(vd);
1806 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
1807 dsl_sync_task_nowait(spa->spa_dsl_pool,
1808 vdev_remove_initiate_sync,
1809 vd, 0, ZFS_SPACE_CHECK_NONE, tx);
1816 * Remove a device from the pool.
1818 * Removing a device from the vdev namespace requires several steps
1819 * and can take a significant amount of time. As a result we use
1820 * the spa_vdev_config_[enter/exit] functions which allow us to
1821 * grab and release the spa_config_lock while still holding the namespace
1822 * lock. During each step the configuration is synced out.
1825 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
1828 nvlist_t **spares, **l2cache, *nv;
1830 uint_t nspares, nl2cache;
1832 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
1833 sysevent_t *ev = NULL;
1835 ASSERT(spa_writeable(spa));
1838 txg = spa_vdev_enter(spa);
1840 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1841 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
1842 error = (spa_has_checkpoint(spa)) ?
1843 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
1846 return (spa_vdev_exit(spa, NULL, txg, error));
1851 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
1853 if (spa->spa_spares.sav_vdevs != NULL &&
1854 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
1855 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
1856 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
1858 * Only remove the hot spare if it's not currently in use
1861 if (vd == NULL || unspare) {
1862 char *nvstr = fnvlist_lookup_string(nv,
1864 spa_history_log_internal(spa, "vdev remove", NULL,
1865 "%s vdev (%s) %s", spa_name(spa),
1866 VDEV_TYPE_SPARE, nvstr);
1868 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
1869 ev = spa_event_create(spa, vd, NULL,
1870 ESC_ZFS_VDEV_REMOVE_AUX);
1871 spa_vdev_remove_aux(spa->spa_spares.sav_config,
1872 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
1873 spa_load_spares(spa);
1874 spa->spa_spares.sav_sync = B_TRUE;
1876 error = SET_ERROR(EBUSY);
1878 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
1879 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
1880 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
1881 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
1882 char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
1883 spa_history_log_internal(spa, "vdev remove", NULL,
1884 "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
1886 * Cache devices can always be removed.
1888 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
1889 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
1890 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
1891 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
1892 spa_load_l2cache(spa);
1893 spa->spa_l2cache.sav_sync = B_TRUE;
1894 } else if (vd != NULL && vd->vdev_islog) {
1896 error = spa_vdev_remove_log(vd, &txg);
1897 } else if (vd != NULL) {
1899 error = spa_vdev_remove_top(vd, &txg);
1902 * There is no vdev of any kind with the specified guid.
1904 error = SET_ERROR(ENOENT);
1908 error = spa_vdev_exit(spa, NULL, txg, error);
1912 spa_event_discard(ev);
1922 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
1924 prs->prs_state = spa->spa_removing_phys.sr_state;
1926 if (prs->prs_state == DSS_NONE)
1927 return (SET_ERROR(ENOENT));
1929 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
1930 prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
1931 prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
1932 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
1933 prs->prs_copied = spa->spa_removing_phys.sr_copied;
1935 if (spa->spa_vdev_removal != NULL) {
1936 for (int i = 0; i < TXG_SIZE; i++) {
1938 spa->spa_vdev_removal->svr_bytes_done[i];
1942 prs->prs_mapping_memory = 0;
1943 uint64_t indirect_vdev_id =
1944 spa->spa_removing_phys.sr_prev_indirect_vdev;
1945 while (indirect_vdev_id != -1) {
1946 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
1947 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
1948 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1950 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
1951 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
1952 indirect_vdev_id = vic->vic_prev_indirect_vdev;