/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/metaslab.h>
#include <sys/refcount.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
/*
 * An indirect vdev corresponds to a vdev that has been removed.  Since
 * we cannot rewrite block pointers of snapshots, etc., we keep a
 * mapping from old location on the removed device to the new location
 * on another device in the pool and use this mapping whenever we need
 * to access the DVA.  Unfortunately, this mapping did not respect
 * logical block boundaries when it was first created, and so a DVA on
 * this indirect vdev may be "split" into multiple sections that each
 * map to a different location.  As a consequence, not all DVAs can be
 * translated to an equivalent new DVA.  Instead we must provide a
 * "vdev_remap" operation that executes a callback on each contiguous
 * segment of the new location.  This function is used in multiple ways:
 *
 *  - reads and repair writes to this device use the callback to create
 *    a child io for each mapped segment.
 *
 *  - frees and claims to this device use the callback to free or claim
 *    each mapped segment.  (Note that we don't actually need to claim
 *    log blocks on indirect vdevs, because we don't allocate to
 *    removing vdevs.  However, zdb uses zio_claim() for its leak
 *    detection, so we must support it.)
 *
 * An illustrative sketch of such a callback follows this comment.
 */
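/*
 * A hedged sketch of a vdev_remap consumer (illustrative only, not code
 * from this file; the real consumers are vdev_indirect_io_start() and
 * the free/claim paths).  The hypothetical example_remap_cb() receives
 * the offset within the original (possibly split) block, the
 * destination vdev, and the mapped offset and size:
 *
 *	static void
 *	example_remap_cb(uint64_t split_offset, vdev_t *vd,
 *	    uint64_t offset, uint64_t size, void *arg)
 *	{
 *		// Callbacks that only care about concrete vdevs should
 *		// return immediately when called on an indirect vdev
 *		// (see vdev_indirect_io_start_cb() below).
 *		if (vd->vdev_ops == &vdev_indirect_ops)
 *			return;
 *		// ... operate on <vd, offset, size> ...
 *	}
 *
 *	vdev_indirect_remap(vd, offset, asize, example_remap_cb, arg);
 */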
58 * "Big theory statement" for how we mark blocks obsolete.
60 * When a block on an indirect vdev is freed or remapped, a section of
61 * that vdev's mapping may no longer be referenced (aka "obsolete"). We
62 * keep track of how much of each mapping entry is obsolete. When
63 * an entry becomes completely obsolete, we can remove it, thus reducing
64 * the memory used by the mapping. The complete picture of obsolescence
65 * is given by the following data structures, described below:
66 * - the entry-specific obsolete count
67 * - the vdev-specific obsolete spacemap
68 * - the pool-specific obsolete bpobj
70 * == On disk data structures used ==
72 * We track the obsolete space for the pool using several objects. Each
73 * of these objects is created on demand and freed when no longer
74 * needed, and is assumed to be empty if it does not exist.
75 * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
77 * - Each vic_mapping_object (associated with an indirect vdev) can
78 * have a vimp_counts_object. This is an array of uint32_t's
79 * with the same number of entries as the vic_mapping_object. When
80 * the mapping is condensed, entries from the vic_obsolete_sm_object
81 * (see below) are folded into the counts. Therefore, each
82 * obsolete_counts entry tells us the number of bytes in the
83 * corresponding mapping entry that were not referenced when the
84 * mapping was last condensed.
86 * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
87 * This is a space map containing an alloc entry for every DVA that
88 * has been obsoleted since the last time this indirect vdev was
89 * condensed. We use this object in order to improve performance
90 * when marking a DVA as obsolete. Instead of modifying an arbitrary
91 * offset of the vimp_counts_object, we only need to append an entry
92 * to the end of this object. When a DVA becomes obsolete, it is
93 * added to the obsolete space map. This happens when the DVA is
94 * freed, remapped and not referenced by a snapshot, or the last
95 * snapshot referencing it is destroyed.
97 * - Each dataset can have a ds_remap_deadlist object. This is a
98 * deadlist object containing all blocks that were remapped in this
99 * dataset but referenced in a previous snapshot. Blocks can *only*
100 * appear on this list if they were remapped (dsl_dataset_block_remapped);
101 * blocks that were killed in a head dataset are put on the normal
102 * ds_deadlist and marked obsolete when they are freed.
104 * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
105 * in the pool that need to be marked obsolete. When a snapshot is
106 * destroyed, we move some of the ds_remap_deadlist to the obsolete
107 * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
108 * asynchronously process the obsolete bpobj, moving its entries to
109 * the specific vdevs' obsolete space maps.
111 * == Summary of how we mark blocks as obsolete ==
113 * - When freeing a block: if any DVA is on an indirect vdev, append to
114 * vic_obsolete_sm_object.
115 * - When remapping a block, add dva to ds_remap_deadlist (if prev snap
116 * references; otherwise append to vic_obsolete_sm_object).
117 * - When freeing a snapshot: move parts of ds_remap_deadlist to
118 * dp_obsolete_bpobj (same algorithm as ds_deadlist).
119 * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
120 * individual vdev's vic_obsolete_sm_object.
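/*
 * To make the first bullet above concrete, here is a hedged sketch of
 * the free path (the actual callers live in the DMU and metaslab code,
 * not in this file): when a freed block has a DVA on an indirect vdev,
 * syncing context marks the mapped range obsolete, which queues it for
 * the vdev's obsolete space map via vdev_indirect_mark_obsolete(),
 * defined below.
 *
 *	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
 *	if (vd->vdev_ops == &vdev_indirect_ops) {
 *		vdev_indirect_mark_obsolete(vd, DVA_GET_OFFSET(dva),
 *		    DVA_GET_ASIZE(dva), spa_syncing_txg(spa));
 *	}
 */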
124 * "Big theory statement" for how we condense indirect vdevs.
126 * Condensing an indirect vdev's mapping is the process of determining
127 * the precise counts of obsolete space for each mapping entry (by
128 * integrating the obsolete spacemap into the obsolete counts) and
129 * writing out a new mapping that contains only referenced entries.
131 * We condense a vdev when we expect the mapping to shrink (see
132 * vdev_indirect_should_condense()), but only perform one condense at a
133 * time to limit the memory usage. In addition, we use a separate
134 * open-context thread (spa_condense_indirect_thread) to incrementally
135 * create the new mapping object in a way that minimizes the impact on
136 * the rest of the system.
138 * == Generating a new mapping ==
140 * To generate a new mapping, we follow these steps:
142 * 1. Save the old obsolete space map and create a new mapping object
143 * (see spa_condense_indirect_start_sync()). This initializes the
144 * spa_condensing_indirect_phys with the "previous obsolete space map",
145 * which is now read only. Newly obsolete DVAs will be added to a
146 * new (initially empty) obsolete space map, and will not be
147 * considered as part of this condense operation.
149 * 2. Construct in memory the precise counts of obsolete space for each
150 * mapping entry, by incorporating the obsolete space map into the
151 * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
153 * 3. Iterate through each mapping entry, writing to the new mapping any
154 * entries that are not completely obsolete (i.e. which don't have
155 * obsolete count == mapping length). (See
156 * spa_condense_indirect_generate_new_mapping().)
158 * 4. Destroy the old mapping object and switch over to the new one
159 * (spa_condense_indirect_complete_sync).
161 * == Restarting from failure ==
163 * To restart the condense when we import/open the pool, we must start
164 * at the 2nd step above: reconstruct the precise counts in memory,
165 * based on the space map + counts. Then in the 3rd step, we start
166 * iterating where we left off: at vimp_max_offset of the new mapping
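/*
 * A condensed sketch of the lifecycle above, in terms of the functions
 * defined below (steps 1 and 4 are sync tasks; steps 2 and 3 run in
 * the open-context thread, see spa_condense_indirect_thread()):
 *
 *	spa_condense_indirect_start_sync(vd, tx);		// step 1
 *	...
 *	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
 *	vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
 *	    counts, prev_obsolete_sm);				// step 2
 *	spa_condense_indirect_generate_new_mapping(vd, counts,
 *	    start_index);					// step 3
 *	dsl_sync_task(spa_name(spa), NULL,
 *	    spa_condense_indirect_complete_sync, sci, 0,
 *	    ZFS_SPACE_CHECK_NONE);				// step 4
 */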
boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;

/*
 * Condense if at least this percent of the bytes in the mapping is
 * obsolete.  With the default of 25%, the amount of space mapped
 * will be reduced to 1% of its original size after at most 16
 * condenses (since (1 - 0.25)^16 is approximately 0.01).  Higher
 * values will condense less often (causing less i/o); lower values
 * will reduce the mapping size more quickly.
 */
int zfs_indirect_condense_obsolete_pct = 25;
/*
 * Condense if the obsolete space map takes up more than this amount of
 * space on disk (logically).  This limits the amount of disk space
 * consumed by the obsolete space map; the default of 1GB is small enough
 * that we typically don't mind "wasting" it.
 */
uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
/*
 * Don't bother condensing if the mapping uses less than this amount of
 * memory.  The default of 128KB is considered a "trivial" amount of
 * memory and not worth reducing.
 */
uint64_t zfs_condense_min_mapping_bytes = 128 * 1024;
/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a condense (which might otherwise
 * complete too quickly).  If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
 */
int zfs_condense_indirect_commit_entry_delay_ticks = 0;
/*
 * Mark the given offset and size as being obsolete in the given txg.
 */
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	spa_t *spa = vd->vdev_spa;
	ASSERT3U(spa_syncing_txg(spa), ==, txg);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);

	VERIFY(vdev_indirect_mapping_entry_for_offset(
	    vd->vdev_indirect_mapping, offset) != NULL);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		mutex_enter(&vd->vdev_obsolete_lock);
		range_tree_add(vd->vdev_obsolete_segments, offset, size);
		mutex_exit(&vd->vdev_obsolete_lock);
		vdev_dirty(vd, 0, NULL, txg);
	}
}
/*
 * Mark the DVA vdev_id:offset:size as being obsolete in the given tx.  This
 * wrapper is provided because the DMU does not know about vdev_t's and
 * cannot directly call vdev_indirect_mark_obsolete.
 */
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	ASSERT(dmu_tx_is_syncing(tx));

	/* The DMU can only remap indirect vdevs. */
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	vdev_indirect_mark_obsolete(vd, offset, size, dmu_tx_get_txg(tx));
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
	objset_t *mos = spa->spa_meta_objset;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&sci->sci_new_mapping_entries[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	sci->sci_new_mapping =
	    vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);

	return (sci);
}

static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&sci->sci_new_mapping_entries[i]);

	if (sci->sci_new_mapping != NULL)
		vdev_indirect_mapping_close(sci->sci_new_mapping);

	kmem_free(sci, sizeof (*sci));
}
static boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	spa_t *spa = vd->vdev_spa;

	ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));

	if (!zfs_condense_indirect_vdevs_enable)
		return (B_FALSE);

	/*
	 * We can only condense one indirect vdev at a time.
	 */
	if (spa->spa_condensing_indirect != NULL)
		return (B_FALSE);

	if (spa_shutting_down(spa))
		return (B_FALSE);

	/*
	 * The mapping object size must not change while we are
	 * condensing, so we can only condense indirect vdevs
	 * (not vdevs that are still in the middle of being removed).
	 */
	if (vd->vdev_ops != &vdev_indirect_ops)
		return (B_FALSE);

	/*
	 * If nothing new has been marked obsolete, there is no
	 * point in condensing.
	 */
	if (vd->vdev_obsolete_sm == NULL) {
		ASSERT0(vdev_obsolete_sm_object(vd));
		return (B_FALSE);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);

	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
	    space_map_object(vd->vdev_obsolete_sm));

	uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
	uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
	uint64_t mapping_size = vdev_indirect_mapping_size(vim);
	uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);

	ASSERT3U(bytes_obsolete, <=, bytes_mapped);

	/*
	 * If a high percentage of the bytes that are mapped have become
	 * obsolete, condense (unless the mapping is already small enough).
	 * This has a good chance of reducing the amount of memory used
	 * by the mapping.
	 */
	if (bytes_obsolete * 100 / bytes_mapped >=
	    zfs_indirect_condense_obsolete_pct &&
	    mapping_size > zfs_condense_min_mapping_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete "
		    "spacemap covers %d%% of %lluMB mapping",
		    (u_longlong_t)vd->vdev_id,
		    (int)(bytes_obsolete * 100 / bytes_mapped),
		    (u_longlong_t)bytes_mapped / 1024 / 1024);
		return (B_TRUE);
	}

	/*
	 * If the obsolete space map takes up too much space on disk,
	 * condense in order to free up this disk space.
	 */
	if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
		zfs_dbgmsg("should condense vdev %llu because obsolete sm "
		    "length %lluMB >= max size %lluMB",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)obsolete_sm_size / 1024 / 1024,
		    (u_longlong_t)zfs_condense_max_obsolete_bytes /
		    1024 / 1024);
		return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * This sync task completes (finishes) a condense, deleting the old
 * mapping and replacing it with the new one.
 */
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_meta_objset;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
	uint64_t new_count =
	    vdev_indirect_mapping_num_entries(sci->sci_new_mapping);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}
	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);

	/*
	 * Reset vdev_indirect_mapping to refer to the new object.
	 */
	rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = sci->sci_new_mapping;
	rw_exit(&vd->vdev_indirect_rwlock);

	sci->sci_new_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = scip->scip_next_mapping_object;
	scip->scip_next_mapping_object = 0;

	space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
	spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	scip->scip_prev_obsolete_sm_object = 0;

	scip->scip_vdev = 0;

	VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, tx));
	spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
	spa->spa_condensing_indirect = NULL;

	zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
	    "new mapping object %llu has %llu entries "
	    "(was %llu entries)",
	    vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
	    new_count, old_count);

	vdev_config_dirty(spa->spa_root_vdev);
}
/*
 * This sync task appends entries to the new mapping object.
 */
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(sci, ==, spa->spa_condensing_indirect);

	vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
	    &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
	ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
/*
 * Open-context function to add one entry to the new mapping.  The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If we are the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}
static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
    uint32_t *obsolete_counts, uint64_t start_index)
{
	spa_t *spa = vd->vdev_spa;
	uint64_t mapi = start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	uint64_t old_num_entries =
	    vdev_indirect_mapping_num_entries(old_mapping);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);

	zfs_dbgmsg("starting condense of vdev %llu from index %llu",
	    (u_longlong_t)vd->vdev_id,
	    (u_longlong_t)mapi);

	while (mapi < old_num_entries && !spa_shutting_down(spa)) {
		vdev_indirect_mapping_entry_phys_t *entry =
		    &old_mapping->vim_entries[mapi];
		uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
		ASSERT3U(obsolete_counts[mapi], <=, entry_size);
		if (obsolete_counts[mapi] < entry_size) {
			spa_condense_indirect_commit_entry(spa, entry,
			    obsolete_counts[mapi]);

			/*
			 * This delay may be requested for testing, debugging,
			 * or performance reasons.
			 */
			delay(zfs_condense_indirect_commit_entry_delay_ticks);
		}

		mapi++;
	}
	if (spa_shutting_down(spa)) {
		zfs_dbgmsg("pausing condense of vdev %llu at index %llu",
		    (u_longlong_t)vd->vdev_id,
		    (u_longlong_t)mapi);
	}
}
static void
spa_condense_indirect_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;
	uint32_t *counts;
	uint64_t start_index;
	vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
	space_map_t *prev_obsolete_sm = NULL;

	ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
	ASSERT(scip->scip_next_mapping_object != 0);
	ASSERT(scip->scip_prev_obsolete_sm_object != 0);
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		/*
		 * The list must start out empty in order for the
		 * _commit_sync() sync task to be properly registered
		 * on the first call to _commit_entry(); so it's wise
		 * to double check and ensure we actually are starting
		 * with empty lists.
		 */
		ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
	}

	VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
	    scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
	space_map_update(prev_obsolete_sm);
	counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
	if (prev_obsolete_sm != NULL) {
		vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
		    counts, prev_obsolete_sm);
	}
	space_map_close(prev_obsolete_sm);

	/*
	 * Generate new mapping.  Determine what index to continue from
	 * based on the max offset that we've already written in the
	 * new mapping.
	 */
	uint64_t max_offset =
	    vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
	if (max_offset == 0) {
		/* We haven't written anything to the new mapping yet. */
		start_index = 0;
	} else {
		/*
		 * Pick up from where we left off. _entry_for_offset()
		 * returns a pointer into the vim_entries array. If
		 * max_offset is greater than any of the mappings
		 * contained in the table NULL will be returned and
		 * that indicates we've exhausted our iteration of the
		 * old_mapping.
		 */

		vdev_indirect_mapping_entry_phys_t *entry =
		    vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
		    max_offset);

		if (entry == NULL) {
			/*
			 * We've already written the whole new mapping.
			 * This special value will cause us to skip the
			 * generate_new_mapping step and just do the sync
			 * task to complete the condense.
			 */
			start_index = UINT64_MAX;
		} else {
			start_index = entry - old_mapping->vim_entries;
			ASSERT3U(start_index, <,
			    vdev_indirect_mapping_num_entries(old_mapping));
		}
	}

	spa_condense_indirect_generate_new_mapping(vd, counts, start_index);

	vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);

	/*
	 * We may have bailed early from generate_new_mapping(), if
	 * the spa is shutting down.  In this case, do not complete
	 * the condense.
	 */
	if (!spa_shutting_down(spa)) {
		VERIFY0(dsl_sync_task(spa_name(spa), NULL,
		    spa_condense_indirect_complete_sync, sci, 0,
		    ZFS_SPACE_CHECK_NONE));
	}

	mutex_enter(&spa->spa_async_lock);
	spa->spa_condense_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
}
/*
 * Sync task to begin the condensing process.
 */
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	spa_condensing_indirect_phys_t *scip =
	    &spa->spa_condensing_indirect_phys;

	ASSERT0(scip->scip_next_mapping_object);
	ASSERT0(scip->scip_prev_obsolete_sm_object);
	ASSERT0(scip->scip_vdev);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
	ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));

	uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
	ASSERT(obsolete_sm_obj != 0);

	scip->scip_vdev = vd->vdev_id;
	scip->scip_next_mapping_object =
	    vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);

	scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;

	/*
	 * We don't need to allocate a new space map object, since
	 * vdev_indirect_sync_obsolete will allocate one when needed.
	 */
	space_map_close(vd->vdev_obsolete_sm);
	vd->vdev_obsolete_sm = NULL;
	VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));

	VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (*scip) / sizeof (uint64_t), scip, tx));

	ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
	spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);

	zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
	    "posm=%llu nm=%llu",
	    vd->vdev_id, dmu_tx_get_txg(tx),
	    (u_longlong_t)scip->scip_prev_obsolete_sm_object,
	    (u_longlong_t)scip->scip_next_mapping_object);

	ASSERT3P(spa->spa_condense_thread, ==, NULL);
	spa->spa_condense_thread = thread_create(NULL, 0,
	    spa_condense_indirect_thread, vd, 0, &p0, TS_RUN, minclsyspri);
}
/*
 * Sync to the given vdev's obsolete space map any segments that are no longer
 * referenced as of the given txg.
 *
 * If the obsolete space map doesn't exist yet, create and open it.
 */
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
	spa_t *spa = vd->vdev_spa;
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

	ASSERT3U(vic->vic_mapping_object, !=, 0);
	ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
	ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));

	if (vdev_obsolete_sm_object(vd) == 0) {
		uint64_t obsolete_sm_object =
		    space_map_alloc(spa->spa_meta_objset, tx);

		ASSERT(vd->vdev_top_zap != 0);
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
		    sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
		ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);

		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
		    spa->spa_meta_objset, obsolete_sm_object,
		    0, vd->vdev_asize, 0));
		space_map_update(vd->vdev_obsolete_sm);
	}

	ASSERT(vd->vdev_obsolete_sm != NULL);
	ASSERT3U(vdev_obsolete_sm_object(vd), ==,
	    space_map_object(vd->vdev_obsolete_sm));

	space_map_write(vd->vdev_obsolete_sm,
	    vd->vdev_obsolete_segments, SM_ALLOC, tx);
	space_map_update(vd->vdev_obsolete_sm);
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}
int
spa_condense_init(spa_t *spa)
{
	int error = zap_lookup(spa->spa_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
	    sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
	    &spa->spa_condensing_indirect_phys);
	if (error == 0) {
		if (spa_writeable(spa)) {
			spa->spa_condensing_indirect =
			    spa_condensing_indirect_create(spa);
		}
		return (0);
	} else if (error == ENOENT) {
		return (0);
	} else {
		return (error);
	}
}

void
spa_condense_fini(spa_t *spa)
{
	if (spa->spa_condensing_indirect != NULL) {
		spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
		spa->spa_condensing_indirect = NULL;
	}
}
/*
 * Restart the condense - called when the pool is opened.
 */
void
spa_condense_indirect_restart(spa_t *spa)
{
	vdev_t *vd;
	ASSERT(spa->spa_condensing_indirect != NULL);
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	vd = vdev_lookup_top(spa,
	    spa->spa_condensing_indirect_phys.scip_vdev);
	ASSERT3P(vd, !=, NULL);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	ASSERT3P(spa->spa_condense_thread, ==, NULL);
	spa->spa_condense_thread = thread_create(NULL, 0,
	    spa_condense_indirect_thread, vd, 0, &p0, TS_RUN,
	    minclsyspri);
}
/*
 * Gets the obsolete spacemap object from the vdev's ZAP.
 * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
 * exist yet.
 */
uint64_t
vdev_obsolete_sm_object(vdev_t *vd)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
	if (vd->vdev_top_zap == 0) {
		return (0);
	}

	uint64_t sm_obj = 0;
	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);

	ASSERT(err == 0 || err == ENOENT);

	return (sm_obj);
}

boolean_t
vdev_obsolete_counts_are_precise(vdev_t *vd)
{
	ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
	if (vd->vdev_top_zap == 0) {
		return (B_FALSE);
	}

	uint64_t val = 0;
	int err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);

	ASSERT(err == 0 || err == ENOENT);

	return (val != 0);
}
static void
vdev_indirect_close(vdev_t *vd)
{
}

static void
vdev_indirect_io_done(zio_t *zio)
{
}

static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	*psize = *max_psize = vd->vdev_asize +
	    VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
	*logical_ashift = vd->vdev_ashift;
	*physical_ashift = vd->vdev_physical_ashift;
	return (0);
}
typedef struct remap_segment {
	vdev_t *rs_vd;
	uint64_t rs_offset;
	uint64_t rs_asize;
	uint64_t rs_split_offset;
	list_node_t rs_node;
} remap_segment_t;

static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
	remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
	rs->rs_vd = vd;
	rs->rs_offset = offset;
	rs->rs_asize = asize;
	rs->rs_split_offset = split_offset;
	return (rs);
}
/*
 * Goes through the relevant indirect mappings until it hits a concrete vdev
 * and issues the callback.  On the way to the concrete vdev, if any other
 * indirect vdevs are encountered, then the callback will also be called on
 * each of those indirect vdevs.  For example, if the segment is mapped to
 * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
 * mapped to segment B on concrete vdev 2, then the callback will be called on
 * both vdev 1 and vdev 2.
 *
 * While the callback passed to vdev_indirect_remap() is called on every vdev
 * the function encounters, certain callbacks only care about concrete vdevs.
 * These types of callbacks should return immediately and explicitly when they
 * are called on an indirect vdev.
 *
 * Because there is a possibility that a DVA section in the indirect device
 * has been split into multiple sections in our mapping, we keep track
 * of the relevant contiguous segments of the new location (remap_segment_t)
 * in a stack.  This way we can call the callback for each of the new sections
 * created by a single section of the indirect device.  Note, though, that in
 * this scenario the callbacks in each split block won't occur in order in
 * terms of offset, so callers should not make any assumptions about that.
 *
 * For callbacks that don't handle split blocks and immediately return when
 * they encounter them (as is the case for remap_blkptr_cb), the caller can
 * assume that its callback will be applied from the first indirect vdev
 * encountered to the last one and then the concrete vdev, in that order.
 */
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
    void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
	list_t stack;
	spa_t *spa = vd->vdev_spa;

	list_create(&stack, sizeof (remap_segment_t),
	    offsetof(remap_segment_t, rs_node));

	for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
	    rs != NULL; rs = list_remove_head(&stack)) {
		vdev_t *v = rs->rs_vd;

		/*
		 * Note: this can be called from open context
		 * (eg. zio_read()), so we need the rwlock to prevent
		 * the mapping from being changed by condensing.
		 */
		rw_enter(&v->vdev_indirect_rwlock, RW_READER);
		vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
		ASSERT3P(vim, !=, NULL);

		ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
		ASSERT(rs->rs_asize > 0);

		vdev_indirect_mapping_entry_phys_t *mapping =
		    vdev_indirect_mapping_entry_for_offset(vim, rs->rs_offset);
		ASSERT3P(mapping, !=, NULL);

		while (rs->rs_asize > 0) {
			/*
			 * Note: the vdev_indirect_mapping cannot change
			 * while we are running.  It only changes while the
			 * removal is in progress, and then only from syncing
			 * context.  While a removal is in progress, this
			 * function is only called for frees, which also only
			 * happen from syncing context.
			 */

			uint64_t size = DVA_GET_ASIZE(&mapping->vimep_dst);
			uint64_t dst_offset =
			    DVA_GET_OFFSET(&mapping->vimep_dst);
			uint64_t dst_vdev = DVA_GET_VDEV(&mapping->vimep_dst);

			ASSERT3U(rs->rs_offset, >=,
			    DVA_MAPPING_GET_SRC_OFFSET(mapping));
			ASSERT3U(rs->rs_offset, <,
			    DVA_MAPPING_GET_SRC_OFFSET(mapping) + size);
			ASSERT3U(dst_vdev, !=, v->vdev_id);

			uint64_t inner_offset = rs->rs_offset -
			    DVA_MAPPING_GET_SRC_OFFSET(mapping);
			uint64_t inner_size =
			    MIN(rs->rs_asize, size - inner_offset);

			vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
			ASSERT3P(dst_v, !=, NULL);

			if (dst_v->vdev_ops == &vdev_indirect_ops) {
				list_insert_head(&stack,
				    rs_alloc(dst_v, dst_offset + inner_offset,
				    inner_size, rs->rs_split_offset));
			}

			if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
			    IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
				/*
				 * Note: This clause exists solely for
				 * testing purposes. We use it to ensure that
				 * split blocks work and that the callbacks
				 * using them yield the same result if issued
				 * in reverse order.
				 */
				uint64_t inner_half = inner_size / 2;

				func(rs->rs_split_offset + inner_half, dst_v,
				    dst_offset + inner_offset + inner_half,
				    inner_half, arg);

				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_half, arg);
			} else {
				func(rs->rs_split_offset, dst_v,
				    dst_offset + inner_offset,
				    inner_size, arg);
			}

			rs->rs_offset += inner_size;
			rs->rs_asize -= inner_size;
			rs->rs_split_offset += inner_size;
			mapping++;
		}

		rw_exit(&v->vdev_indirect_rwlock);
		kmem_free(rs, sizeof (remap_segment_t));
	}
	list_destroy(&stack);
}
static void
vdev_indirect_child_io_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;

	mutex_enter(&pio->io_lock);
	pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
	mutex_exit(&pio->io_lock);

	abd_put(zio->io_abd);
}
static void
vdev_indirect_io_start_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	zio_t *zio = arg;

	ASSERT3P(vd, !=, NULL);

	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	zio_nowait(zio_vdev_child_io(zio, NULL, vd, offset,
	    abd_get_offset(zio->io_abd, split_offset),
	    size, zio->io_type, zio->io_priority,
	    0, vdev_indirect_child_io_done, zio));
}
static void
vdev_indirect_io_start(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
	if (zio->io_type != ZIO_TYPE_READ) {
		ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
		ASSERT((zio->io_flags &
		    (ZIO_FLAG_SELF_HEAL | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
	}

	vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
	    vdev_indirect_io_start_cb, zio);

	zio_execute(zio);
}
vdev_ops_t vdev_indirect_ops = {
	vdev_indirect_open,
	vdev_indirect_close,
	vdev_default_asize,
	vdev_indirect_io_start,
	vdev_indirect_io_done,
	NULL,			/* vdev_op_state_change */
	NULL,			/* vdev_op_hold */
	NULL,			/* vdev_op_rele */
	vdev_indirect_remap,
	VDEV_TYPE_INDIRECT,	/* name of this vdev type */
	B_FALSE			/* leaf vdev */
};