4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 #include <sys/vdev_indirect_mapping.h>
40 SYSCTL_DECL(_vfs_zfs);
41 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
43 #define GANG_ALLOCATION(flags) \
44 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
46 uint64_t metaslab_aliquot = 512ULL << 10;
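/*
 * For example, with the default aliquot of 512 KB, roughly 512 KB (scaled by
 * the number of children of the top-level vdev, see metaslab_group_activate())
 * is allocated from a metaslab group before the rotor advances to the next
 * group.
 */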
47 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
48 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, force_ganging, CTLFLAG_RWTUN,
49 &metaslab_force_ganging, 0,
50 "Force gang block allocation for blocks larger than or equal to this value");
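/*
 * Note that the default of SPA_MAXBLOCKSIZE + 1 effectively disables forced
 * ganging, since no allocation can be that large; lowering the value (e.g.
 * via the sysctl above) is mainly useful for exercising the gang block code
 * paths.
 */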
53 * Since we can touch multiple metaslabs (and their respective space maps)
54 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk.
58 int zfs_metaslab_sm_blksz = (1 << 12);
59 SYSCTL_INT(_vfs_zfs, OID_AUTO, metaslab_sm_blksz, CTLFLAG_RDTUN,
60 &zfs_metaslab_sm_blksz, 0,
    "Block size for the metaslab space map. Must be a power of 2 greater than 4096.");
64 * The in-core space map representation is more compact than its on-disk form.
65 * The zfs_condense_pct determines how much more compact the in-core
66 * space map representation must be before we compact it on-disk.
67 * Values should be greater than or equal to 100.
69 int zfs_condense_pct = 200;
70 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
    &zfs_condense_pct, 0,
    "Condense the on-disk space map when it is larger than this percentage of "
    "the in-memory counterpart");
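/*
 * For example, with the default of 200 the on-disk space map must be roughly
 * twice the size of its in-core representation before we consider condensing
 * it.
 */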
76 * Condensing a metaslab is not guaranteed to actually reduce the amount of
77 * space used on disk. In particular, a space map uses data in increments of
78 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
79 * same number of blocks after condensing. Since the goal of condensing is to
80 * reduce the number of IOPs required to read the space map, we only want to
81 * condense when we can be sure we will reduce the number of blocks used by the
82 * space map. Unfortunately, we cannot precisely compute whether or not this is
83 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
84 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes more than zfs_metaslab_condense_block_threshold
 * blocks.
88 int zfs_metaslab_condense_block_threshold = 4;
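/*
 * For example, with the default threshold of 4 and the 4 KB space map block
 * size above (zfs_metaslab_sm_blksz), a space map whose uncondensed form fits
 * in roughly 16 KB or less would be left alone (assuming the vdev's ashift
 * does not exceed the space map block size).
 */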
91 * The zfs_mg_noalloc_threshold defines which metaslab groups should
92 * be eligible for allocation. The value is defined as a percentage of
93 * free space. Metaslab groups that have more free space than
94 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
95 * a metaslab group's free space is less than or equal to the
96 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
97 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
98 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
99 * groups are allowed to accept allocations. Gang blocks are always
100 * eligible to allocate on any metaslab group. The default value of 0 means
101 * no metaslab group will be excluded based on this criterion.
103 int zfs_mg_noalloc_threshold = 0;
104 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
105 &zfs_mg_noalloc_threshold, 0,
106 "Percentage of metaslab group size that should be free"
107 " to make it eligible for allocation");
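/*
 * For example, setting the threshold to 30 means a metaslab group with no
 * more than 30% free space is skipped for new allocations until every group
 * in the pool has dropped to 30% free or less.
 */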
110 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
112 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
113 * then it will be skipped unless all metaslab groups within the metaslab
114 * class have also crossed this threshold.
116 int zfs_mg_fragmentation_threshold = 85;
117 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
118 &zfs_mg_fragmentation_threshold, 0,
    "Maximum fragmentation, as a percentage, at which a metaslab group remains "
    "eligible for allocations, unless all metaslab groups within the metaslab "
    "class have also crossed this threshold");
124 * Allow metaslabs to keep their active state as long as their fragmentation
125 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
126 * active metaslab that exceeds this threshold will no longer keep its active
127 * status allowing better metaslabs to be selected.
129 int zfs_metaslab_fragmentation_threshold = 70;
130 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
131 &zfs_metaslab_fragmentation_threshold, 0,
    "Maximum fragmentation percentage at which a metaslab keeps its active state");
135 * When set will load all metaslabs when pool is first opened.
137 int metaslab_debug_load = 0;
138 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
139 &metaslab_debug_load, 0,
140 "Load all metaslabs when pool is first opened");
143 * When set will prevent metaslabs from being unloaded.
145 int metaslab_debug_unload = 0;
146 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
147 &metaslab_debug_unload, 0,
148 "Prevent metaslabs from being unloaded");
151 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
156 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
157 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
158 &metaslab_df_alloc_threshold, 0,
    "Minimum size which forces the dynamic allocator to change its allocation strategy");
162 * The minimum free space, in percent, which must be available
163 * in a space map to continue allocations in a first-fit fashion.
164 * Once the space map's free space drops below this level we dynamically
165 * switch to using best-fit allocations.
167 int metaslab_df_free_pct = 4;
168 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
169 &metaslab_df_free_pct, 0,
170 "The minimum free space, in percent, which must be available in a "
171 "space map to continue allocations in a first-fit fashion");
174 * A metaslab is considered "free" if it contains a contiguous
175 * segment which is greater than metaslab_min_alloc_size.
177 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
178 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
179 &metaslab_min_alloc_size, 0,
180 "A metaslab is considered \"free\" if it contains a contiguous "
181 "segment which is greater than vfs.zfs.metaslab.min_alloc_size");
184 * Percentage of all cpus that can be used by the metaslab taskq.
186 int metaslab_load_pct = 50;
187 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
188 &metaslab_load_pct, 0,
189 "Percentage of cpus that can be used by the metaslab taskq");
192 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
196 int metaslab_unload_delay = TXG_SIZE * 2;
197 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
198 &metaslab_unload_delay, 0,
199 "Number of TXGs that an unused metaslab can be kept in memory");
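/*
 * With TXG_SIZE (4) this defaults to 8 txgs: a loaded metaslab that sees no
 * allocations for that many txgs becomes eligible for unloading.
 */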
202 * Max number of metaslabs per group to preload.
204 int metaslab_preload_limit = SPA_DVAS_PER_BP;
205 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
206 &metaslab_preload_limit, 0,
207 "Max number of metaslabs per group to preload");
 * Enable/disable preloading of metaslabs.
212 boolean_t metaslab_preload_enabled = B_TRUE;
213 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
214 &metaslab_preload_enabled, 0,
    "Enable/disable preloading of metaslabs");
218 * Enable/disable fragmentation weighting on metaslabs.
220 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
221 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
222 &metaslab_fragmentation_factor_enabled, 0,
223 "Enable fragmentation weighting on metaslabs");
226 * Enable/disable lba weighting (i.e. outer tracks are given preference).
228 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
229 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
230 &metaslab_lba_weighting_enabled, 0,
231 "Enable LBA weighting (i.e. outer tracks are given preference)");
234 * Enable/disable metaslab group biasing.
236 boolean_t metaslab_bias_enabled = B_TRUE;
237 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
238 &metaslab_bias_enabled, 0,
239 "Enable metaslab group biasing");
242 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
244 boolean_t zfs_remap_blkptr_enable = B_TRUE;
247 * Enable/disable segment-based metaslab selection.
249 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
252 * When using segment-based metaslab selection, we will continue
253 * allocating from the active metaslab until we have exhausted
254 * zfs_metaslab_switch_threshold of its buckets.
256 int zfs_metaslab_switch_threshold = 2;
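/*
 * For example, with the default of 2 an allocator moves on from its active
 * metaslab once two of that metaslab's weight buckets have been exhausted.
 */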
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
262 #ifdef _METASLAB_TRACING
263 boolean_t metaslab_trace_enabled = B_TRUE;
267 * Maximum entries that the metaslab allocation tracing facility will keep
268 * in a given list when running in non-debug mode. We limit the number
269 * of entries in non-debug mode to prevent us from using up too much memory.
270 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
274 #ifdef _METASLAB_TRACING
275 uint64_t metaslab_trace_max_entries = 5000;
278 static uint64_t metaslab_weight(metaslab_t *);
279 static void metaslab_set_fragmentation(metaslab_t *);
280 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
281 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
282 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
283 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
284 #ifdef _METASLAB_TRACING
285 kmem_cache_t *metaslab_alloc_trace_cache;
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
294 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
296 metaslab_class_t *mc;
298 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
303 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
304 mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
305 sizeof (zfs_refcount_t), KM_SLEEP);
306 mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
307 sizeof (uint64_t), KM_SLEEP);
308 for (int i = 0; i < spa->spa_alloc_count; i++)
309 zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);
315 metaslab_class_destroy(metaslab_class_t *mc)
317 ASSERT(mc->mc_rotor == NULL);
318 ASSERT(mc->mc_alloc == 0);
319 ASSERT(mc->mc_deferred == 0);
320 ASSERT(mc->mc_space == 0);
321 ASSERT(mc->mc_dspace == 0);
323 for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
324 zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
325 kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
326 sizeof (zfs_refcount_t));
327 kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
329 mutex_destroy(&mc->mc_lock);
330 kmem_free(mc, sizeof (metaslab_class_t));
334 metaslab_class_validate(metaslab_class_t *mc)
336 metaslab_group_t *mg;
340 * Must hold one of the spa_config locks.
342 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
343 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
345 if ((mg = mc->mc_rotor) == NULL)
350 ASSERT(vd->vdev_mg != NULL);
351 ASSERT3P(vd->vdev_top, ==, vd);
352 ASSERT3P(mg->mg_class, ==, mc);
353 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
354 } while ((mg = mg->mg_next) != mc->mc_rotor);
360 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
361 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
363 atomic_add_64(&mc->mc_alloc, alloc_delta);
364 atomic_add_64(&mc->mc_deferred, defer_delta);
365 atomic_add_64(&mc->mc_space, space_delta);
366 atomic_add_64(&mc->mc_dspace, dspace_delta);
370 metaslab_class_minblocksize_update(metaslab_class_t *mc)
372 metaslab_group_t *mg;
374 uint64_t minashift = UINT64_MAX;
376 if ((mg = mc->mc_rotor) == NULL) {
377 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
383 if (vd->vdev_ashift < minashift)
384 minashift = vd->vdev_ashift;
385 } while ((mg = mg->mg_next) != mc->mc_rotor);
387 mc->mc_minblocksize = 1ULL << minashift;
391 metaslab_class_get_alloc(metaslab_class_t *mc)
393 return (mc->mc_alloc);
397 metaslab_class_get_deferred(metaslab_class_t *mc)
399 return (mc->mc_deferred);
403 metaslab_class_get_space(metaslab_class_t *mc)
405 return (mc->mc_space);
409 metaslab_class_get_dspace(metaslab_class_t *mc)
411 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
415 metaslab_class_get_minblocksize(metaslab_class_t *mc)
417 return (mc->mc_minblocksize);
421 metaslab_class_histogram_verify(metaslab_class_t *mc)
423 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
427 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
430 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
433 for (int c = 0; c < rvd->vdev_children; c++) {
434 vdev_t *tvd = rvd->vdev_child[c];
435 metaslab_group_t *mg = tvd->vdev_mg;
438 * Skip any holes, uninitialized top-levels, or
 * vdevs that are not in this metaslab class.
441 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
442 mg->mg_class != mc) {
446 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
447 mc_hist[i] += mg->mg_histogram[i];
450 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
451 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
453 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
457 * Calculate the metaslab class's fragmentation metric. The metric
458 * is weighted based on the space contribution of each metaslab group.
459 * The return value will be a number between 0 and 100 (inclusive), or
460 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
461 * zfs_frag_table for more information about the metric.
464 metaslab_class_fragmentation(metaslab_class_t *mc)
466 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
467 uint64_t fragmentation = 0;
469 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
471 for (int c = 0; c < rvd->vdev_children; c++) {
472 vdev_t *tvd = rvd->vdev_child[c];
473 metaslab_group_t *mg = tvd->vdev_mg;
476 * Skip any holes, uninitialized top-levels,
 * or vdevs that are not in this metaslab class.
479 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
480 mg->mg_class != mc) {
485 * If a metaslab group does not contain a fragmentation
486 * metric then just bail out.
488 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
489 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
490 return (ZFS_FRAG_INVALID);
494 * Determine how much this metaslab_group is contributing
495 * to the overall pool fragmentation metric.
497 fragmentation += mg->mg_fragmentation *
498 metaslab_group_get_space(mg);
500 fragmentation /= metaslab_class_get_space(mc);
502 ASSERT3U(fragmentation, <=, 100);
503 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
504 return (fragmentation);
508 * Calculate the amount of expandable space that is available in
509 * this metaslab class. If a device is expanded then its expandable
510 * space will be the amount of allocatable space that is currently not
511 * part of this metaslab class.
514 metaslab_class_expandable_space(metaslab_class_t *mc)
516 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
519 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
520 for (int c = 0; c < rvd->vdev_children; c++) {
522 vdev_t *tvd = rvd->vdev_child[c];
523 metaslab_group_t *mg = tvd->vdev_mg;
525 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
526 mg->mg_class != mc) {
531 * Calculate if we have enough space to add additional
532 * metaslabs. We report the expandable space in terms
533 * of the metaslab size since that's the unit of expansion.
534 * Adjust by efi system partition size.
536 tspace = tvd->vdev_max_asize - tvd->vdev_asize;
537 if (tspace > mc->mc_spa->spa_bootsize) {
538 tspace -= mc->mc_spa->spa_bootsize;
540 space += P2ALIGN(tspace, 1ULL << tvd->vdev_ms_shift);
542 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
547 metaslab_compare(const void *x1, const void *x2)
549 const metaslab_t *m1 = (const metaslab_t *)x1;
550 const metaslab_t *m2 = (const metaslab_t *)x2;
554 if (m1->ms_allocator != -1 && m1->ms_primary)
556 else if (m1->ms_allocator != -1 && !m1->ms_primary)
558 if (m2->ms_allocator != -1 && m2->ms_primary)
560 else if (m2->ms_allocator != -1 && !m2->ms_primary)
564 * Sort inactive metaslabs first, then primaries, then secondaries. When
565 * selecting a metaslab to allocate from, an allocator first tries its
566 * primary, then secondary active metaslab. If it doesn't have active
567 * metaslabs, or can't allocate from them, it searches for an inactive
568 * metaslab to activate. If it can't find a suitable one, it will steal
569 * a primary or secondary metaslab from another allocator.
576 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
580 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
582 return (AVL_CMP(m1->ms_start, m2->ms_start));
586 * Verify that the space accounting on disk matches the in-core range_trees.
589 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
591 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
592 uint64_t allocated = 0;
593 uint64_t sm_free_space, msp_free_space;
595 ASSERT(MUTEX_HELD(&msp->ms_lock));
597 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
601 * We can only verify the metaslab space when we're called
602 * from syncing context with a loaded metaslab that has an allocated
603 * space map. Calling this in non-syncing context does not
604 * provide a consistent view of the metaslab since we're performing
605 * allocations in the future.
607 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
611 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
612 space_map_alloc_delta(msp->ms_sm);
615 * Account for future allocations since we would have already
616 * deducted that space from the ms_freetree.
618 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
620 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
623 msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
624 msp->ms_deferspace + range_tree_space(msp->ms_freed);
626 VERIFY3U(sm_free_space, ==, msp_free_space);
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
635 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity is
 * above zfs_mg_noalloc_threshold and its fragmentation metric, when valid,
 * is less than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
639 * transitions from allocatable to non-allocatable or vice versa then the
640 * metaslab group's class is updated to reflect the transition.
643 metaslab_group_alloc_update(metaslab_group_t *mg)
645 vdev_t *vd = mg->mg_vd;
646 metaslab_class_t *mc = mg->mg_class;
647 vdev_stat_t *vs = &vd->vdev_stat;
648 boolean_t was_allocatable;
649 boolean_t was_initialized;
651 ASSERT(vd == vd->vdev_top);
652 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
655 mutex_enter(&mg->mg_lock);
656 was_allocatable = mg->mg_allocatable;
657 was_initialized = mg->mg_initialized;
659 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
662 mutex_enter(&mc->mc_lock);
665 * If the metaslab group was just added then it won't
666 * have any space until we finish syncing out this txg.
667 * At that point we will consider it initialized and available
668 * for allocations. We also don't consider non-activated
669 * metaslab groups (e.g. vdevs that are in the middle of being removed)
670 * to be initialized, because they can't be used for allocation.
672 mg->mg_initialized = metaslab_group_initialized(mg);
673 if (!was_initialized && mg->mg_initialized) {
675 } else if (was_initialized && !mg->mg_initialized) {
676 ASSERT3U(mc->mc_groups, >, 0);
679 if (mg->mg_initialized)
680 mg->mg_no_free_space = B_FALSE;
683 * A metaslab group is considered allocatable if it has plenty
684 * of free space or is not heavily fragmented. We only take
685 * fragmentation into account if the metaslab group has a valid
686 * fragmentation metric (i.e. a value between 0 and 100).
688 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
689 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
690 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
691 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
694 * The mc_alloc_groups maintains a count of the number of
695 * groups in this metaslab class that are still above the
696 * zfs_mg_noalloc_threshold. This is used by the allocating
697 * threads to determine if they should avoid allocations to
698 * a given group. The allocator will avoid allocations to a group
699 * if that group has reached or is below the zfs_mg_noalloc_threshold
700 * and there are still other groups that are above the threshold.
701 * When a group transitions from allocatable to non-allocatable or
702 * vice versa we update the metaslab class to reflect that change.
703 * When the mc_alloc_groups value drops to 0 that means that all
704 * groups have reached the zfs_mg_noalloc_threshold making all groups
705 * eligible for allocations. This effectively means that all devices
706 * are balanced again.
708 if (was_allocatable && !mg->mg_allocatable)
709 mc->mc_alloc_groups--;
710 else if (!was_allocatable && mg->mg_allocatable)
711 mc->mc_alloc_groups++;
712 mutex_exit(&mc->mc_lock);
714 mutex_exit(&mg->mg_lock);
718 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
720 metaslab_group_t *mg;
722 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
723 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
724 mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
725 cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
726 mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
728 mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
730 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
731 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
734 mg->mg_activation_count = 0;
735 mg->mg_initialized = B_FALSE;
736 mg->mg_no_free_space = B_TRUE;
737 mg->mg_allocators = allocators;
739 mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
740 sizeof (zfs_refcount_t), KM_SLEEP);
741 mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
742 sizeof (uint64_t), KM_SLEEP);
743 for (int i = 0; i < allocators; i++) {
744 zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
745 mg->mg_cur_max_alloc_queue_depth[i] = 0;
748 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
749 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
755 metaslab_group_destroy(metaslab_group_t *mg)
757 ASSERT(mg->mg_prev == NULL);
758 ASSERT(mg->mg_next == NULL);
760 * We may have gone below zero with the activation count
761 * either because we never activated in the first place or
762 * because we're done, and possibly removing the vdev.
764 ASSERT(mg->mg_activation_count <= 0);
766 taskq_destroy(mg->mg_taskq);
767 avl_destroy(&mg->mg_metaslab_tree);
768 kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
769 kmem_free(mg->mg_secondaries, mg->mg_allocators *
770 sizeof (metaslab_t *));
771 mutex_destroy(&mg->mg_lock);
772 mutex_destroy(&mg->mg_ms_initialize_lock);
773 cv_destroy(&mg->mg_ms_initialize_cv);
775 for (int i = 0; i < mg->mg_allocators; i++) {
776 zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
777 mg->mg_cur_max_alloc_queue_depth[i] = 0;
779 kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
780 sizeof (zfs_refcount_t));
781 kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
784 kmem_free(mg, sizeof (metaslab_group_t));
788 metaslab_group_activate(metaslab_group_t *mg)
790 metaslab_class_t *mc = mg->mg_class;
791 metaslab_group_t *mgprev, *mgnext;
793 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
795 ASSERT(mc->mc_rotor != mg);
796 ASSERT(mg->mg_prev == NULL);
797 ASSERT(mg->mg_next == NULL);
798 ASSERT(mg->mg_activation_count <= 0);
800 if (++mg->mg_activation_count <= 0)
803 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
804 metaslab_group_alloc_update(mg);
806 if ((mgprev = mc->mc_rotor) == NULL) {
810 mgnext = mgprev->mg_next;
811 mg->mg_prev = mgprev;
812 mg->mg_next = mgnext;
813 mgprev->mg_next = mg;
814 mgnext->mg_prev = mg;
817 metaslab_class_minblocksize_update(mc);
821 * Passivate a metaslab group and remove it from the allocation rotor.
822 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
823 * a metaslab group. This function will momentarily drop spa_config_locks
824 * that are lower than the SCL_ALLOC lock (see comment below).
827 metaslab_group_passivate(metaslab_group_t *mg)
829 metaslab_class_t *mc = mg->mg_class;
830 spa_t *spa = mc->mc_spa;
831 metaslab_group_t *mgprev, *mgnext;
832 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
834 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
835 (SCL_ALLOC | SCL_ZIO));
837 if (--mg->mg_activation_count != 0) {
838 ASSERT(mc->mc_rotor != mg);
839 ASSERT(mg->mg_prev == NULL);
840 ASSERT(mg->mg_next == NULL);
841 ASSERT(mg->mg_activation_count < 0);
846 * The spa_config_lock is an array of rwlocks, ordered as
847 * follows (from highest to lowest):
848 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
849 * SCL_ZIO > SCL_FREE > SCL_VDEV
850 * (For more information about the spa_config_lock see spa_misc.c)
851 * The higher the lock, the broader its coverage. When we passivate
852 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
853 * config locks. However, the metaslab group's taskq might be trying
854 * to preload metaslabs so we must drop the SCL_ZIO lock and any
855 * lower locks to allow the I/O to complete. At a minimum,
856 * we continue to hold the SCL_ALLOC lock, which prevents any future
857 * allocations from taking place and any changes to the vdev tree.
859 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
860 taskq_wait(mg->mg_taskq);
861 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
862 metaslab_group_alloc_update(mg);
863 for (int i = 0; i < mg->mg_allocators; i++) {
864 metaslab_t *msp = mg->mg_primaries[i];
866 mutex_enter(&msp->ms_lock);
867 metaslab_passivate(msp,
868 metaslab_weight_from_range_tree(msp));
869 mutex_exit(&msp->ms_lock);
871 msp = mg->mg_secondaries[i];
873 mutex_enter(&msp->ms_lock);
874 metaslab_passivate(msp,
875 metaslab_weight_from_range_tree(msp));
876 mutex_exit(&msp->ms_lock);
880 mgprev = mg->mg_prev;
881 mgnext = mg->mg_next;
886 mc->mc_rotor = mgnext;
887 mgprev->mg_next = mgnext;
888 mgnext->mg_prev = mgprev;
893 metaslab_class_minblocksize_update(mc);
897 metaslab_group_initialized(metaslab_group_t *mg)
899 vdev_t *vd = mg->mg_vd;
900 vdev_stat_t *vs = &vd->vdev_stat;
902 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
906 metaslab_group_get_space(metaslab_group_t *mg)
908 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
912 metaslab_group_histogram_verify(metaslab_group_t *mg)
915 vdev_t *vd = mg->mg_vd;
916 uint64_t ashift = vd->vdev_ashift;
919 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
922 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
925 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
926 SPACE_MAP_HISTOGRAM_SIZE + ashift);
928 for (int m = 0; m < vd->vdev_ms_count; m++) {
929 metaslab_t *msp = vd->vdev_ms[m];
931 if (msp->ms_sm == NULL)
934 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
935 mg_hist[i + ashift] +=
936 msp->ms_sm->sm_phys->smp_histogram[i];
939 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
940 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
942 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
946 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
948 metaslab_class_t *mc = mg->mg_class;
949 uint64_t ashift = mg->mg_vd->vdev_ashift;
951 ASSERT(MUTEX_HELD(&msp->ms_lock));
952 if (msp->ms_sm == NULL)
955 mutex_enter(&mg->mg_lock);
956 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
957 mg->mg_histogram[i + ashift] +=
958 msp->ms_sm->sm_phys->smp_histogram[i];
959 mc->mc_histogram[i + ashift] +=
960 msp->ms_sm->sm_phys->smp_histogram[i];
962 mutex_exit(&mg->mg_lock);
966 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
968 metaslab_class_t *mc = mg->mg_class;
969 uint64_t ashift = mg->mg_vd->vdev_ashift;
971 ASSERT(MUTEX_HELD(&msp->ms_lock));
972 if (msp->ms_sm == NULL)
975 mutex_enter(&mg->mg_lock);
976 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
977 ASSERT3U(mg->mg_histogram[i + ashift], >=,
978 msp->ms_sm->sm_phys->smp_histogram[i]);
979 ASSERT3U(mc->mc_histogram[i + ashift], >=,
980 msp->ms_sm->sm_phys->smp_histogram[i]);
982 mg->mg_histogram[i + ashift] -=
983 msp->ms_sm->sm_phys->smp_histogram[i];
984 mc->mc_histogram[i + ashift] -=
985 msp->ms_sm->sm_phys->smp_histogram[i];
987 mutex_exit(&mg->mg_lock);
991 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
993 ASSERT(msp->ms_group == NULL);
994 mutex_enter(&mg->mg_lock);
997 avl_add(&mg->mg_metaslab_tree, msp);
998 mutex_exit(&mg->mg_lock);
1000 mutex_enter(&msp->ms_lock);
1001 metaslab_group_histogram_add(mg, msp);
1002 mutex_exit(&msp->ms_lock);
1006 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1008 mutex_enter(&msp->ms_lock);
1009 metaslab_group_histogram_remove(mg, msp);
1010 mutex_exit(&msp->ms_lock);
1012 mutex_enter(&mg->mg_lock);
1013 ASSERT(msp->ms_group == mg);
1014 avl_remove(&mg->mg_metaslab_tree, msp);
1015 msp->ms_group = NULL;
1016 mutex_exit(&mg->mg_lock);
1020 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1022 ASSERT(MUTEX_HELD(&mg->mg_lock));
1023 ASSERT(msp->ms_group == mg);
1024 avl_remove(&mg->mg_metaslab_tree, msp);
1025 msp->ms_weight = weight;
1026 avl_add(&mg->mg_metaslab_tree, msp);
1031 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1034 * Although in principle the weight can be any value, in
1035 * practice we do not use values in the range [1, 511].
1037 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1038 ASSERT(MUTEX_HELD(&msp->ms_lock));
1040 mutex_enter(&mg->mg_lock);
1041 metaslab_group_sort_impl(mg, msp, weight);
1042 mutex_exit(&mg->mg_lock);
1046 * Calculate the fragmentation for a given metaslab group. We can use
1047 * a simple average here since all metaslabs within the group must have
1048 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
1050 * group have a fragmentation metric.
1053 metaslab_group_fragmentation(metaslab_group_t *mg)
1055 vdev_t *vd = mg->mg_vd;
1056 uint64_t fragmentation = 0;
1057 uint64_t valid_ms = 0;
1059 for (int m = 0; m < vd->vdev_ms_count; m++) {
1060 metaslab_t *msp = vd->vdev_ms[m];
1062 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1066 fragmentation += msp->ms_fragmentation;
1069 if (valid_ms <= vd->vdev_ms_count / 2)
1070 return (ZFS_FRAG_INVALID);
1072 fragmentation /= valid_ms;
1073 ASSERT3U(fragmentation, <=, 100);
1074 return (fragmentation);
1078 * Determine if a given metaslab group should skip allocations. A metaslab
1079 * group should avoid allocations if its free capacity is less than the
1080 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1081 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1082 * that can still handle allocations. If the allocation throttle is enabled
1083 * then we skip allocations to devices that have reached their maximum
1084 * allocation queue depth unless the selected metaslab group is the only
1085 * eligible group remaining.
1088 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1089 uint64_t psize, int allocator, int d)
1091 spa_t *spa = mg->mg_vd->vdev_spa;
1092 metaslab_class_t *mc = mg->mg_class;
1095 * We can only consider skipping this metaslab group if it's
1096 * in the normal metaslab class and there are other metaslab
 * groups to select from. Otherwise, we always consider it eligible
 * for allocations.
1100 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
1104 * If the metaslab group's mg_allocatable flag is set (see comments
1105 * in metaslab_group_alloc_update() for more information) and
1106 * the allocation throttle is disabled then allow allocations to this
1107 * device. However, if the allocation throttle is enabled then
1108 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1109 * to determine if we should allow allocations to this metaslab group.
1110 * If all metaslab groups are no longer considered allocatable
1111 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1112 * gang block size then we allow allocations on this metaslab group
1113 * regardless of the mg_allocatable or throttle settings.
1115 if (mg->mg_allocatable) {
1116 metaslab_group_t *mgp;
1118 uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
1120 if (!mc->mc_alloc_throttle_enabled)
1124 * If this metaslab group does not have any free space, then
1125 * there is no point in looking further.
1127 if (mg->mg_no_free_space)
1131 * Relax allocation throttling for ditto blocks. Due to
1132 * random imbalances in allocation it tends to push copies
 * to whichever vdev looks a bit better at the moment.
1135 qmax = qmax * (4 + d) / 4;
1137 qdepth = zfs_refcount_count(
1138 &mg->mg_alloc_queue_depth[allocator]);
1141 * If this metaslab group is below its qmax or it's
 * the only allocatable metaslab group, then attempt
1143 * to allocate from it.
1145 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1147 ASSERT3U(mc->mc_alloc_groups, >, 1);
1150 * Since this metaslab group is at or over its qmax, we
1151 * need to determine if there are metaslab groups after this
1152 * one that might be able to handle this allocation. This is
1153 * racy since we can't hold the locks for all metaslab
1154 * groups at the same time when we make this check.
1156 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1157 qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
1158 qmax = qmax * (4 + d) / 4;
1159 qdepth = zfs_refcount_count(
1160 &mgp->mg_alloc_queue_depth[allocator]);
1163 * If there is another metaslab group that
1164 * might be able to handle the allocation, then
1165 * we return false so that we skip this group.
1167 if (qdepth < qmax && !mgp->mg_no_free_space)
1172 * We didn't find another group to handle the allocation
1173 * so we can't skip this metaslab group even though
1174 * we are at or over our qmax.
1178 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1185 * ==========================================================================
1186 * Range tree callbacks
1187 * ==========================================================================
1191 * Comparison function for the private size-ordered tree. Tree is sorted
1192 * by size, larger sizes at the end of the tree.
1195 metaslab_rangesize_compare(const void *x1, const void *x2)
1197 const range_seg_t *r1 = x1;
1198 const range_seg_t *r2 = x2;
1199 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1200 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1202 int cmp = AVL_CMP(rs_size1, rs_size2);
1206 return (AVL_CMP(r1->rs_start, r2->rs_start));
1210 * ==========================================================================
1211 * Common allocator routines
1212 * ==========================================================================
1216 * Return the maximum contiguous segment within the metaslab.
1219 metaslab_block_maxsize(metaslab_t *msp)
1221 avl_tree_t *t = &msp->ms_allocatable_by_size;
1224 if (t == NULL || (rs = avl_last(t)) == NULL)
1227 return (rs->rs_end - rs->rs_start);
1230 static range_seg_t *
1231 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1233 range_seg_t *rs, rsearch;
1236 rsearch.rs_start = start;
1237 rsearch.rs_end = start + size;
1239 rs = avl_find(t, &rsearch, &where);
1241 rs = avl_nearest(t, where, AVL_AFTER);
1248 * This is a helper function that can be used by the allocator to find
1249 * a suitable block to allocate. This will search the specified AVL
1250 * tree looking for a block that matches the specified criteria.
1253 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1256 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1258 while (rs != NULL) {
1259 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1261 if (offset + size <= rs->rs_end) {
1262 *cursor = offset + size;
1265 rs = AVL_NEXT(t, rs);
1269 * If we know we've searched the whole map (*cursor == 0), give up.
1270 * Otherwise, reset the cursor to the beginning and try again.
1276 return (metaslab_block_picker(t, cursor, size, align));
1280 * ==========================================================================
1281 * The first-fit block allocator
1282 * ==========================================================================
1285 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1288 * Find the largest power of 2 block size that evenly divides the
1289 * requested size. This is used to try to allocate blocks with similar
1290 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes will not
 * exist in the same region.
1294 uint64_t align = size & -size;
1295 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1296 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1298 return (metaslab_block_picker(t, cursor, size, align));
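/*
 * As an example of the alignment bucketing above: a 24 KB (0x6000) request
 * computes align = 0x6000 & -0x6000 = 0x2000, so it shares the 8 KB cursor
 * bucket with other requests whose size is an odd multiple of 8 KB.
 */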
1301 static metaslab_ops_t metaslab_ff_ops = {
1306 * ==========================================================================
1307 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
1310 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1311 * ==========================================================================
1314 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1317 * Find the largest power of 2 block size that evenly divides the
1318 * requested size. This is used to try to allocate blocks with similar
1319 * alignment from the same area of the metaslab (i.e. same cursor
 * bucket) but it does not guarantee that other allocation sizes will not
 * exist in the same region.
1323 uint64_t align = size & -size;
1324 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1325 range_tree_t *rt = msp->ms_allocatable;
1326 avl_tree_t *t = &rt->rt_root;
1327 uint64_t max_size = metaslab_block_maxsize(msp);
1328 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1330 ASSERT(MUTEX_HELD(&msp->ms_lock));
1331 ASSERT3U(avl_numnodes(t), ==,
1332 avl_numnodes(&msp->ms_allocatable_by_size));
1334 if (max_size < size)
1338 * If we're running low on space switch to using the size
1339 * sorted AVL tree (best-fit).
1341 if (max_size < metaslab_df_alloc_threshold ||
1342 free_pct < metaslab_df_free_pct) {
1343 t = &msp->ms_allocatable_by_size;
1347 return (metaslab_block_picker(t, cursor, size, 1ULL));
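/*
 * For example, with the defaults above a metaslab switches from first-fit to
 * best-fit (the size-sorted tree) once its largest free segment drops below
 * metaslab_df_alloc_threshold (SPA_OLD_MAXBLOCKSIZE, 128 KB) or its free
 * space falls below metaslab_df_free_pct (4%).
 */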
1350 static metaslab_ops_t metaslab_df_ops = {
1355 * ==========================================================================
1356 * Cursor fit block allocator -
1357 * Select the largest region in the metaslab, set the cursor to the beginning
1358 * of the range and the cursor_end to the end of the range. As allocations
1359 * are made advance the cursor. Continue allocating from the cursor until
1360 * the range is exhausted and then find a new range.
1361 * ==========================================================================
1364 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1366 range_tree_t *rt = msp->ms_allocatable;
1367 avl_tree_t *t = &msp->ms_allocatable_by_size;
1368 uint64_t *cursor = &msp->ms_lbas[0];
1369 uint64_t *cursor_end = &msp->ms_lbas[1];
1370 uint64_t offset = 0;
1372 ASSERT(MUTEX_HELD(&msp->ms_lock));
1373 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1375 ASSERT3U(*cursor_end, >=, *cursor);
1377 if ((*cursor + size) > *cursor_end) {
1380 rs = avl_last(&msp->ms_allocatable_by_size);
1381 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1384 *cursor = rs->rs_start;
1385 *cursor_end = rs->rs_end;
1394 static metaslab_ops_t metaslab_cf_ops = {
1399 * ==========================================================================
1400 * New dynamic fit allocator -
1401 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
1404 * ==========================================================================
1408 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1409 * to request from the allocator.
1411 uint64_t metaslab_ndf_clump_shift = 4;
1414 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1416 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1418 range_seg_t *rs, rsearch;
1419 uint64_t hbit = highbit64(size);
1420 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1421 uint64_t max_size = metaslab_block_maxsize(msp);
1423 ASSERT(MUTEX_HELD(&msp->ms_lock));
1424 ASSERT3U(avl_numnodes(t), ==,
1425 avl_numnodes(&msp->ms_allocatable_by_size));
1427 if (max_size < size)
1430 rsearch.rs_start = *cursor;
1431 rsearch.rs_end = *cursor + size;
1433 rs = avl_find(t, &rsearch, &where);
1434 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1435 t = &msp->ms_allocatable_by_size;
1437 rsearch.rs_start = 0;
1438 rsearch.rs_end = MIN(max_size,
1439 1ULL << (hbit + metaslab_ndf_clump_shift));
1440 rs = avl_find(t, &rsearch, &where);
1442 rs = avl_nearest(t, where, AVL_AFTER);
1446 if ((rs->rs_end - rs->rs_start) >= size) {
1447 *cursor = rs->rs_start + size;
1448 return (rs->rs_start);
1453 static metaslab_ops_t metaslab_ndf_ops = {
1457 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
1466 * Wait for any in-progress metaslab loads to complete.
1469 metaslab_load_wait(metaslab_t *msp)
1471 ASSERT(MUTEX_HELD(&msp->ms_lock));
1473 while (msp->ms_loading) {
1474 ASSERT(!msp->ms_loaded);
1475 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1480 metaslab_load_impl(metaslab_t *msp)
1484 ASSERT(MUTEX_HELD(&msp->ms_lock));
1485 ASSERT(msp->ms_loading);
1488 * Nobody else can manipulate a loading metaslab, so it's now safe
1489 * to drop the lock. This way we don't have to hold the lock while
1490 * reading the spacemap from disk.
1492 mutex_exit(&msp->ms_lock);
1495 * If the space map has not been allocated yet, then treat
1496 * all the space in the metaslab as free and add it to ms_allocatable.
1498 if (msp->ms_sm != NULL) {
1499 error = space_map_load(msp->ms_sm, msp->ms_allocatable,
1502 range_tree_add(msp->ms_allocatable,
1503 msp->ms_start, msp->ms_size);
1506 mutex_enter(&msp->ms_lock);
1511 ASSERT3P(msp->ms_group, !=, NULL);
1512 msp->ms_loaded = B_TRUE;
1515 * If the metaslab already has a spacemap, then we need to
1516 * remove all segments from the defer tree; otherwise, the
1517 * metaslab is completely empty and we can skip this.
1519 if (msp->ms_sm != NULL) {
1520 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1521 range_tree_walk(msp->ms_defer[t],
1522 range_tree_remove, msp->ms_allocatable);
1525 msp->ms_max_size = metaslab_block_maxsize(msp);
1531 metaslab_load(metaslab_t *msp)
1533 ASSERT(MUTEX_HELD(&msp->ms_lock));
1536 * There may be another thread loading the same metaslab, if that's
1537 * the case just wait until the other thread is done and return.
1539 metaslab_load_wait(msp);
1542 VERIFY(!msp->ms_loading);
1544 msp->ms_loading = B_TRUE;
1545 int error = metaslab_load_impl(msp);
1546 msp->ms_loading = B_FALSE;
1547 cv_broadcast(&msp->ms_load_cv);
1553 metaslab_unload(metaslab_t *msp)
1555 ASSERT(MUTEX_HELD(&msp->ms_lock));
1556 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
1557 msp->ms_loaded = B_FALSE;
1558 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1559 msp->ms_max_size = 0;
1563 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1566 vdev_t *vd = mg->mg_vd;
1567 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1571 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1572 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1573 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
1574 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1577 ms->ms_start = id << vd->vdev_ms_shift;
1578 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1579 ms->ms_allocator = -1;
1580 ms->ms_new = B_TRUE;
1583 * We only open space map objects that already exist. All others
1584 * will be opened when we finally allocate an object for it.
1587 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1588 ms->ms_size, vd->vdev_ashift);
1591 kmem_free(ms, sizeof (metaslab_t));
1595 ASSERT(ms->ms_sm != NULL);
1599 * We create the main range tree here, but we don't create the
1600 * other range trees until metaslab_sync_done(). This serves
1601 * two purposes: it allows metaslab_sync_done() to detect the
1602 * addition of new space; and for debugging, it ensures that we'd
1603 * data fault on any attempt to use this metaslab before it's ready.
1605 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops, &ms->ms_allocatable_by_size,
1606 metaslab_rangesize_compare, 0);
1607 metaslab_group_add(mg, ms);
1609 metaslab_set_fragmentation(ms);
1612 * If we're opening an existing pool (txg == 0) or creating
1613 * a new one (txg == TXG_INITIAL), all space is available now.
1614 * If we're adding space to an existing pool, the new space
1615 * does not become available until after this txg has synced.
1616 * The metaslab's weight will also be initialized when we sync
1617 * out this txg. This ensures that we don't attempt to allocate
1618 * from it before we have initialized it completely.
1620 if (txg <= TXG_INITIAL)
1621 metaslab_sync_done(ms, 0);
1624 * If metaslab_debug_load is set and we're initializing a metaslab
 * that has an allocated space map object then load its space
 * map so that we can verify frees.
1628 if (metaslab_debug_load && ms->ms_sm != NULL) {
1629 mutex_enter(&ms->ms_lock);
1630 VERIFY0(metaslab_load(ms));
1631 mutex_exit(&ms->ms_lock);
1635 vdev_dirty(vd, 0, NULL, txg);
1636 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1645 metaslab_fini(metaslab_t *msp)
1647 metaslab_group_t *mg = msp->ms_group;
1649 metaslab_group_remove(mg, msp);
1651 mutex_enter(&msp->ms_lock);
1652 VERIFY(msp->ms_group == NULL);
1653 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1655 space_map_close(msp->ms_sm);
1657 metaslab_unload(msp);
1658 range_tree_destroy(msp->ms_allocatable);
1659 range_tree_destroy(msp->ms_freeing);
1660 range_tree_destroy(msp->ms_freed);
1662 for (int t = 0; t < TXG_SIZE; t++) {
1663 range_tree_destroy(msp->ms_allocating[t]);
1666 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1667 range_tree_destroy(msp->ms_defer[t]);
1669 ASSERT0(msp->ms_deferspace);
1671 range_tree_destroy(msp->ms_checkpointing);
1673 mutex_exit(&msp->ms_lock);
1674 cv_destroy(&msp->ms_load_cv);
1675 mutex_destroy(&msp->ms_lock);
1676 mutex_destroy(&msp->ms_sync_lock);
1677 ASSERT3U(msp->ms_allocator, ==, -1);
1679 kmem_free(msp, sizeof (metaslab_t));
1682 #define FRAGMENTATION_TABLE_SIZE 17
1685 * This table defines a segment size based fragmentation metric that will
1686 * allow each metaslab to derive its own fragmentation value. This is done
1687 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
1689 * this for all buckets and dividing it by the total amount of free
1690 * space in this metaslab (i.e. the total free space in all buckets) gives
1691 * us the fragmentation metric. This means that a high fragmentation metric
1692 * equates to most of the free space being comprised of small segments.
1693 * Conversely, if the metric is low, then most of the free space is in
1694 * large segments. A 10% change in fragmentation equates to approximately
1695 * double the number of segments.
1697 * This table defines 0% fragmented space using 16MB segments. Testing has
1698 * shown that segments that are greater than or equal to 16MB do not suffer
1699 * from drastic performance problems. Using this value, we derive the rest
1700 * of the table. Since the fragmentation value is never stored on disk, it
1701 * is possible to change these calculations in the future.
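/*
 * As an illustration: if all of a metaslab's free space sat in a single
 * histogram bucket whose table entry happened to be 30, the metaslab's
 * fragmentation metric would be 30%; spreading the space across buckets
 * yields the table-weighted average instead.
 */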
1703 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
1729 metaslab_set_fragmentation(metaslab_t *msp)
1731 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1732 uint64_t fragmentation = 0;
1734 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1735 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1737 if (!feature_enabled) {
1738 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1743 * A null space map means that the entire metaslab is free
1744 * and thus is not fragmented.
1746 if (msp->ms_sm == NULL) {
1747 msp->ms_fragmentation = 0;
1752 * If this metaslab's space map has not been upgraded, flag it
1753 * so that we upgrade next time we encounter it.
1755 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1756 uint64_t txg = spa_syncing_txg(spa);
1757 vdev_t *vd = msp->ms_group->mg_vd;
1760 * If we've reached the final dirty txg, then we must
1761 * be shutting down the pool. We don't want to dirty
1762 * any data past this point so skip setting the condense
 * flag. We can retry this action the next time the pool
 * is imported.
1766 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
1767 msp->ms_condense_wanted = B_TRUE;
1768 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1769 zfs_dbgmsg("txg %llu, requesting force condense: "
1770 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1773 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1777 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1779 uint8_t shift = msp->ms_sm->sm_shift;
1781 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1782 FRAGMENTATION_TABLE_SIZE - 1);
1784 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1787 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1790 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1791 fragmentation += space * zfs_frag_table[idx];
1795 fragmentation /= total;
1796 ASSERT3U(fragmentation, <=, 100);
1798 msp->ms_fragmentation = fragmentation;
1802 * Compute a weight -- a selection preference value -- for the given metaslab.
1803 * This is based on the amount of free space, the level of fragmentation,
1804 * the LBA range, and whether the metaslab is loaded.
1807 metaslab_space_weight(metaslab_t *msp)
1809 metaslab_group_t *mg = msp->ms_group;
1810 vdev_t *vd = mg->mg_vd;
1811 uint64_t weight, space;
1813 ASSERT(MUTEX_HELD(&msp->ms_lock));
1814 ASSERT(!vd->vdev_removing);
1817 * The baseline weight is the metaslab's free space.
1819 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1821 if (metaslab_fragmentation_factor_enabled &&
1822 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1824 * Use the fragmentation information to inversely scale
1825 * down the baseline weight. We need to ensure that we
1826 * don't exclude this metaslab completely when it's 100%
 * fragmented. To avoid this we reduce the fragmented value
 * by 1.
1830 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1833 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1834 * this metaslab again. The fragmentation metric may have
1835 * decreased the space to something smaller than
1836 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1837 * so that we can consume any remaining space.
1839 if (space > 0 && space < SPA_MINBLOCKSIZE)
1840 space = SPA_MINBLOCKSIZE;
1845 * Modern disks have uniform bit density and constant angular velocity.
1846 * Therefore, the outer recording zones are faster (higher bandwidth)
1847 * than the inner zones by the ratio of outer to inner track diameter,
1848 * which is typically around 2:1. We account for this by assigning
1849 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1850 * In effect, this means that we'll select the metaslab with the most
1851 * free bandwidth rather than simply the one with the most free space.
1853 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
1854 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1855 ASSERT(weight >= space && weight <= 2 * space);
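/*
 * For example, the outermost metaslab (ms_id 0) keeps twice its
 * space-based weight while the innermost one keeps roughly 1x.
 */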
1859 * If this metaslab is one we're actively using, adjust its
1860 * weight to make it preferable to any inactive metaslab so
1861 * we'll polish it off. If the fragmentation on this metaslab
 * has exceeded our threshold, then don't mark it active.
1864 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1865 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1866 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1869 WEIGHT_SET_SPACEBASED(weight);
1874 * Return the weight of the specified metaslab, according to the segment-based
1875 * weighting algorithm. The metaslab must be loaded. This function can
1876 * be called within a sync pass since it relies only on the metaslab's
1877 * range tree which is always accurate when the metaslab is loaded.
1880 metaslab_weight_from_range_tree(metaslab_t *msp)
1882 uint64_t weight = 0;
1883 uint32_t segments = 0;
1885 ASSERT(msp->ms_loaded);
1887 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1889 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1890 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1893 segments += msp->ms_allocatable->rt_histogram[i];
1896 * The range tree provides more precision than the space map
1897 * and must be downgraded so that all values fit within the
1898 * space map's histogram. This allows us to compare loaded
1899 * vs. unloaded metaslabs to determine which metaslab is
1900 * considered "best".
1905 if (segments != 0) {
1906 WEIGHT_SET_COUNT(weight, segments);
1907 WEIGHT_SET_INDEX(weight, i);
1908 WEIGHT_SET_ACTIVE(weight, 0);
1916 * Calculate the weight based on the on-disk histogram. This should only
1917 * be called after a sync pass has completely finished since the on-disk
1918 * information is updated in metaslab_sync().
1921 metaslab_weight_from_spacemap(metaslab_t *msp)
1923 uint64_t weight = 0;
1925 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1926 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1927 WEIGHT_SET_COUNT(weight,
1928 msp->ms_sm->sm_phys->smp_histogram[i]);
1929 WEIGHT_SET_INDEX(weight, i +
1930 msp->ms_sm->sm_shift);
1931 WEIGHT_SET_ACTIVE(weight, 0);
1939 * Compute a segment-based weight for the specified metaslab. The weight
1940 * is determined by highest bucket in the histogram. The information
1941 * for the highest bucket is encoded into the weight value.
1944 metaslab_segment_weight(metaslab_t *msp)
1946 metaslab_group_t *mg = msp->ms_group;
1947 uint64_t weight = 0;
1948 uint8_t shift = mg->mg_vd->vdev_ashift;
1950 ASSERT(MUTEX_HELD(&msp->ms_lock));
1953 * The metaslab is completely free.
1955 if (space_map_allocated(msp->ms_sm) == 0) {
1956 int idx = highbit64(msp->ms_size) - 1;
1957 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1959 if (idx < max_idx) {
1960 WEIGHT_SET_COUNT(weight, 1ULL);
1961 WEIGHT_SET_INDEX(weight, idx);
1963 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1964 WEIGHT_SET_INDEX(weight, max_idx);
1966 WEIGHT_SET_ACTIVE(weight, 0);
1967 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
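/*
 * Worked example (assumes ashift == 9 and SPACE_MAP_HISTOGRAM_SIZE of
 * 32, i.e. max_idx == 40): a completely free 8GB metaslab (2^33 bytes)
 * yields idx = highbit64(2^33) - 1 = 33 < 40 and is encoded as a single
 * free segment of 2^33 bytes. A hypothetical 4TB metaslab (2^42 bytes)
 * has idx = 42 > 40 and is instead encoded as 1 << (42 - 40) = 4
 * segments at index 40, the largest bucket the histogram can represent.
 */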
1972 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1975 * If the metaslab is fully allocated then just make the weight 0.
1977 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1980 * If the metaslab is already loaded, then use the range tree to
1981 * determine the weight. Otherwise, we rely on the space map information
1982 * to generate the weight.
1984 if (msp->ms_loaded) {
1985 weight = metaslab_weight_from_range_tree(msp);
1987 weight = metaslab_weight_from_spacemap(msp);
1991 * If the metaslab was active the last time we calculated its weight
1992 * then keep it active. We want to consume the entire region that
1993 * is associated with this weight.
1995 if (msp->ms_activation_weight != 0 && weight != 0)
1996 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
2001 * Determine if we should attempt to allocate from this metaslab. If the
2002 * metaslab has a maximum size then we can quickly determine if the desired
2003 * allocation size can be satisfied. Otherwise, if we're using segment-based
2004 * weighting then we can determine the maximum allocation that this metaslab
2005 * can accommodate based on the index encoded in the weight. If we're using
2006 * space-based weights then rely on the entire weight (excluding the weight type bit).
2010 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
2012 boolean_t should_allocate;
2014 if (msp->ms_max_size != 0)
2015 return (msp->ms_max_size >= asize);
2017 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2019 * The metaslab segment weight indicates segments in the
2020 * range [2^i, 2^(i+1)), where i is the index in the weight.
2021 * Since the asize might be in the middle of the range, we
2022 * should attempt the allocation if asize < 2^(i+1).
2024 should_allocate = (asize <
2025 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
2027 should_allocate = (asize <=
2028 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2030 return (should_allocate);
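/*
 * Illustrative check (values assumed): when ms_max_size is unknown and
 * the weight is segment-based with an index of 17, the metaslab is
 * known to hold free segments in [128K, 256K), so a request of
 * asize == 192K is attempted (192K < 2^18) while one of 256K is not.
 * For space-based weights the request is simply compared against the
 * raw free-space weight with the type bit masked off.
 */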
2034 metaslab_weight(metaslab_t *msp)
2036 vdev_t *vd = msp->ms_group->mg_vd;
2037 spa_t *spa = vd->vdev_spa;
2040 ASSERT(MUTEX_HELD(&msp->ms_lock));
2043 * If this vdev is in the process of being removed, there is nothing
2044 * for us to do here.
2046 if (vd->vdev_removing)
2049 metaslab_set_fragmentation(msp);
2052 * Update the maximum size if the metaslab is loaded. This will
2053 * ensure that we get an accurate maximum size if newly freed space
2054 * has been added back into the free tree.
2057 msp->ms_max_size = metaslab_block_maxsize(msp);
2060 * Segment-based weighting requires space map histogram support.
2062 if (zfs_metaslab_segment_weight_enabled &&
2063 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2064 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2065 sizeof (space_map_phys_t))) {
2066 weight = metaslab_segment_weight(msp);
2068 weight = metaslab_space_weight(msp);
2074 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2075 int allocator, uint64_t activation_weight)
2078 * If we're activating for the claim code, we don't want to actually
2079 * set the metaslab up for a specific allocator.
2081 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2083 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2084 mg->mg_primaries : mg->mg_secondaries);
2086 ASSERT(MUTEX_HELD(&msp->ms_lock));
2087 mutex_enter(&mg->mg_lock);
2088 if (arr[allocator] != NULL) {
2089 mutex_exit(&mg->mg_lock);
2093 arr[allocator] = msp;
2094 ASSERT3S(msp->ms_allocator, ==, -1);
2095 msp->ms_allocator = allocator;
2096 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2097 mutex_exit(&mg->mg_lock);
2103 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2105 ASSERT(MUTEX_HELD(&msp->ms_lock));
2107 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
2108 int error = metaslab_load(msp);
2110 metaslab_group_sort(msp->ms_group, msp, 0);
2113 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2115 * The metaslab was activated for another allocator
2116 * while we were waiting, we should reselect.
2120 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2121 allocator, activation_weight)) != 0) {
2125 msp->ms_activation_weight = msp->ms_weight;
2126 metaslab_group_sort(msp->ms_group, msp,
2127 msp->ms_weight | activation_weight);
2129 ASSERT(msp->ms_loaded);
2130 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2136 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2139 ASSERT(MUTEX_HELD(&msp->ms_lock));
2140 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2141 metaslab_group_sort(mg, msp, weight);
2145 mutex_enter(&mg->mg_lock);
2146 ASSERT3P(msp->ms_group, ==, mg);
2147 if (msp->ms_primary) {
2148 ASSERT3U(0, <=, msp->ms_allocator);
2149 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2150 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2151 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2152 mg->mg_primaries[msp->ms_allocator] = NULL;
2154 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2155 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2156 mg->mg_secondaries[msp->ms_allocator] = NULL;
2158 msp->ms_allocator = -1;
2159 metaslab_group_sort_impl(mg, msp, weight);
2160 mutex_exit(&mg->mg_lock);
2164 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2166 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
2169 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2170 * this metaslab again. In that case, it had better be empty,
2171 * or we would be leaving space on the table.
2173 ASSERT(size >= SPA_MINBLOCKSIZE ||
2174 range_tree_is_empty(msp->ms_allocatable));
2175 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2177 msp->ms_activation_weight = 0;
2178 metaslab_passivate_allocator(msp->ms_group, msp, weight);
2179 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2183 * Segment-based metaslabs are activated once and remain active until
2184 * we either fail an allocation attempt (similar to space-based metaslabs)
2185 * or have exhausted the free space in zfs_metaslab_switch_threshold
2186 * buckets since the metaslab was activated. This function checks to see
2187 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2188 * metaslab and passivates it proactively. This will allow us to select a
2189 * metaslab with a larger contiguous region, if any remains within this
2190 * metaslab group. If we're in sync pass > 1, then we continue using this
2191 * metaslab so that we don't dirty more blocks and cause more sync passes.
2194 metaslab_segment_may_passivate(metaslab_t *msp)
2196 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2198 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2202 * Since we are in the middle of a sync pass, the most accurate
2203 * information that is accessible to us is the in-core range tree
2204 * histogram; calculate the new weight based on that information.
2206 uint64_t weight = metaslab_weight_from_range_tree(msp);
2207 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2208 int current_idx = WEIGHT_GET_INDEX(weight);
2210 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2211 metaslab_passivate(msp, weight);
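/*
 * Illustrative behavior (assuming a zfs_metaslab_switch_threshold of
 * 2): a metaslab activated with its largest free bucket at index 23
 * (segments of at least 8MB) is passivated here once its recomputed
 * index drops to 21 or below, i.e. after the two largest bucket sizes
 * have been exhausted, so the allocator can move on to a metaslab with
 * larger contiguous regions.
 */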
2215 metaslab_preload(void *arg)
2217 metaslab_t *msp = arg;
2218 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2220 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2222 mutex_enter(&msp->ms_lock);
2223 (void) metaslab_load(msp);
2224 msp->ms_selected_txg = spa_syncing_txg(spa);
2225 mutex_exit(&msp->ms_lock);
2229 metaslab_group_preload(metaslab_group_t *mg)
2231 spa_t *spa = mg->mg_vd->vdev_spa;
2233 avl_tree_t *t = &mg->mg_metaslab_tree;
2236 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2237 taskq_wait(mg->mg_taskq);
2241 mutex_enter(&mg->mg_lock);
2244 * Load the next potential metaslabs
2246 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2247 ASSERT3P(msp->ms_group, ==, mg);
2250 * We preload only the maximum number of metaslabs specified
2251 * by metaslab_preload_limit. If a metaslab is being forced
2252 * to condense then we preload it too. This will ensure
2253 * that force condensing happens in the next txg.
2255 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2259 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2260 msp, TQ_SLEEP) != 0);
2262 mutex_exit(&mg->mg_lock);
2266 * Determine if the space map's on-disk footprint is past our tolerance
2267 * for inefficiency. We would like to use the following criteria to make our decision:
2270 * 1. The size of the space map object should not dramatically increase as a
2271 * result of writing out the free space range tree.
2273 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2274 * times the size of the free space range tree representation
2275 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2277 * 3. The on-disk size of the space map should actually decrease.
2279 * Unfortunately, we cannot compute the on-disk size of the space map in this
2280 * context because we cannot accurately compute the effects of compression, etc.
2281 * Instead, we apply the heuristic described in the block comment for
2282 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2283 * is greater than a threshold number of blocks.
2286 metaslab_should_condense(metaslab_t *msp)
2288 space_map_t *sm = msp->ms_sm;
2289 vdev_t *vd = msp->ms_group->mg_vd;
2290 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2291 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
2293 ASSERT(MUTEX_HELD(&msp->ms_lock));
2294 ASSERT(msp->ms_loaded);
2297 * Allocations and frees in early passes are generally more space
2298 * efficient (in terms of blocks described in space map entries)
2299 * than the ones in later passes (e.g. we don't compress after
2300 * sync pass 5) and condensing a metaslab multiple times in a txg
2301 * could degrade performance.
2303 * Thus we prefer condensing each metaslab at most once every txg at
2304 * the earliest sync pass possible. If a metaslab is eligible for
2305 * condensing again after being considered for condensing within the
2306 * same txg, it will hopefully be dirty in the next txg where it will
2307 * be condensed at an earlier pass.
2309 if (msp->ms_condense_checked_txg == current_txg)
2311 msp->ms_condense_checked_txg = current_txg;
2314 * We always condense metaslabs that are empty and metaslabs for
2315 * which a condense request has been made.
2317 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2318 msp->ms_condense_wanted)
2321 uint64_t object_size = space_map_length(msp->ms_sm);
2322 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2323 msp->ms_allocatable, SM_NO_VDEVID);
2325 dmu_object_info_t doi;
2326 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2327 uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2329 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2330 object_size > zfs_metaslab_condense_block_threshold * record_size);
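/*
 * Worked example (assumes the defaults zfs_condense_pct == 200 and
 * zfs_metaslab_condense_block_threshold == 4, with a 4K record size):
 * a 200K on-disk space map whose estimated optimal size is 40K is
 * condensed, since 200K >= 40K * 200 / 100 = 80K and 200K > 4 * 4K =
 * 16K. A 12K space map is left alone even if badly fragmented, because
 * 12K does not exceed the 16K block threshold and rewriting it could
 * not save a meaningful number of blocks.
 */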
2334 * Condense the on-disk space map representation to its minimized form.
2335 * The minimized form consists of a small number of allocations followed by
2336 * the entries of the free range tree.
2339 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2341 range_tree_t *condense_tree;
2342 space_map_t *sm = msp->ms_sm;
2344 ASSERT(MUTEX_HELD(&msp->ms_lock));
2345 ASSERT(msp->ms_loaded);
2347 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2348 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2349 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2350 msp->ms_group->mg_vd->vdev_spa->spa_name,
2351 space_map_length(msp->ms_sm),
2352 avl_numnodes(&msp->ms_allocatable->rt_root),
2353 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2355 msp->ms_condense_wanted = B_FALSE;
2358 * Create a range tree that is 100% allocated. We remove segments
2359 * that have been freed in this txg, any deferred frees that exist,
2360 * and any allocation in the future. Removing segments should be
2361 * a relatively inexpensive operation since we expect these trees to
2362 * have a small number of nodes.
2364 condense_tree = range_tree_create(NULL, NULL);
2365 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2367 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2368 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
2370 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2371 range_tree_walk(msp->ms_defer[t],
2372 range_tree_remove, condense_tree);
2375 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2376 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2377 range_tree_remove, condense_tree);
2381 * We're about to drop the metaslab's lock thus allowing
2382 * other consumers to change its content. Set the
2383 * metaslab's ms_condensing flag to ensure that
2384 * allocations on this metaslab do not occur while we're
2385 * in the middle of committing it to disk. This is only critical
2386 * for ms_allocatable as all other range trees use per txg
2387 * views of their content.
2389 msp->ms_condensing = B_TRUE;
2391 mutex_exit(&msp->ms_lock);
2392 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
2395 * While we would ideally like to create a space map representation
2396 * that consists only of allocation records, doing so can be
2397 * prohibitively expensive because the in-core free tree can be
2398 * large, and therefore computationally expensive to subtract
2399 * from the condense_tree. Instead we sync out two trees, a cheap
2400 * allocation only tree followed by the in-core free tree. While not
2401 * optimal, this is typically close to optimal, and much cheaper to compute.
2404 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
2405 range_tree_vacate(condense_tree, NULL, NULL);
2406 range_tree_destroy(condense_tree);
2408 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
2409 mutex_enter(&msp->ms_lock);
2410 msp->ms_condensing = B_FALSE;
2414 * Write a metaslab to disk in the context of the specified transaction group.
2417 metaslab_sync(metaslab_t *msp, uint64_t txg)
2419 metaslab_group_t *mg = msp->ms_group;
2420 vdev_t *vd = mg->mg_vd;
2421 spa_t *spa = vd->vdev_spa;
2422 objset_t *mos = spa_meta_objset(spa);
2423 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
2425 uint64_t object = space_map_object(msp->ms_sm);
2427 ASSERT(!vd->vdev_ishole);
2430 * This metaslab has just been added so there's no work to do now.
2432 if (msp->ms_freeing == NULL) {
2433 ASSERT3P(alloctree, ==, NULL);
2437 ASSERT3P(alloctree, !=, NULL);
2438 ASSERT3P(msp->ms_freeing, !=, NULL);
2439 ASSERT3P(msp->ms_freed, !=, NULL);
2440 ASSERT3P(msp->ms_checkpointing, !=, NULL);
2443 * Normally, we don't want to process a metaslab if there are no
2444 * allocations or frees to perform. However, if the metaslab is being
2445 * forced to condense and it's loaded, we need to let it through.
2447 if (range_tree_is_empty(alloctree) &&
2448 range_tree_is_empty(msp->ms_freeing) &&
2449 range_tree_is_empty(msp->ms_checkpointing) &&
2450 !(msp->ms_loaded && msp->ms_condense_wanted))
2454 VERIFY(txg <= spa_final_dirty_txg(spa));
2457 * The only state that can actually be changing concurrently with
2458 * metaslab_sync() is the metaslab's ms_allocatable. No other
2459 * thread can be modifying this txg's alloc, freeing,
2460 * freed, or space_map_phys_t. We drop ms_lock whenever we
2461 * could call into the DMU, because the DMU can call down to us
2462 * (e.g. via zio_free()) at any time.
2464 * The spa_vdev_remove_thread() can be reading metaslab state
2465 * concurrently, and it is locked out by the ms_sync_lock. Note
2466 * that the ms_lock is insufficient for this, because it is dropped
2467 * by space_map_write().
2469 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2471 if (msp->ms_sm == NULL) {
2472 uint64_t new_object;
2474 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
2475 VERIFY3U(new_object, !=, 0);
2477 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2478 msp->ms_start, msp->ms_size, vd->vdev_ashift));
2479 ASSERT(msp->ms_sm != NULL);
2482 if (!range_tree_is_empty(msp->ms_checkpointing) &&
2483 vd->vdev_checkpoint_sm == NULL) {
2484 ASSERT(spa_has_checkpoint(spa));
2486 uint64_t new_object = space_map_alloc(mos,
2487 vdev_standard_sm_blksz, tx);
2488 VERIFY3U(new_object, !=, 0);
2490 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2491 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2492 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2495 * We save the space map object as an entry in vdev_top_zap
2496 * so it can be retrieved when the pool is reopened after an
2497 * export or through zdb.
2499 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2500 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2501 sizeof (new_object), 1, &new_object, tx));
2504 mutex_enter(&msp->ms_sync_lock);
2505 mutex_enter(&msp->ms_lock);
2508 * Note: metaslab_condense() clears the space map's histogram.
2509 * Therefore we must verify and remove this histogram before condensing.
2512 metaslab_group_histogram_verify(mg);
2513 metaslab_class_histogram_verify(mg->mg_class);
2514 metaslab_group_histogram_remove(mg, msp);
2516 if (msp->ms_loaded && metaslab_should_condense(msp)) {
2517 metaslab_condense(msp, txg, tx);
2519 mutex_exit(&msp->ms_lock);
2520 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
2522 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
2524 mutex_enter(&msp->ms_lock);
2527 if (!range_tree_is_empty(msp->ms_checkpointing)) {
2528 ASSERT(spa_has_checkpoint(spa));
2529 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2532 * Since we are doing writes to disk and the ms_checkpointing
2533 * tree won't be changing during that time, we drop the
2534 * ms_lock while writing to the checkpoint space map.
2536 mutex_exit(&msp->ms_lock);
2537 space_map_write(vd->vdev_checkpoint_sm,
2538 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
2539 mutex_enter(&msp->ms_lock);
2540 space_map_update(vd->vdev_checkpoint_sm);
2542 spa->spa_checkpoint_info.sci_dspace +=
2543 range_tree_space(msp->ms_checkpointing);
2544 vd->vdev_stat.vs_checkpoint_space +=
2545 range_tree_space(msp->ms_checkpointing);
2546 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2547 -vd->vdev_checkpoint_sm->sm_alloc);
2549 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2552 if (msp->ms_loaded) {
2554 * When the space map is loaded, we have an accurate
2555 * histogram in the range tree. This gives us an opportunity
2556 * to bring the space map's histogram up-to-date so we clear
2557 * it first before updating it.
2559 space_map_histogram_clear(msp->ms_sm);
2560 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
2563 * Since we've cleared the histogram we need to add back
2564 * any free space that has already been processed, plus
2565 * any deferred space. This allows the on-disk histogram
2566 * to accurately reflect all free space even if some space
2567 * is not yet available for allocation (i.e. deferred).
2569 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
2572 * Add back any deferred free space that has not been
2573 * added back into the in-core free tree yet. This will
2574 * ensure that we don't end up with a space map histogram
2575 * that is completely empty unless the metaslab is fully allocated.
2578 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2579 space_map_histogram_add(msp->ms_sm,
2580 msp->ms_defer[t], tx);
2585 * Always add the free space from this sync pass to the space
2586 * map histogram. We want to make sure that the on-disk histogram
2587 * accounts for all free space. If the space map is not loaded,
2588 * then we will lose some accuracy but will correct it the next
2589 * time we load the space map.
2591 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
2593 metaslab_group_histogram_add(mg, msp);
2594 metaslab_group_histogram_verify(mg);
2595 metaslab_class_histogram_verify(mg->mg_class);
2598 * For sync pass 1, we avoid traversing this txg's free range tree
2599 * and instead will just swap the pointers for freeing and
2600 * freed. We can safely do this since the freed_tree is
2601 * guaranteed to be empty on the initial pass.
2603 if (spa_sync_pass(spa) == 1) {
2604 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
2606 range_tree_vacate(msp->ms_freeing,
2607 range_tree_add, msp->ms_freed);
2609 range_tree_vacate(alloctree, NULL, NULL);
2611 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2612 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2614 ASSERT0(range_tree_space(msp->ms_freeing));
2615 ASSERT0(range_tree_space(msp->ms_checkpointing));
2617 mutex_exit(&msp->ms_lock);
2619 if (object != space_map_object(msp->ms_sm)) {
2620 object = space_map_object(msp->ms_sm);
2621 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2622 msp->ms_id, sizeof (uint64_t), &object, tx);
2624 mutex_exit(&msp->ms_sync_lock);
2629 * Called after a transaction group has completely synced to mark
2630 * all of the metaslab's free space as usable.
2633 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2635 metaslab_group_t *mg = msp->ms_group;
2636 vdev_t *vd = mg->mg_vd;
2637 spa_t *spa = vd->vdev_spa;
2638 range_tree_t **defer_tree;
2639 int64_t alloc_delta, defer_delta;
2640 boolean_t defer_allowed = B_TRUE;
2642 ASSERT(!vd->vdev_ishole);
2644 mutex_enter(&msp->ms_lock);
2647 * If this metaslab is just becoming available, initialize its
2648 * range trees and add its capacity to the vdev.
2650 if (msp->ms_freed == NULL) {
2651 for (int t = 0; t < TXG_SIZE; t++) {
2652 ASSERT(msp->ms_allocating[t] == NULL);
2654 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
2657 ASSERT3P(msp->ms_freeing, ==, NULL);
2658 msp->ms_freeing = range_tree_create(NULL, NULL);
2660 ASSERT3P(msp->ms_freed, ==, NULL);
2661 msp->ms_freed = range_tree_create(NULL, NULL);
2663 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2664 ASSERT(msp->ms_defer[t] == NULL);
2666 msp->ms_defer[t] = range_tree_create(NULL, NULL);
2669 ASSERT3P(msp->ms_checkpointing, ==, NULL);
2670 msp->ms_checkpointing = range_tree_create(NULL, NULL);
2672 vdev_space_update(vd, 0, 0, msp->ms_size);
2674 ASSERT0(range_tree_space(msp->ms_freeing));
2675 ASSERT0(range_tree_space(msp->ms_checkpointing));
2677 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
2679 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2680 metaslab_class_get_alloc(spa_normal_class(spa));
2681 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
2682 defer_allowed = B_FALSE;
2686 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2687 if (defer_allowed) {
2688 defer_delta = range_tree_space(msp->ms_freed) -
2689 range_tree_space(*defer_tree);
2691 defer_delta -= range_tree_space(*defer_tree);
2694 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2697 * If there's a metaslab_load() in progress, wait for it to complete
2698 * so that we have a consistent view of the in-core space map.
2700 metaslab_load_wait(msp);
2703 * Move the frees from the defer_tree back to the free
2704 * range tree (if it's loaded). Swap the freed_tree and
2705 * the defer_tree -- this is safe to do because we've
2706 * just emptied out the defer_tree.
2708 range_tree_vacate(*defer_tree,
2709 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
2710 if (defer_allowed) {
2711 range_tree_swap(&msp->ms_freed, defer_tree);
2713 range_tree_vacate(msp->ms_freed,
2714 msp->ms_loaded ? range_tree_add : NULL,
2715 msp->ms_allocatable);
2717 space_map_update(msp->ms_sm);
2719 msp->ms_deferspace += defer_delta;
2720 ASSERT3S(msp->ms_deferspace, >=, 0);
2721 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2722 if (msp->ms_deferspace != 0) {
2724 * Keep syncing this metaslab until all deferred frees
2725 * are back in circulation.
2727 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2731 msp->ms_new = B_FALSE;
2732 mutex_enter(&mg->mg_lock);
2734 mutex_exit(&mg->mg_lock);
2737 * Calculate the new weights before unloading any metaslabs.
2738 * This will give us the most accurate weighting.
2740 metaslab_group_sort(mg, msp, metaslab_weight(msp) |
2741 (msp->ms_weight & METASLAB_ACTIVE_MASK));
2744 * If the metaslab is loaded and we've not tried to load or allocate
2745 * from it in 'metaslab_unload_delay' txgs, then unload it.
2747 if (msp->ms_loaded &&
2748 msp->ms_initializing == 0 &&
2749 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2750 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2751 VERIFY0(range_tree_space(
2752 msp->ms_allocating[(txg + t) & TXG_MASK]));
2754 if (msp->ms_allocator != -1) {
2755 metaslab_passivate(msp, msp->ms_weight &
2756 ~METASLAB_ACTIVE_MASK);
2759 if (!metaslab_debug_unload)
2760 metaslab_unload(msp);
2763 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2764 ASSERT0(range_tree_space(msp->ms_freeing));
2765 ASSERT0(range_tree_space(msp->ms_freed));
2766 ASSERT0(range_tree_space(msp->ms_checkpointing));
2768 mutex_exit(&msp->ms_lock);
2772 metaslab_sync_reassess(metaslab_group_t *mg)
2774 spa_t *spa = mg->mg_class->mc_spa;
2776 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2777 metaslab_group_alloc_update(mg);
2778 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2781 * Preload the next potential metaslabs but only on active
2782 * metaslab groups. We can get into a state where the metaslab
2783 * is no longer active since we dirty metaslabs as we remove
2784 * a device, thus potentially making the metaslab group eligible for preloading.
2787 if (mg->mg_activation_count > 0) {
2788 metaslab_group_preload(mg);
2790 spa_config_exit(spa, SCL_ALLOC, FTAG);
2794 metaslab_distance(metaslab_t *msp, dva_t *dva)
2796 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2797 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2798 uint64_t start = msp->ms_id;
2800 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2801 return (1ULL << 63);
2804 return ((start - offset) << ms_shift);
2806 return ((offset - start) << ms_shift);
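/*
 * Illustrative computation (values assumed): with 1GB metaslabs
 * (vdev_ms_shift == 30), a DVA at offset 5GB falls in metaslab 5, so a
 * candidate metaslab with ms_id == 12 on the same vdev is reported as
 * (12 - 5) << 30 = 7GB away. A candidate on any other vdev reports
 * 1ULL << 63, which trivially satisfies any distance requirement since
 * a different vdev is always far enough away.
 */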
2811 * ==========================================================================
2812 * Metaslab allocation tracing facility
2813 * ==========================================================================
2815 #ifdef _METASLAB_TRACING
2816 kstat_t *metaslab_trace_ksp;
2817 kstat_named_t metaslab_trace_over_limit;
2820 metaslab_alloc_trace_init(void)
2822 ASSERT(metaslab_alloc_trace_cache == NULL);
2823 metaslab_alloc_trace_cache = kmem_cache_create(
2824 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2825 0, NULL, NULL, NULL, NULL, NULL, 0);
2826 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2827 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2828 if (metaslab_trace_ksp != NULL) {
2829 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2830 kstat_named_init(&metaslab_trace_over_limit,
2831 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2832 kstat_install(metaslab_trace_ksp);
2837 metaslab_alloc_trace_fini(void)
2839 if (metaslab_trace_ksp != NULL) {
2840 kstat_delete(metaslab_trace_ksp);
2841 metaslab_trace_ksp = NULL;
2843 kmem_cache_destroy(metaslab_alloc_trace_cache);
2844 metaslab_alloc_trace_cache = NULL;
2848 * Add an allocation trace element to the allocation tracing list.
2851 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2852 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
2855 if (!metaslab_trace_enabled)
2859 * When the tracing list reaches its maximum we remove
2860 * the second element in the list before adding a new one.
2861 * By removing the second element we preserve the original
2862 * entry as a clue to what allocation steps have already been performed.
2865 if (zal->zal_size == metaslab_trace_max_entries) {
2866 metaslab_alloc_trace_t *mat_next;
2868 panic("too many entries in allocation list");
2870 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2872 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2873 list_remove(&zal->zal_list, mat_next);
2874 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2877 metaslab_alloc_trace_t *mat =
2878 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2879 list_link_init(&mat->mat_list_node);
2882 mat->mat_size = psize;
2883 mat->mat_dva_id = dva_id;
2884 mat->mat_offset = offset;
2885 mat->mat_weight = 0;
2886 mat->mat_allocator = allocator;
2889 mat->mat_weight = msp->ms_weight;
2892 * The list is part of the zio so locking is not required. Only
2893 * a single thread will perform allocations for a given zio.
2895 list_insert_tail(&zal->zal_list, mat);
2898 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2902 metaslab_trace_init(zio_alloc_list_t *zal)
2904 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2905 offsetof(metaslab_alloc_trace_t, mat_list_node));
2910 metaslab_trace_fini(zio_alloc_list_t *zal)
2912 metaslab_alloc_trace_t *mat;
2914 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2915 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2916 list_destroy(&zal->zal_list);
2922 #define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)
2925 metaslab_alloc_trace_init(void)
2930 metaslab_alloc_trace_fini(void)
2935 metaslab_trace_init(zio_alloc_list_t *zal)
2940 metaslab_trace_fini(zio_alloc_list_t *zal)
2944 #endif /* _METASLAB_TRACING */
2947 * ==========================================================================
2948 * Metaslab block operations
2949 * ==========================================================================
2953 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
2956 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2957 (flags & METASLAB_DONT_THROTTLE))
2960 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2961 if (!mg->mg_class->mc_alloc_throttle_enabled)
2964 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
2968 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
2970 uint64_t max = mg->mg_max_alloc_queue_depth;
2971 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2973 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
2974 cur, cur + 1) == cur) {
2976 &mg->mg_class->mc_alloc_max_slots[allocator]);
2979 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2984 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
2985 int allocator, boolean_t io_complete)
2987 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2988 (flags & METASLAB_DONT_THROTTLE))
2991 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2992 if (!mg->mg_class->mc_alloc_throttle_enabled)
2995 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
2997 metaslab_group_increment_qdepth(mg, allocator);
3001 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
3005 const dva_t *dva = bp->blk_dva;
3006 int ndvas = BP_GET_NDVAS(bp);
3008 for (int d = 0; d < ndvas; d++) {
3009 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
3010 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3011 VERIFY(zfs_refcount_not_held(
3012 &mg->mg_alloc_queue_depth[allocator], tag));
3018 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
3021 range_tree_t *rt = msp->ms_allocatable;
3022 metaslab_class_t *mc = msp->ms_group->mg_class;
3024 VERIFY(!msp->ms_condensing);
3025 VERIFY0(msp->ms_initializing);
3027 start = mc->mc_ops->msop_alloc(msp, size);
3028 if (start != -1ULL) {
3029 metaslab_group_t *mg = msp->ms_group;
3030 vdev_t *vd = mg->mg_vd;
3032 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
3033 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3034 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
3035 range_tree_remove(rt, start, size);
3037 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3038 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
3040 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3042 /* Track the last successful allocation */
3043 msp->ms_alloc_txg = txg;
3044 metaslab_verify_space(msp, txg);
3048 * Now that we've attempted the allocation we need to update the
3049 * metaslab's maximum block size since it may have changed.
3051 msp->ms_max_size = metaslab_block_maxsize(msp);
3056 * Find the metaslab with the highest weight that is less than what we've
3057 * already tried. In the common case, this means that we will examine each
3058 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3059 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3060 * activated by another thread, and we fail to allocate from the metaslab we
3061 * have selected, we may not try the newly-activated metaslab, and instead
3062 * activate another metaslab. This is not optimal, but generally does not cause
3063 * any problems (a possible exception being if every metaslab is completely full
3064 * except for the newly-activated metaslab which we fail to examine).
3067 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
3068 dva_t *dva, int d, uint64_t min_distance, uint64_t asize, int allocator,
3069 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
3072 avl_tree_t *t = &mg->mg_metaslab_tree;
3073 metaslab_t *msp = avl_find(t, search, &idx);
3075 msp = avl_nearest(t, idx, AVL_AFTER);
3077 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
3079 if (!metaslab_should_allocate(msp, asize)) {
3080 metaslab_trace_add(zal, mg, msp, asize, d,
3081 TRACE_TOO_SMALL, allocator);
3086 * If the selected metaslab is condensing or being
3087 * initialized, skip it.
3089 if (msp->ms_condensing || msp->ms_initializing > 0)
3092 *was_active = msp->ms_allocator != -1;
3094 * If we're activating as primary, this is our first allocation
3095 * from this disk, so we don't need to check how close we are.
3096 * If the metaslab under consideration was already active,
3097 * we're getting desperate enough to steal another allocator's
3098 * metaslab, so we still don't care about distances.
3100 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
3103 uint64_t target_distance = min_distance
3104 + (space_map_allocated(msp->ms_sm) != 0 ? 0 :
3107 for (i = 0; i < d; i++) {
3108 if (metaslab_distance(msp, &dva[i]) < target_distance)
3116 search->ms_weight = msp->ms_weight;
3117 search->ms_start = msp->ms_start + 1;
3118 search->ms_allocator = msp->ms_allocator;
3119 search->ms_primary = msp->ms_primary;
3126 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
3127 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3130 metaslab_t *msp = NULL;
3131 uint64_t offset = -1ULL;
3132 uint64_t activation_weight;
3134 activation_weight = METASLAB_WEIGHT_PRIMARY;
3135 for (int i = 0; i < d; i++) {
3136 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3137 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3138 activation_weight = METASLAB_WEIGHT_SECONDARY;
3139 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3140 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3141 activation_weight = METASLAB_WEIGHT_CLAIM;
3147 * If we don't have enough metaslabs active to fill the entire array, we
3148 * just use the 0th slot.
3150 if (mg->mg_ms_ready < mg->mg_allocators * 3)
3153 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
3155 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
3156 search->ms_weight = UINT64_MAX;
3157 search->ms_start = 0;
3159 * At the end of the metaslab tree are the already-active metaslabs,
3160 * first the primaries, then the secondaries. When we resume searching
3161 * through the tree, we need to consider ms_allocator and ms_primary so
3162 * we start in the location right after where we left off, and don't
3163 * accidentally loop forever considering the same metaslabs.
3165 search->ms_allocator = -1;
3166 search->ms_primary = B_TRUE;
3168 boolean_t was_active = B_FALSE;
3170 mutex_enter(&mg->mg_lock);
3172 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3173 mg->mg_primaries[allocator] != NULL) {
3174 msp = mg->mg_primaries[allocator];
3175 was_active = B_TRUE;
3176 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3177 mg->mg_secondaries[allocator] != NULL) {
3178 msp = mg->mg_secondaries[allocator];
3179 was_active = B_TRUE;
3181 msp = find_valid_metaslab(mg, activation_weight, dva, d,
3182 min_distance, asize, allocator, zal, search,
3186 mutex_exit(&mg->mg_lock);
3188 kmem_free(search, sizeof (*search));
3192 mutex_enter(&msp->ms_lock);
3194 * Ensure that the metaslab we have selected is still
3195 * capable of handling our request. It's possible that
3196 * another thread may have changed the weight while we
3197 * were blocked on the metaslab lock. We check the
3198 * active status first to see if we need to reselect a metaslab.
3201 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
3202 mutex_exit(&msp->ms_lock);
3207 * If the metaslab is freshly activated for an allocator that
3208 * isn't the one we're allocating from, or if it's a primary and
3209 * we're seeking a secondary (or vice versa), we go back and
3210 * select a new metaslab.
3212 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
3213 (msp->ms_allocator != -1) &&
3214 (msp->ms_allocator != allocator || ((activation_weight ==
3215 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
3216 mutex_exit(&msp->ms_lock);
3220 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
3221 activation_weight != METASLAB_WEIGHT_CLAIM) {
3222 metaslab_passivate(msp, msp->ms_weight &
3223 ~METASLAB_WEIGHT_CLAIM);
3224 mutex_exit(&msp->ms_lock);
3228 if (metaslab_activate(msp, allocator, activation_weight) != 0) {
3229 mutex_exit(&msp->ms_lock);
3233 msp->ms_selected_txg = txg;
3236 * Now that we have the lock, recheck to see if we should
3237 * continue to use this metaslab for this allocation. The
3238 * metaslab is now loaded so metaslab_should_allocate() can
3239 * accurately determine if the allocation attempt should proceed.
3242 if (!metaslab_should_allocate(msp, asize)) {
3243 /* Passivate this metaslab and select a new one. */
3244 metaslab_trace_add(zal, mg, msp, asize, d,
3245 TRACE_TOO_SMALL, allocator);
3250 * If this metaslab is currently condensing then pick again as
3251 * we can't manipulate this metaslab until it's committed
3252 * to disk. If this metaslab is being initialized, we shouldn't
3253 * allocate from it since the allocated region might be
3254 * overwritten after allocation.
3256 if (msp->ms_condensing) {
3257 metaslab_trace_add(zal, mg, msp, asize, d,
3258 TRACE_CONDENSING, allocator);
3259 metaslab_passivate(msp, msp->ms_weight &
3260 ~METASLAB_ACTIVE_MASK);
3261 mutex_exit(&msp->ms_lock);
3263 } else if (msp->ms_initializing > 0) {
3264 metaslab_trace_add(zal, mg, msp, asize, d,
3265 TRACE_INITIALIZING, allocator);
3266 metaslab_passivate(msp, msp->ms_weight &
3267 ~METASLAB_ACTIVE_MASK);
3268 mutex_exit(&msp->ms_lock);
3272 offset = metaslab_block_alloc(msp, asize, txg);
3273 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
3275 if (offset != -1ULL) {
3276 /* Proactively passivate the metaslab, if needed */
3277 metaslab_segment_may_passivate(msp);
3281 ASSERT(msp->ms_loaded);
3284 * We were unable to allocate from this metaslab so determine
3285 * a new weight for this metaslab. Now that we have loaded
3286 * the metaslab we can provide a better hint to the metaslab group.
3289 * For space-based metaslabs, we use the maximum block size.
3290 * This information is only available when the metaslab
3291 * is loaded and is more accurate than the generic free
3292 * space weight that was calculated by metaslab_weight().
3293 * This information allows us to quickly compare the maximum
3294 * available allocation in the metaslab to the allocation
3295 * size being requested.
3297 * For segment-based metaslabs, determine the new weight
3298 * based on the highest bucket in the range tree. We
3299 * explicitly use the loaded segment weight (i.e. the range
3300 * tree histogram) since it contains the space that is
3301 * currently available for allocation and is accurate
3302 * even within a sync pass.
3304 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3305 uint64_t weight = metaslab_block_maxsize(msp);
3306 WEIGHT_SET_SPACEBASED(weight);
3307 metaslab_passivate(msp, weight);
3309 metaslab_passivate(msp,
3310 metaslab_weight_from_range_tree(msp));
3314 * We have just failed an allocation attempt, check
3315 * that metaslab_should_allocate() agrees. Otherwise,
3316 * we may end up in an infinite loop retrying the same metaslab.
3319 ASSERT(!metaslab_should_allocate(msp, asize));
3320 mutex_exit(&msp->ms_lock);
3322 mutex_exit(&msp->ms_lock);
3323 kmem_free(search, sizeof (*search));
3328 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
3329 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3333 ASSERT(mg->mg_initialized);
3335 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
3336 min_distance, dva, d, allocator);
3338 mutex_enter(&mg->mg_lock);
3339 if (offset == -1ULL) {
3340 mg->mg_failed_allocations++;
3341 metaslab_trace_add(zal, mg, NULL, asize, d,
3342 TRACE_GROUP_FAILURE, allocator);
3343 if (asize == SPA_GANGBLOCKSIZE) {
3345 * This metaslab group was unable to allocate
3346 * the minimum gang block size so it must be out of
3347 * space. We must notify the allocation throttle
3348 * to start skipping allocation attempts to this
3349 * metaslab group until more space becomes available.
3350 * Note: this failure cannot be caused by the
3351 * allocation throttle since the allocation throttle
3352 * is only responsible for skipping devices and
3353 * not failing block allocations.
3355 mg->mg_no_free_space = B_TRUE;
3358 mg->mg_allocations++;
3359 mutex_exit(&mg->mg_lock);
3364 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
3365 * on the same vdev as an existing DVA of this BP, then try to allocate it
3366 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the existing DVAs.
3369 int ditto_same_vdev_distance_shift = 3;
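/*
 * Illustrative arithmetic: with the default shift of 3, ditto copies on
 * a 2TB top-level vdev are asked to land at least 2T / 2^3 = 256GB
 * apart. The allocation path below drops the requirement entirely
 * (distance of 0) whenever that value would be no larger than a single
 * metaslab.
 */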
3372 * Allocate a block for the specified i/o.
3375 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
3376 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3377 zio_alloc_list_t *zal, int allocator)
3379 metaslab_group_t *mg, *rotor;
3381 boolean_t try_hard = B_FALSE;
3383 ASSERT(!DVA_IS_VALID(&dva[d]));
3386 * For testing, make some blocks above a certain size be gang blocks.
3388 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
3389 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
3391 return (SET_ERROR(ENOSPC));
3395 * Start at the rotor and loop through all mgs until we find something.
3396 * Note that there's no locking on mc_rotor or mc_aliquot because
3397 * nothing actually breaks if we miss a few updates -- we just won't
3398 * allocate quite as evenly. It all balances out over time.
3400 * If we are doing ditto or log blocks, try to spread them across
3401 * consecutive vdevs. If we're forced to reuse a vdev before we've
3402 * allocated all of our ditto blocks, then try and spread them out on
3403 * that vdev as much as possible. If it turns out to not be possible,
3404 * gradually lower our standards until anything becomes acceptable.
3405 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3406 * gives us hope of containing our fault domains to something we're
3407 * able to reason about. Otherwise, any two top-level vdev failures
3408 * will guarantee the loss of data. With consecutive allocation,
3409 * only two adjacent top-level vdev failures will result in data loss.
3411 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3412 * ourselves on the same vdev as our gang block header. That
3413 * way, we can hope for locality in vdev_cache, plus it makes our
3414 * fault domains something tractable.
3417 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3420 * It's possible the vdev we're using as the hint no
3421 * longer exists or its mg has been closed (e.g. by
3422 * device removal). Consult the rotor when that happens.
3425 if (vd != NULL && vd->vdev_mg != NULL) {
3428 if (flags & METASLAB_HINTBP_AVOID &&
3429 mg->mg_next != NULL)
3434 } else if (d != 0) {
3435 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3436 mg = vd->vdev_mg->mg_next;
3442 * If the hint put us into the wrong metaslab class, or into a
3443 * metaslab group that has been passivated, just follow the rotor.
3445 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3451 boolean_t allocatable;
3453 ASSERT(mg->mg_activation_count == 1);
3457 * Don't allocate from faulted devices.
3460 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3461 allocatable = vdev_allocatable(vd);
3462 spa_config_exit(spa, SCL_ZIO, FTAG);
3464 allocatable = vdev_allocatable(vd);
3468 * Determine if the selected metaslab group is eligible
3469 * for allocations. If we're ganging then don't allow
3470 * this metaslab group to skip allocations since that would
3471 * inadvertently return ENOSPC and suspend the pool
3472 * even though space is still available.
3474 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3475 allocatable = metaslab_group_allocatable(mg, rotor,
3476 psize, allocator, d);
3480 metaslab_trace_add(zal, mg, NULL, psize, d,
3481 TRACE_NOT_ALLOCATABLE, allocator);
3485 ASSERT(mg->mg_initialized);
3488 * Avoid writing single-copy data to a failing,
3489 * non-redundant vdev, unless we've already tried all other vdevs.
3492 if ((vd->vdev_stat.vs_write_errors > 0 ||
3493 vd->vdev_state < VDEV_STATE_HEALTHY) &&
3494 d == 0 && !try_hard && vd->vdev_children == 0) {
3495 metaslab_trace_add(zal, mg, NULL, psize, d,
3496 TRACE_VDEV_ERROR, allocator);
3500 ASSERT(mg->mg_class == mc);
3503 * If we don't need to try hard, then require that the
3504 * block be 1/8th of the device away from any other DVAs
3505 * in this BP. If we are trying hard, allow any offset
3506 * to be used (distance=0).
3508 uint64_t distance = 0;
3510 distance = vd->vdev_asize >>
3511 ditto_same_vdev_distance_shift;
3512 if (distance <= (1ULL << vd->vdev_ms_shift))
3516 uint64_t asize = vdev_psize_to_asize(vd, psize);
3517 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3519 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3520 distance, dva, d, allocator);
3522 if (offset != -1ULL) {
3524 * If we've just selected this metaslab group,
3525 * figure out whether the corresponding vdev is
3526 * over- or under-used relative to the pool,
3527 * and set an allocation bias to even it out.
3529 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3530 vdev_stat_t *vs = &vd->vdev_stat;
3533 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3534 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3537 * Calculate how much more or less we should
3538 * try to allocate from this device during
3539 * this iteration around the rotor.
3540 * For example, if a device is 80% full
3541 * and the pool is 20% full then we should
3542 * reduce allocations by 60% on this device.
3544 * mg_bias = (20 - 80) * 512K / 100 = -307K
3546 * This reduces allocations by 307K for this
3549 mg->mg_bias = ((cu - vu) *
3550 (int64_t)mg->mg_aliquot) / 100;
3551 } else if (!metaslab_bias_enabled) {
3555 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3556 mg->mg_aliquot + mg->mg_bias) {
3557 mc->mc_rotor = mg->mg_next;
3561 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3562 DVA_SET_OFFSET(&dva[d], offset);
3563 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3564 DVA_SET_ASIZE(&dva[d], asize);
3569 mc->mc_rotor = mg->mg_next;
3571 } while ((mg = mg->mg_next) != rotor);
3574 * If we haven't tried hard, do so now.
3581 bzero(&dva[d], sizeof (dva_t));
3583 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
3584 return (SET_ERROR(ENOSPC));
3588 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
3589 boolean_t checkpoint)
3592 spa_t *spa = vd->vdev_spa;
3594 ASSERT(vdev_is_concrete(vd));
3595 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3596 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3598 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3600 VERIFY(!msp->ms_condensing);
3601 VERIFY3U(offset, >=, msp->ms_start);
3602 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3603 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3604 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3606 metaslab_check_free_impl(vd, offset, asize);
3608 mutex_enter(&msp->ms_lock);
3609 if (range_tree_is_empty(msp->ms_freeing) &&
3610 range_tree_is_empty(msp->ms_checkpointing)) {
3611 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3615 ASSERT(spa_has_checkpoint(spa));
3616 range_tree_add(msp->ms_checkpointing, offset, asize);
3618 range_tree_add(msp->ms_freeing, offset, asize);
3620 mutex_exit(&msp->ms_lock);
3625 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3626 uint64_t size, void *arg)
3628 boolean_t *checkpoint = arg;
3630 ASSERT3P(checkpoint, !=, NULL);
3632 if (vd->vdev_ops->vdev_op_remap != NULL)
3633 vdev_indirect_mark_obsolete(vd, offset, size);
3635 metaslab_free_impl(vd, offset, size, *checkpoint);
3639 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
3640 boolean_t checkpoint)
3642 spa_t *spa = vd->vdev_spa;
3644 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3646 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
3649 if (spa->spa_vdev_removal != NULL &&
3650 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
3651 vdev_is_concrete(vd)) {
3653 * Note: we check if the vdev is concrete because when
3654 * we complete the removal, we first change the vdev to be
3655 * an indirect vdev (in open context), and then (in syncing
3656 * context) clear spa_vdev_removal.
3658 free_from_removing_vdev(vd, offset, size);
3659 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
3660 vdev_indirect_mark_obsolete(vd, offset, size);
3661 vd->vdev_ops->vdev_op_remap(vd, offset, size,
3662 metaslab_free_impl_cb, &checkpoint);
3664 metaslab_free_concrete(vd, offset, size, checkpoint);
3668 typedef struct remap_blkptr_cb_arg {
3670 spa_remap_cb_t rbca_cb;
3671 vdev_t *rbca_remap_vd;
3672 uint64_t rbca_remap_offset;
3674 } remap_blkptr_cb_arg_t;
3677 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3678 uint64_t size, void *arg)
3680 remap_blkptr_cb_arg_t *rbca = arg;
3681 blkptr_t *bp = rbca->rbca_bp;
3683 /* We can not remap split blocks. */
3684 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
3686 ASSERT0(inner_offset);
3688 if (rbca->rbca_cb != NULL) {
3690 * At this point we know that we are not handling split
3691 * blocks and we invoke the callback on the previous
3692 * vdev which must be indirect.
3694 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
3696 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
3697 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
3699 /* set up remap_blkptr_cb_arg for the next call */
3700 rbca->rbca_remap_vd = vd;
3701 rbca->rbca_remap_offset = offset;
3705 * The phys birth time is that of dva[0]. This ensures that we know
3706 * when each dva was written, so that resilver can determine which
3707 * blocks need to be scrubbed (i.e. those written during the time
3708 * the vdev was offline). It also ensures that the key used in
3709 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
3710 * we didn't change the phys_birth, a lookup in the ARC for a
3711 * remapped BP could find the data that was previously stored at
3712 * this vdev + offset.
3714 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
3715 DVA_GET_VDEV(&bp->blk_dva[0]));
3716 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
3717 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
3718 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
3720 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
3721 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
3725 * If the block pointer contains any indirect DVAs, modify them to refer to
3726 * concrete DVAs. Note that this will sometimes not be possible, leaving
3727 * the indirect DVA in place. This happens if the indirect DVA spans multiple
3728 * segments in the mapping (i.e. it is a "split block").
3730 * If the BP was remapped, calls the callback on the original dva (note the
3731 * callback can be called multiple times if the original indirect DVA refers
3732 * to another indirect DVA, etc).
3734 * Returns TRUE if the BP was remapped.
3737 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
3739 remap_blkptr_cb_arg_t rbca;
3741 if (!zfs_remap_blkptr_enable)
3744 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
3748 * Dedup BP's can not be remapped, because ddt_phys_select() depends
3749 * on DVA[0] being the same in the BP as in the DDT (dedup table).
3751 if (BP_GET_DEDUP(bp))
3755 * Gang blocks can not be remapped, because
3756 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
3757 * the BP used to read the gang block header (GBH) being the same
3758 * as the DVA[0] that we allocated for the GBH.
3764 * Embedded BP's have no DVA to remap.
3766 if (BP_GET_NDVAS(bp) < 1)
3770 * Note: we only remap dva[0]. If we remapped other dvas, we
3771 * would no longer know what their phys birth txg is.
3773 dva_t *dva = &bp->blk_dva[0];
3775 uint64_t offset = DVA_GET_OFFSET(dva);
3776 uint64_t size = DVA_GET_ASIZE(dva);
3777 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3779 if (vd->vdev_ops->vdev_op_remap == NULL)
3783 rbca.rbca_cb = callback;
3784 rbca.rbca_remap_vd = vd;
3785 rbca.rbca_remap_offset = offset;
3786 rbca.rbca_cb_arg = arg;
3789 * remap_blkptr_cb() will be called in order for each level of
3790 * indirection, until a concrete vdev is reached or a split block is
3791 * encountered. old_vd and old_offset are updated within the callback
3792 * as we go from the one indirect vdev to the next one (either concrete
3793 * or indirect again) in that order.
3795 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
3797 /* Check if the DVA wasn't remapped because it is a split block */
3798 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
3805 * Undo the allocation of a DVA which happened in the given transaction group.
3808 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3812 uint64_t vdev = DVA_GET_VDEV(dva);
3813 uint64_t offset = DVA_GET_OFFSET(dva);
3814 uint64_t size = DVA_GET_ASIZE(dva);
3816 ASSERT(DVA_IS_VALID(dva));
3817 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3819 if (txg > spa_freeze_txg(spa))
3822 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3823 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3824 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
3825 (u_longlong_t)vdev, (u_longlong_t)offset);
3830 ASSERT(!vd->vdev_removing);
3831 ASSERT(vdev_is_concrete(vd));
3832 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3833 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
3835 if (DVA_GET_GANG(dva))
3836 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3838 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3840 mutex_enter(&msp->ms_lock);
3841 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
3844 VERIFY(!msp->ms_condensing);
3845 VERIFY3U(offset, >=, msp->ms_start);
3846 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3847 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
3849 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3850 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3851 range_tree_add(msp->ms_allocatable, offset, size);
3852 mutex_exit(&msp->ms_lock);
3856 * Free the block represented by the given DVA.
3859 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
3861 uint64_t vdev = DVA_GET_VDEV(dva);
3862 uint64_t offset = DVA_GET_OFFSET(dva);
3863 uint64_t size = DVA_GET_ASIZE(dva);
3864 vdev_t *vd = vdev_lookup_top(spa, vdev);
3866 ASSERT(DVA_IS_VALID(dva));
3867 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3869 if (DVA_GET_GANG(dva)) {
3870 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3873 metaslab_free_impl(vd, offset, size, checkpoint);

/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mc->mc_alloc_max_slots[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots =
	    zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			reserved_slots =
			    zfs_refcount_add(&mc->mc_alloc_slots[allocator],
			    zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}

void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
		    zio);
	}
	mutex_exit(&mc->mc_lock);
}
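
/*
 * Illustrative sketch (not part of the original source) of the caller-side
 * pattern described above: an allocating zio reserves one slot per copy
 * before calling into the allocator and releases them when the I/O
 * completes.  The surrounding variables are hypothetical.
 *
 *	metaslab_class_t *mc = spa_normal_class(spa);
 *	int copies = zio->io_prop.zp_copies;
 *
 *	if (!metaslab_class_throttle_reserve(mc, copies, zio->io_allocator,
 *	    zio, 0)) {
 *		// no slots free: requeue the zio and retry once another
 *		// allocating I/O completes and unreserves its slots
 *	}
 *	...
 *	// later, when the allocating zio finishes:
 *	metaslab_class_throttle_unreserve(mc, copies, zio->io_allocator, zio);
 */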

static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
	/*
	 * No need to fail in that case; someone else has activated the
	 * metaslab, but that doesn't preclude us from using it.
	 */
	if (error == EBUSY)
		error = 0;

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}
	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
	}
	mutex_exit(&msp->ms_lock);
	return (0);
}

typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;

static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}

static int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(1M) can claim on indirect vdevs.  This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);
		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}
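
/*
 * Illustrative sketch (not part of the original source): allocating the DVAs
 * of a new block with metaslab_alloc().  The allocation trace list must be
 * initialized before the call and torn down afterwards; "psize", "copies",
 * "new_bp", and the enclosing zio are hypothetical.
 *
 *	zio_alloc_list_t zal;
 *	int error;
 *
 *	metaslab_trace_init(&zal);
 *	error = metaslab_alloc(spa, spa_normal_class(spa), psize, new_bp,
 *	    copies, txg, NULL, 0, &zal, zio, zio->io_allocator);
 *	metaslab_trace_fini(&zal);
 *	if (error == ENOSPC) {
 *		// e.g. retry with a smaller psize or fall back to ganging
 *	}
 */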

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that, we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT(!now);
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
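
/*
 * Worked example of the checkpoint test above (not part of the original
 * source): with spa_checkpoint_txg == 100, a block born in txg 80 that is
 * freed while txg 120 is syncing existed when the checkpoint was taken, so
 * it is freed with checkpoint == B_TRUE and its range lands in
 * ms_checkpointing instead of becoming allocatable again.  A block born in
 * txg 110 postdates the checkpoint and is freed normally.
 */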

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));
	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}
	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;
	spa_config_exit(spa, SCL_ALLOC, FTAG);
	ASSERT(error == 0 || txg == 0);
	return (error);
}
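
/*
 * Illustrative sketch (not part of the original source): as described above
 * metaslab_claim_dva(), the intent-log claim phase at pool open re-claims the
 * still-"free" log blocks so they cannot be handed out again.  In the full
 * code path this typically runs through zio_claim(); the direct call and the
 * "log_bp"/"first_txg" names below are simplifications.
 *
 *	if (metaslab_claim(spa, &log_bp, first_txg) != 0) {
 *		// the block was already reused; treat the log as ending here
 *	}
 *
 * Passing txg == 0 performs only the dry-run check without claiming anything.
 */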

static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;
	metaslab_check_free_impl(vd, offset, size);
}

static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded)
		range_tree_verify(msp->ms_allocatable, offset, size);

	range_tree_verify(msp->ms_freeing, offset, size);
	range_tree_verify(msp->ms_checkpointing, offset, size);
	range_tree_verify(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify(msp->ms_defer[j], offset, size);
	mutex_exit(&msp->ms_lock);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
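
/*
 * Illustrative note (not part of the original source): the checks above are
 * compiled in but are no-ops unless the ZFS_DEBUG_ZIO_FREE bit is set in the
 * global zfs_flags debug mask, e.g. (conceptually):
 *
 *	zfs_flags |= ZFS_DEBUG_ZIO_FREE;	// enable free verification
 *
 * With the bit set, every DVA being freed is verified against the metaslab's
 * range trees before the free proceeds.
 */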