4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
38 SYSCTL_DECL(_vfs_zfs);
39 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
41 #define GANG_ALLOCATION(flags) \
42 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
44 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
45 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
46 #define METASLAB_ACTIVE_MASK \
47 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
49 uint64_t metaslab_aliquot = 512ULL << 10;
50 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
51 TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
52 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
53 &metaslab_gang_bang, 0,
54 "Force gang block allocation for blocks larger than or equal to this value");
57 * The in-core space map representation is more compact than its on-disk form.
58 * The zfs_condense_pct determines how much more compact the in-core
59 * space_map representation must be before we compact it on-disk.
60 * Values should be greater than or equal to 100.
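/*
 * Illustrative note: with the default zfs_condense_pct of 200, one of the
 * checks in metaslab_should_condense() below only passes once the on-disk
 * space map has grown to at least twice the size of its minimal in-core
 * form (one 64-bit entry per free segment).
 */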
62 int zfs_condense_pct = 200;
63 TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
64 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
66 "Condense on-disk spacemap when it is more than this many percents"
67 " of in-memory counterpart");
70 * Condensing a metaslab is not guaranteed to actually reduce the amount of
71 * space used on disk. In particular, a space map uses data in increments of
72 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
73 * same number of blocks after condensing. Since the goal of condensing is to
74 * reduce the number of IOPs required to read the space map, we only want to
75 * condense when we can be sure we will reduce the number of blocks used by the
76 * space map. Unfortunately, we cannot precisely compute whether or not this is
77 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
78 * we apply the following heuristic: do not condense a spacemap unless the
79 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * space map blocks.
82 int zfs_metaslab_condense_block_threshold = 4;
85 * The zfs_mg_noalloc_threshold defines which metaslab groups should
86 * be eligible for allocation. The value is defined as a percentage of
87 * free space. Metaslab groups that have more free space than
88 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
89 * a metaslab group's free space is less than or equal to the
90 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
91 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
92 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
93 * groups are allowed to accept allocations. Gang blocks are always
94 * eligible to allocate on any metaslab group. The default value of 0 means
95 * no metaslab group will be excluded based on this criterion.
97 int zfs_mg_noalloc_threshold = 0;
98 TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
99 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
100 &zfs_mg_noalloc_threshold, 0,
101 "Percentage of metaslab group size that should be free"
102 " to make it eligible for allocation");
105 * Metaslab groups are considered eligible for allocations if their
106 * fragmentation metric (measured as a percentage) is less than or equal to
107 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
108 * then it will be skipped unless all metaslab groups within the metaslab
109 * class have also crossed this threshold.
111 int zfs_mg_fragmentation_threshold = 85;
112 TUNABLE_INT("vfs.zfs.mg_fragmentation_threshold", &zfs_mg_fragmentation_threshold);
113 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
114 &zfs_mg_fragmentation_threshold, 0,
115 "Percentage of metaslab group size that should be considered "
116 "eligible for allocations unless all metaslab groups within the metaslab class "
117 "have also crossed this threshold");
120 * Allow metaslabs to keep their active state as long as their fragmentation
121 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
122 * active metaslab that exceeds this threshold will no longer keep its active
123 * status allowing better metaslabs to be selected.
125 int zfs_metaslab_fragmentation_threshold = 70;
126 TUNABLE_INT("vfs.zfs.metaslab.fragmentation_threshold",
127 &zfs_metaslab_fragmentation_threshold);
128 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
129 &zfs_metaslab_fragmentation_threshold, 0,
130 "Maximum percentage of metaslab fragmentation level to keep their active state");
133 * When set will load all metaslabs when pool is first opened.
135 int metaslab_debug_load = 0;
136 TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
137 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
138 &metaslab_debug_load, 0,
139 "Load all metaslabs when pool is first opened");
142 * When set will prevent metaslabs from being unloaded.
144 int metaslab_debug_unload = 0;
145 TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
146 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
147 &metaslab_debug_unload, 0,
148 "Prevent metaslabs from being unloaded");
151 * Minimum size which forces the dynamic allocator to change
152 * its allocation strategy. Once the space map cannot satisfy
153 * an allocation of this size then it switches to using a more
154 * aggressive strategy (i.e. search by size rather than offset).
156 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
157 TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
158 &metaslab_df_alloc_threshold);
159 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
160 &metaslab_df_alloc_threshold, 0,
161 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
164 * The minimum free space, in percent, which must be available
165 * in a space map to continue allocations in a first-fit fashion.
166 * Once the space_map's free space drops below this level we dynamically
167 * switch to using best-fit allocations.
169 int metaslab_df_free_pct = 4;
170 TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
171 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
172 &metaslab_df_free_pct, 0,
173 "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
176 * A metaslab is considered "free" if it contains a contiguous
177 * segment which is greater than metaslab_min_alloc_size.
179 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
180 TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
181 &metaslab_min_alloc_size);
182 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
183 &metaslab_min_alloc_size, 0,
184 "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
187 * Percentage of all cpus that can be used by the metaslab taskq.
189 int metaslab_load_pct = 50;
190 TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
191 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
192 &metaslab_load_pct, 0,
193 "Percentage of cpus that can be used by the metaslab taskq");
196 * Determines how many txgs a metaslab may remain loaded without having any
197 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
200 int metaslab_unload_delay = TXG_SIZE * 2;
201 TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
202 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
203 &metaslab_unload_delay, 0,
204 "Number of TXGs that an unused metaslab can be kept in memory");
207 * Max number of metaslabs per group to preload.
209 int metaslab_preload_limit = SPA_DVAS_PER_BP;
210 TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
211 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
212 &metaslab_preload_limit, 0,
213 "Max number of metaslabs per group to preload");
216 * Enable/disable preloading of metaslab.
218 boolean_t metaslab_preload_enabled = B_TRUE;
219 TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
220 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
221 &metaslab_preload_enabled, 0,
222 "Max number of metaslabs per group to preload");
225 * Enable/disable fragmentation weighting on metaslabs.
227 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
228 TUNABLE_INT("vfs.zfs.metaslab_fragmentation_factor_enabled",
229 &metaslab_fragmentation_factor_enabled);
230 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
231 &metaslab_fragmentation_factor_enabled, 0,
232 "Enable fragmentation weighting on metaslabs");
235 * Enable/disable lba weighting (i.e. outer tracks are given preference).
237 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
238 TUNABLE_INT("vfs.zfs.metaslab.lba_weighting_enabled",
239 &metaslab_lba_weighting_enabled);
240 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
241 &metaslab_lba_weighting_enabled, 0,
242 "Enable LBA weighting (i.e. outer tracks are given preference)");
245 * Enable/disable metaslab group biasing.
247 boolean_t metaslab_bias_enabled = B_TRUE;
248 TUNABLE_INT("vfs.zfs.metaslab.bias_enabled",
249 &metaslab_bias_enabled);
250 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
251 &metaslab_bias_enabled, 0,
252 "Enable metaslab group biasing");
254 static uint64_t metaslab_fragmentation(metaslab_t *);
257 * ==========================================================================
259 * ==========================================================================
262 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
264 metaslab_class_t *mc;
266 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
271 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
272 refcount_create_tracked(&mc->mc_alloc_slots);
278 metaslab_class_destroy(metaslab_class_t *mc)
280 ASSERT(mc->mc_rotor == NULL);
281 ASSERT(mc->mc_alloc == 0);
282 ASSERT(mc->mc_deferred == 0);
283 ASSERT(mc->mc_space == 0);
284 ASSERT(mc->mc_dspace == 0);
286 refcount_destroy(&mc->mc_alloc_slots);
287 mutex_destroy(&mc->mc_lock);
288 kmem_free(mc, sizeof (metaslab_class_t));
292 metaslab_class_validate(metaslab_class_t *mc)
294 metaslab_group_t *mg;
298 * Must hold one of the spa_config locks.
300 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
301 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
303 if ((mg = mc->mc_rotor) == NULL)
308 ASSERT(vd->vdev_mg != NULL);
309 ASSERT3P(vd->vdev_top, ==, vd);
310 ASSERT3P(mg->mg_class, ==, mc);
311 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
312 } while ((mg = mg->mg_next) != mc->mc_rotor);
318 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
319 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
321 atomic_add_64(&mc->mc_alloc, alloc_delta);
322 atomic_add_64(&mc->mc_deferred, defer_delta);
323 atomic_add_64(&mc->mc_space, space_delta);
324 atomic_add_64(&mc->mc_dspace, dspace_delta);
328 metaslab_class_minblocksize_update(metaslab_class_t *mc)
330 metaslab_group_t *mg;
332 uint64_t minashift = UINT64_MAX;
334 if ((mg = mc->mc_rotor) == NULL) {
335 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
341 if (vd->vdev_ashift < minashift)
342 minashift = vd->vdev_ashift;
343 } while ((mg = mg->mg_next) != mc->mc_rotor);
345 mc->mc_minblocksize = 1ULL << minashift;
349 metaslab_class_get_alloc(metaslab_class_t *mc)
351 return (mc->mc_alloc);
355 metaslab_class_get_deferred(metaslab_class_t *mc)
357 return (mc->mc_deferred);
361 metaslab_class_get_space(metaslab_class_t *mc)
363 return (mc->mc_space);
367 metaslab_class_get_dspace(metaslab_class_t *mc)
369 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
373 metaslab_class_get_minblocksize(metaslab_class_t *mc)
375 return (mc->mc_minblocksize);
379 metaslab_class_histogram_verify(metaslab_class_t *mc)
381 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
385 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
388 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
391 for (int c = 0; c < rvd->vdev_children; c++) {
392 vdev_t *tvd = rvd->vdev_child[c];
393 metaslab_group_t *mg = tvd->vdev_mg;
396 * Skip any holes, uninitialized top-levels, or
397 * vdevs that are not in this metaslab class.
399 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
400 mg->mg_class != mc) {
404 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
405 mc_hist[i] += mg->mg_histogram[i];
408 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
409 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
411 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
415 * Calculate the metaslab class's fragmentation metric. The metric
416 * is weighted based on the space contribution of each metaslab group.
417 * The return value will be a number between 0 and 100 (inclusive), or
418 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
419 * zfs_frag_table for more information about the metric.
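/*
 * Put differently (illustrative), the class-wide value computed below is
 * the space-weighted average of the per-group values:
 *
 *	fragmentation =
 *	    SUM(mg->mg_fragmentation * metaslab_group_get_space(mg)) /
 *	    metaslab_class_get_space(mc)
 *
 * taken over all metaslab groups that belong to this class.
 */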
422 metaslab_class_fragmentation(metaslab_class_t *mc)
424 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
425 uint64_t fragmentation = 0;
427 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
429 for (int c = 0; c < rvd->vdev_children; c++) {
430 vdev_t *tvd = rvd->vdev_child[c];
431 metaslab_group_t *mg = tvd->vdev_mg;
434 * Skip any holes, uninitialized top-levels, or
435 * vdevs that are not in this metaslab class.
437 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
438 mg->mg_class != mc) {
443 * If a metaslab group does not contain a fragmentation
444 * metric then just bail out.
446 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
447 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
448 return (ZFS_FRAG_INVALID);
452 * Determine how much this metaslab_group is contributing
453 * to the overall pool fragmentation metric.
455 fragmentation += mg->mg_fragmentation *
456 metaslab_group_get_space(mg);
458 fragmentation /= metaslab_class_get_space(mc);
460 ASSERT3U(fragmentation, <=, 100);
461 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
462 return (fragmentation);
466 * Calculate the amount of expandable space that is available in
467 * this metaslab class. If a device is expanded then its expandable
468 * space will be the amount of allocatable space that is currently not
469 * part of this metaslab class.
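/*
 * Illustrative example: a top-level vdev whose vdev_max_asize exceeds its
 * vdev_asize by 2.5 metaslab sizes contributes two metaslabs' worth of
 * expandable space, since P2ALIGN() below rounds the difference down to a
 * multiple of the metaslab size (1 << vdev_ms_shift).
 */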
472 metaslab_class_expandable_space(metaslab_class_t *mc)
474 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
477 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
478 for (int c = 0; c < rvd->vdev_children; c++) {
479 vdev_t *tvd = rvd->vdev_child[c];
480 metaslab_group_t *mg = tvd->vdev_mg;
482 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
483 mg->mg_class != mc) {
488 * Calculate if we have enough space to add additional
489 * metaslabs. We report the expandable space in terms
490 * of the metaslab size since that's the unit of expansion.
492 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
493 1ULL << tvd->vdev_ms_shift);
495 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
500 * ==========================================================================
502 * ==========================================================================
505 metaslab_compare(const void *x1, const void *x2)
507 const metaslab_t *m1 = x1;
508 const metaslab_t *m2 = x2;
510 if (m1->ms_weight < m2->ms_weight)
512 if (m1->ms_weight > m2->ms_weight)
516 * If the weights are identical, use the offset to force uniqueness.
518 if (m1->ms_start < m2->ms_start)
520 if (m1->ms_start > m2->ms_start)
523 ASSERT3P(m1, ==, m2);
529 * Update the allocatable flag and the metaslab group's capacity.
530 * The allocatable flag is set to true if the free capacity is above
531 * the zfs_mg_noalloc_threshold and the fragmentation value is
532 * less than or equal to zfs_mg_fragmentation_threshold. If a metaslab group
533 * transitions from allocatable to non-allocatable or vice versa then the
534 * metaslab group's class is updated to reflect the transition.
537 metaslab_group_alloc_update(metaslab_group_t *mg)
539 vdev_t *vd = mg->mg_vd;
540 metaslab_class_t *mc = mg->mg_class;
541 vdev_stat_t *vs = &vd->vdev_stat;
542 boolean_t was_allocatable;
543 boolean_t was_initialized;
545 ASSERT(vd == vd->vdev_top);
547 mutex_enter(&mg->mg_lock);
548 was_allocatable = mg->mg_allocatable;
549 was_initialized = mg->mg_initialized;
551 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
554 mutex_enter(&mc->mc_lock);
557 * If the metaslab group was just added then it won't
558 * have any space until we finish syncing out this txg.
559 * At that point we will consider it initialized and available
560 * for allocations. We also don't consider non-activated
561 * metaslab groups (e.g. vdevs that are in the middle of being removed)
562 * to be initialized, because they can't be used for allocation.
564 mg->mg_initialized = metaslab_group_initialized(mg);
565 if (!was_initialized && mg->mg_initialized) {
567 } else if (was_initialized && !mg->mg_initialized) {
568 ASSERT3U(mc->mc_groups, >, 0);
571 if (mg->mg_initialized)
572 mg->mg_no_free_space = B_FALSE;
575 * A metaslab group is considered allocatable if it has plenty
576 * of free space and is not heavily fragmented. We only take
576 * of free space or is not heavily fragmented. We only take
577 * fragmentation into account if the metaslab group has a valid
578 * fragmentation metric (i.e. a value between 0 and 100).
580 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
581 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
582 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
583 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
586 * The mc_alloc_groups maintains a count of the number of
587 * groups in this metaslab class that are still above the
588 * zfs_mg_noalloc_threshold. This is used by the allocating
589 * threads to determine if they should avoid allocations to
590 * a given group. The allocator will avoid allocations to a group
591 * if that group has reached or is below the zfs_mg_noalloc_threshold
592 * and there are still other groups that are above the threshold.
593 * When a group transitions from allocatable to non-allocatable or
594 * vice versa we update the metaslab class to reflect that change.
595 * When the mc_alloc_groups value drops to 0 that means that all
596 * groups have reached the zfs_mg_noalloc_threshold making all groups
597 * eligible for allocations. This effectively means that all devices
598 * are balanced again.
600 if (was_allocatable && !mg->mg_allocatable)
601 mc->mc_alloc_groups--;
602 else if (!was_allocatable && mg->mg_allocatable)
603 mc->mc_alloc_groups++;
604 mutex_exit(&mc->mc_lock);
606 mutex_exit(&mg->mg_lock);
610 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
612 metaslab_group_t *mg;
614 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
615 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
616 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
617 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
620 mg->mg_activation_count = 0;
621 mg->mg_initialized = B_FALSE;
622 mg->mg_no_free_space = B_TRUE;
623 refcount_create_tracked(&mg->mg_alloc_queue_depth);
625 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
626 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
632 metaslab_group_destroy(metaslab_group_t *mg)
634 ASSERT(mg->mg_prev == NULL);
635 ASSERT(mg->mg_next == NULL);
637 * We may have gone below zero with the activation count
638 * either because we never activated in the first place or
639 * because we're done, and possibly removing the vdev.
641 ASSERT(mg->mg_activation_count <= 0);
643 taskq_destroy(mg->mg_taskq);
644 avl_destroy(&mg->mg_metaslab_tree);
645 mutex_destroy(&mg->mg_lock);
646 refcount_destroy(&mg->mg_alloc_queue_depth);
647 kmem_free(mg, sizeof (metaslab_group_t));
651 metaslab_group_activate(metaslab_group_t *mg)
653 metaslab_class_t *mc = mg->mg_class;
654 metaslab_group_t *mgprev, *mgnext;
656 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
658 ASSERT(mc->mc_rotor != mg);
659 ASSERT(mg->mg_prev == NULL);
660 ASSERT(mg->mg_next == NULL);
661 ASSERT(mg->mg_activation_count <= 0);
663 if (++mg->mg_activation_count <= 0)
666 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
667 metaslab_group_alloc_update(mg);
669 if ((mgprev = mc->mc_rotor) == NULL) {
673 mgnext = mgprev->mg_next;
674 mg->mg_prev = mgprev;
675 mg->mg_next = mgnext;
676 mgprev->mg_next = mg;
677 mgnext->mg_prev = mg;
680 metaslab_class_minblocksize_update(mc);
684 metaslab_group_passivate(metaslab_group_t *mg)
686 metaslab_class_t *mc = mg->mg_class;
687 metaslab_group_t *mgprev, *mgnext;
689 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
691 if (--mg->mg_activation_count != 0) {
692 ASSERT(mc->mc_rotor != mg);
693 ASSERT(mg->mg_prev == NULL);
694 ASSERT(mg->mg_next == NULL);
695 ASSERT(mg->mg_activation_count < 0);
699 taskq_wait(mg->mg_taskq);
700 metaslab_group_alloc_update(mg);
702 mgprev = mg->mg_prev;
703 mgnext = mg->mg_next;
708 mc->mc_rotor = mgnext;
709 mgprev->mg_next = mgnext;
710 mgnext->mg_prev = mgprev;
715 metaslab_class_minblocksize_update(mc);
719 metaslab_group_initialized(metaslab_group_t *mg)
721 vdev_t *vd = mg->mg_vd;
722 vdev_stat_t *vs = &vd->vdev_stat;
724 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
728 metaslab_group_get_space(metaslab_group_t *mg)
730 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
734 metaslab_group_histogram_verify(metaslab_group_t *mg)
737 vdev_t *vd = mg->mg_vd;
738 uint64_t ashift = vd->vdev_ashift;
741 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
744 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
747 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
748 SPACE_MAP_HISTOGRAM_SIZE + ashift);
750 for (int m = 0; m < vd->vdev_ms_count; m++) {
751 metaslab_t *msp = vd->vdev_ms[m];
753 if (msp->ms_sm == NULL)
756 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
757 mg_hist[i + ashift] +=
758 msp->ms_sm->sm_phys->smp_histogram[i];
761 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
762 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
764 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
768 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
770 metaslab_class_t *mc = mg->mg_class;
771 uint64_t ashift = mg->mg_vd->vdev_ashift;
773 ASSERT(MUTEX_HELD(&msp->ms_lock));
774 if (msp->ms_sm == NULL)
777 mutex_enter(&mg->mg_lock);
778 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
779 mg->mg_histogram[i + ashift] +=
780 msp->ms_sm->sm_phys->smp_histogram[i];
781 mc->mc_histogram[i + ashift] +=
782 msp->ms_sm->sm_phys->smp_histogram[i];
784 mutex_exit(&mg->mg_lock);
788 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
790 metaslab_class_t *mc = mg->mg_class;
791 uint64_t ashift = mg->mg_vd->vdev_ashift;
793 ASSERT(MUTEX_HELD(&msp->ms_lock));
794 if (msp->ms_sm == NULL)
797 mutex_enter(&mg->mg_lock);
798 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
799 ASSERT3U(mg->mg_histogram[i + ashift], >=,
800 msp->ms_sm->sm_phys->smp_histogram[i]);
801 ASSERT3U(mc->mc_histogram[i + ashift], >=,
802 msp->ms_sm->sm_phys->smp_histogram[i]);
804 mg->mg_histogram[i + ashift] -=
805 msp->ms_sm->sm_phys->smp_histogram[i];
806 mc->mc_histogram[i + ashift] -=
807 msp->ms_sm->sm_phys->smp_histogram[i];
809 mutex_exit(&mg->mg_lock);
813 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
815 ASSERT(msp->ms_group == NULL);
816 mutex_enter(&mg->mg_lock);
819 avl_add(&mg->mg_metaslab_tree, msp);
820 mutex_exit(&mg->mg_lock);
822 mutex_enter(&msp->ms_lock);
823 metaslab_group_histogram_add(mg, msp);
824 mutex_exit(&msp->ms_lock);
828 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
830 mutex_enter(&msp->ms_lock);
831 metaslab_group_histogram_remove(mg, msp);
832 mutex_exit(&msp->ms_lock);
834 mutex_enter(&mg->mg_lock);
835 ASSERT(msp->ms_group == mg);
836 avl_remove(&mg->mg_metaslab_tree, msp);
837 msp->ms_group = NULL;
838 mutex_exit(&mg->mg_lock);
842 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
845 * Although in principle the weight can be any value, in
846 * practice we do not use values in the range [1, 511].
848 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
849 ASSERT(MUTEX_HELD(&msp->ms_lock));
851 mutex_enter(&mg->mg_lock);
852 ASSERT(msp->ms_group == mg);
853 avl_remove(&mg->mg_metaslab_tree, msp);
854 msp->ms_weight = weight;
855 avl_add(&mg->mg_metaslab_tree, msp);
856 mutex_exit(&mg->mg_lock);
860 * Calculate the fragmentation for a given metaslab group. We can use
861 * a simple average here since all metaslabs within the group must have
862 * the same size. The return value will be a value between 0 and 100
863 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
864 * group have a fragmentation metric.
867 metaslab_group_fragmentation(metaslab_group_t *mg)
869 vdev_t *vd = mg->mg_vd;
870 uint64_t fragmentation = 0;
871 uint64_t valid_ms = 0;
873 for (int m = 0; m < vd->vdev_ms_count; m++) {
874 metaslab_t *msp = vd->vdev_ms[m];
876 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
880 fragmentation += msp->ms_fragmentation;
883 if (valid_ms <= vd->vdev_ms_count / 2)
884 return (ZFS_FRAG_INVALID);
886 fragmentation /= valid_ms;
887 ASSERT3U(fragmentation, <=, 100);
888 return (fragmentation);
892 * Determine if a given metaslab group should skip allocations. A metaslab
893 * group should avoid allocations if its free capacity is less than the
894 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
895 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
896 * that can still handle allocations. If the allocation throttle is enabled
897 * then we skip allocations to devices that have reached their maximum
898 * allocation queue depth unless the selected metaslab group is the only
899 * eligible group remaining.
902 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
905 spa_t *spa = mg->mg_vd->vdev_spa;
906 metaslab_class_t *mc = mg->mg_class;
909 * We can only consider skipping this metaslab group if it's
910 * in the normal metaslab class and there are other metaslab
911 * groups to select from. Otherwise, we always consider it eligible
 * for allocations.
914 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
918 * If the metaslab group's mg_allocatable flag is set (see comments
919 * in metaslab_group_alloc_update() for more information) and
920 * the allocation throttle is disabled then allow allocations to this
921 * device. However, if the allocation throttle is enabled then
922 * check if we have reached our allocation limit (mg_alloc_queue_depth)
923 * to determine if we should allow allocations to this metaslab group.
924 * If all metaslab groups are no longer considered allocatable
925 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
926 * gang block size then we allow allocations on this metaslab group
927 * regardless of the mg_allocatable or throttle settings.
929 if (mg->mg_allocatable) {
930 metaslab_group_t *mgp;
932 uint64_t qmax = mg->mg_max_alloc_queue_depth;
934 if (!mc->mc_alloc_throttle_enabled)
938 * If this metaslab group does not have any free space, then
939 * there is no point in looking further.
941 if (mg->mg_no_free_space)
944 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
947 * If this metaslab group is below its qmax or it's
948 * the only allocatable metaslab group, then attempt
949 * to allocate from it.
951 if (qdepth < qmax || mc->mc_alloc_groups == 1)
953 ASSERT3U(mc->mc_alloc_groups, >, 1);
956 * Since this metaslab group is at or over its qmax, we
957 * need to determine if there are metaslab groups after this
958 * one that might be able to handle this allocation. This is
959 * racy since we can't hold the locks for all metaslab
960 * groups at the same time when we make this check.
962 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
963 qmax = mgp->mg_max_alloc_queue_depth;
965 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
968 * If there is another metaslab group that
969 * might be able to handle the allocation, then
970 * we return false so that we skip this group.
972 if (qdepth < qmax && !mgp->mg_no_free_space)
977 * We didn't find another group to handle the allocation
978 * so we can't skip this metaslab group even though
979 * we are at or over our qmax.
983 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
990 * ==========================================================================
991 * Range tree callbacks
992 * ==========================================================================
996 * Comparison function for the private size-ordered tree. Tree is sorted
997 * by size, larger sizes at the end of the tree.
1000 metaslab_rangesize_compare(const void *x1, const void *x2)
1002 const range_seg_t *r1 = x1;
1003 const range_seg_t *r2 = x2;
1004 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1005 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1007 if (rs_size1 < rs_size2)
1009 if (rs_size1 > rs_size2)
1012 if (r1->rs_start < r2->rs_start)
1015 if (r1->rs_start > r2->rs_start)
1022 * Create any block allocator specific components. The current allocators
1023 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1026 metaslab_rt_create(range_tree_t *rt, void *arg)
1028 metaslab_t *msp = arg;
1030 ASSERT3P(rt->rt_arg, ==, msp);
1031 ASSERT(msp->ms_tree == NULL);
1033 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1034 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1038 * Destroy the block allocator specific components.
1041 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1043 metaslab_t *msp = arg;
1045 ASSERT3P(rt->rt_arg, ==, msp);
1046 ASSERT3P(msp->ms_tree, ==, rt);
1047 ASSERT0(avl_numnodes(&msp->ms_size_tree));
1049 avl_destroy(&msp->ms_size_tree);
1053 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1055 metaslab_t *msp = arg;
1057 ASSERT3P(rt->rt_arg, ==, msp);
1058 ASSERT3P(msp->ms_tree, ==, rt);
1059 VERIFY(!msp->ms_condensing);
1060 avl_add(&msp->ms_size_tree, rs);
1064 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1066 metaslab_t *msp = arg;
1068 ASSERT3P(rt->rt_arg, ==, msp);
1069 ASSERT3P(msp->ms_tree, ==, rt);
1070 VERIFY(!msp->ms_condensing);
1071 avl_remove(&msp->ms_size_tree, rs);
1075 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1077 metaslab_t *msp = arg;
1079 ASSERT3P(rt->rt_arg, ==, msp);
1080 ASSERT3P(msp->ms_tree, ==, rt);
1083 * Normally one would walk the tree freeing nodes along the way.
1084 * Since the nodes are shared with the range trees we can avoid
1085 * walking all nodes and just reinitialize the avl tree. The nodes
1086 * will be freed by the range tree, so we don't want to free them here.
1088 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1089 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1092 static range_tree_ops_t metaslab_rt_ops = {
1094 metaslab_rt_destroy,
1101 * ==========================================================================
1102 * Metaslab block operations
1103 * ==========================================================================
1107 * Return the maximum contiguous segment within the metaslab.
1110 metaslab_block_maxsize(metaslab_t *msp)
1112 avl_tree_t *t = &msp->ms_size_tree;
1115 if (t == NULL || (rs = avl_last(t)) == NULL)
1118 return (rs->rs_end - rs->rs_start);
1122 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
1125 range_tree_t *rt = msp->ms_tree;
1127 VERIFY(!msp->ms_condensing);
1129 start = msp->ms_ops->msop_alloc(msp, size);
1130 if (start != -1ULL) {
1131 vdev_t *vd = msp->ms_group->mg_vd;
1133 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
1134 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1135 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
1136 range_tree_remove(rt, start, size);
1142 * ==========================================================================
1143 * Common allocator routines
1144 * ==========================================================================
1148 * This is a helper function that can be used by the allocator to find
1149 * a suitable block to allocate. This will search the specified AVL
1150 * tree looking for a block that matches the specified criteria.
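/*
 * Roughly: the search starts at *cursor, takes the first segment whose
 * aligned start leaves room for size bytes, advances the cursor past the
 * allocation, and wraps around to offset 0 once before giving up.
 */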
1153 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1156 range_seg_t *rs, rsearch;
1159 rsearch.rs_start = *cursor;
1160 rsearch.rs_end = *cursor + size;
1162 rs = avl_find(t, &rsearch, &where);
1164 rs = avl_nearest(t, where, AVL_AFTER);
1166 while (rs != NULL) {
1167 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1169 if (offset + size <= rs->rs_end) {
1170 *cursor = offset + size;
1173 rs = AVL_NEXT(t, rs);
1177 * If we know we've searched the whole map (*cursor == 0), give up.
1178 * Otherwise, reset the cursor to the beginning and try again.
1184 return (metaslab_block_picker(t, cursor, size, align));
1188 * ==========================================================================
1189 * The first-fit block allocator
1190 * ==========================================================================
1193 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1196 * Find the largest power of 2 block size that evenly divides the
1197 * requested size. This is used to try to allocate blocks with similar
1198 * alignment from the same area of the metaslab (i.e. same cursor
1199 * bucket) but it does not guarantee that other allocation sizes
1200 * may exist in the same region.
1202 uint64_t align = size & -size;
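/*
 * Example (illustrative): for size = 0x3000 (12KB), size & -size yields
 * align = 0x1000 (4KB), the largest power of 2 that evenly divides the
 * request, so allocations of this size share the 4KB-aligned cursor bucket.
 */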
1203 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1204 avl_tree_t *t = &msp->ms_tree->rt_root;
1206 return (metaslab_block_picker(t, cursor, size, align));
1209 static metaslab_ops_t metaslab_ff_ops = {
1214 * ==========================================================================
1215 * Dynamic block allocator -
1216 * Uses the first fit allocation scheme until space gets low and then
1217 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1218 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1219 * ==========================================================================
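/*
 * Sketch of the switch (see the checks below): the allocation falls back to
 * the size-ordered (best-fit) tree when either the largest free segment is
 * smaller than metaslab_df_alloc_threshold or the metaslab's free space
 * drops below metaslab_df_free_pct percent.
 */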
1222 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1225 * Find the largest power of 2 block size that evenly divides the
1226 * requested size. This is used to try to allocate blocks with similar
1227 * alignment from the same area of the metaslab (i.e. same cursor
1229 * bucket) but it does not guarantee that other allocation sizes
1229 * may exist in the same region.
1231 uint64_t align = size & -size;
1232 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1233 range_tree_t *rt = msp->ms_tree;
1234 avl_tree_t *t = &rt->rt_root;
1235 uint64_t max_size = metaslab_block_maxsize(msp);
1236 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1238 ASSERT(MUTEX_HELD(&msp->ms_lock));
1239 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1241 if (max_size < size)
1245 * If we're running low on space switch to using the size
1246 * sorted AVL tree (best-fit).
1248 if (max_size < metaslab_df_alloc_threshold ||
1249 free_pct < metaslab_df_free_pct) {
1250 t = &msp->ms_size_tree;
1254 return (metaslab_block_picker(t, cursor, size, 1ULL));
1257 static metaslab_ops_t metaslab_df_ops = {
1262 * ==========================================================================
1263 * Cursor fit block allocator -
1264 * Select the largest region in the metaslab, set the cursor to the beginning
1265 * of the range and the cursor_end to the end of the range. As allocations
1266 * are made advance the cursor. Continue allocating from the cursor until
1267 * the range is exhausted and then find a new range.
1268 * ==========================================================================
1271 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1273 range_tree_t *rt = msp->ms_tree;
1274 avl_tree_t *t = &msp->ms_size_tree;
1275 uint64_t *cursor = &msp->ms_lbas[0];
1276 uint64_t *cursor_end = &msp->ms_lbas[1];
1277 uint64_t offset = 0;
1279 ASSERT(MUTEX_HELD(&msp->ms_lock));
1280 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1282 ASSERT3U(*cursor_end, >=, *cursor);
1284 if ((*cursor + size) > *cursor_end) {
1287 rs = avl_last(&msp->ms_size_tree);
1288 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1291 *cursor = rs->rs_start;
1292 *cursor_end = rs->rs_end;
1301 static metaslab_ops_t metaslab_cf_ops = {
1306 * ==========================================================================
1307 * New dynamic fit allocator -
1308 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1309 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
1311 * ==========================================================================
1315 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1316 * to request from the allocator.
1318 uint64_t metaslab_ndf_clump_shift = 4;
1321 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1323 avl_tree_t *t = &msp->ms_tree->rt_root;
1325 range_seg_t *rs, rsearch;
1326 uint64_t hbit = highbit64(size);
1327 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1328 uint64_t max_size = metaslab_block_maxsize(msp);
1330 ASSERT(MUTEX_HELD(&msp->ms_lock));
1331 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1333 if (max_size < size)
1336 rsearch.rs_start = *cursor;
1337 rsearch.rs_end = *cursor + size;
1339 rs = avl_find(t, &rsearch, &where);
1340 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1341 t = &msp->ms_size_tree;
1343 rsearch.rs_start = 0;
1344 rsearch.rs_end = MIN(max_size,
1345 1ULL << (hbit + metaslab_ndf_clump_shift));
1346 rs = avl_find(t, &rsearch, &where);
1348 rs = avl_nearest(t, where, AVL_AFTER);
1352 if ((rs->rs_end - rs->rs_start) >= size) {
1353 *cursor = rs->rs_start + size;
1354 return (rs->rs_start);
1359 static metaslab_ops_t metaslab_ndf_ops = {
1363 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1366 * ==========================================================================
1368 * ==========================================================================
1372 * Wait for any in-progress metaslab loads to complete.
1375 metaslab_load_wait(metaslab_t *msp)
1377 ASSERT(MUTEX_HELD(&msp->ms_lock));
1379 while (msp->ms_loading) {
1380 ASSERT(!msp->ms_loaded);
1381 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1386 metaslab_load(metaslab_t *msp)
1390 ASSERT(MUTEX_HELD(&msp->ms_lock));
1391 ASSERT(!msp->ms_loaded);
1392 ASSERT(!msp->ms_loading);
1394 msp->ms_loading = B_TRUE;
1397 * If the space map has not been allocated yet, then treat
1398 * all the space in the metaslab as free and add it to the
 * ms_tree.
1401 if (msp->ms_sm != NULL)
1402 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1404 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1406 msp->ms_loaded = (error == 0);
1407 msp->ms_loading = B_FALSE;
1409 if (msp->ms_loaded) {
1410 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1411 range_tree_walk(msp->ms_defertree[t],
1412 range_tree_remove, msp->ms_tree);
1415 cv_broadcast(&msp->ms_load_cv);
1420 metaslab_unload(metaslab_t *msp)
1422 ASSERT(MUTEX_HELD(&msp->ms_lock));
1423 range_tree_vacate(msp->ms_tree, NULL, NULL);
1424 msp->ms_loaded = B_FALSE;
1425 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1429 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1432 vdev_t *vd = mg->mg_vd;
1433 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1437 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1438 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1439 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1441 ms->ms_start = id << vd->vdev_ms_shift;
1442 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1445 * We only open space map objects that already exist. All others
1446 * will be opened when we finally allocate an object for it.
1449 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1450 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1453 kmem_free(ms, sizeof (metaslab_t));
1457 ASSERT(ms->ms_sm != NULL);
1461 * We create the main range tree here, but we don't create the
1462 * alloctree and freetree until metaslab_sync_done(). This serves
1463 * two purposes: it allows metaslab_sync_done() to detect the
1464 * addition of new space; and for debugging, it ensures that we'd
1465 * data fault on any attempt to use this metaslab before it's ready.
1467 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1468 metaslab_group_add(mg, ms);
1470 ms->ms_fragmentation = metaslab_fragmentation(ms);
1471 ms->ms_ops = mg->mg_class->mc_ops;
1474 * If we're opening an existing pool (txg == 0) or creating
1475 * a new one (txg == TXG_INITIAL), all space is available now.
1476 * If we're adding space to an existing pool, the new space
1477 * does not become available until after this txg has synced.
1479 if (txg <= TXG_INITIAL)
1480 metaslab_sync_done(ms, 0);
1483 * If metaslab_debug_load is set and we're initializing a metaslab
1484 * that has an allocated space_map object then load its space
1485 * map so that we can verify frees.
1487 if (metaslab_debug_load && ms->ms_sm != NULL) {
1488 mutex_enter(&ms->ms_lock);
1489 VERIFY0(metaslab_load(ms));
1490 mutex_exit(&ms->ms_lock);
1494 vdev_dirty(vd, 0, NULL, txg);
1495 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1504 metaslab_fini(metaslab_t *msp)
1506 metaslab_group_t *mg = msp->ms_group;
1508 metaslab_group_remove(mg, msp);
1510 mutex_enter(&msp->ms_lock);
1512 VERIFY(msp->ms_group == NULL);
1513 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1515 space_map_close(msp->ms_sm);
1517 metaslab_unload(msp);
1518 range_tree_destroy(msp->ms_tree);
1520 for (int t = 0; t < TXG_SIZE; t++) {
1521 range_tree_destroy(msp->ms_alloctree[t]);
1522 range_tree_destroy(msp->ms_freetree[t]);
1525 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1526 range_tree_destroy(msp->ms_defertree[t]);
1529 ASSERT0(msp->ms_deferspace);
1531 mutex_exit(&msp->ms_lock);
1532 cv_destroy(&msp->ms_load_cv);
1533 mutex_destroy(&msp->ms_lock);
1535 kmem_free(msp, sizeof (metaslab_t));
1538 #define FRAGMENTATION_TABLE_SIZE 17
1541 * This table defines a segment size based fragmentation metric that will
1542 * allow each metaslab to derive its own fragmentation value. This is done
1543 * by calculating the space in each bucket of the spacemap histogram and
1544 * multiplying that by the fragmentation metric in this table. Doing
1545 * this for all buckets and dividing it by the total amount of free
1546 * space in this metaslab (i.e. the total free space in all buckets) gives
1547 * us the fragmentation metric. This means that a high fragmentation metric
1548 * equates to most of the free space being comprised of small segments.
1549 * Conversely, if the metric is low, then most of the free space is in
1550 * large segments. A 10% change in fragmentation equates to approximately
1551 * double the number of segments.
1553 * This table defines 0% fragmented space using 16MB segments. Testing has
1554 * shown that segments that are greater than or equal to 16MB do not suffer
1555 * from drastic performance problems. Using this value, we derive the rest
1556 * of the table. Since the fragmentation value is never stored on disk, it
1557 * is possible to change these calculations in the future.
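/*
 * Put differently (illustrative): the free space in each space map histogram
 * bucket is weighted by the corresponding table entry, giving
 *
 *	fragmentation = SUM(space in bucket i * zfs_frag_table[idx]) / total
 *
 * where total is the metaslab's total free space, so a metaslab whose free
 * space sits entirely in segments of 16MB or more reports 0% fragmentation.
 */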
1559 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1579 * Calculate the metaslab's fragmentation metric. A return value
1580 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1581 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
1585 metaslab_fragmentation(metaslab_t *msp)
1587 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1588 uint64_t fragmentation = 0;
1590 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1591 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1593 if (!feature_enabled)
1594 return (ZFS_FRAG_INVALID);
1597 * A null space map means that the entire metaslab is free
1598 * and thus is not fragmented.
1600 if (msp->ms_sm == NULL)
1604 * If this metaslab's space_map has not been upgraded, flag it
1605 * so that we upgrade next time we encounter it.
1607 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1608 uint64_t txg = spa_syncing_txg(spa);
1609 vdev_t *vd = msp->ms_group->mg_vd;
1611 if (spa_writeable(spa)) {
1612 msp->ms_condense_wanted = B_TRUE;
1613 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1614 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1615 "msp %p, vd %p", txg, msp, vd);
1617 return (ZFS_FRAG_INVALID);
1620 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1622 uint8_t shift = msp->ms_sm->sm_shift;
1623 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1624 FRAGMENTATION_TABLE_SIZE - 1);
1626 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1629 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1632 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1633 fragmentation += space * zfs_frag_table[idx];
1637 fragmentation /= total;
1638 ASSERT3U(fragmentation, <=, 100);
1639 return (fragmentation);
1643 * Compute a weight -- a selection preference value -- for the given metaslab.
1644 * This is based on the amount of free space, the level of fragmentation,
1645 * the LBA range, and whether the metaslab is loaded.
1648 metaslab_weight(metaslab_t *msp)
1650 metaslab_group_t *mg = msp->ms_group;
1651 vdev_t *vd = mg->mg_vd;
1652 uint64_t weight, space;
1654 ASSERT(MUTEX_HELD(&msp->ms_lock));
1657 * This vdev is in the process of being removed so there is nothing
1658 * for us to do here.
1660 if (vd->vdev_removing) {
1661 ASSERT0(space_map_allocated(msp->ms_sm));
1662 ASSERT0(vd->vdev_ms_shift);
1667 * The baseline weight is the metaslab's free space.
1669 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1671 msp->ms_fragmentation = metaslab_fragmentation(msp);
1672 if (metaslab_fragmentation_factor_enabled &&
1673 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1675 * Use the fragmentation information to inversely scale
1676 * down the baseline weight. We need to ensure that we
1677 * don't exclude this metaslab completely when it's 100%
1678 * fragmented. To avoid this we reduce the fragmented value
 * by 1.
1681 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1684 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1685 * this metaslab again. The fragmentation metric may have
1686 * decreased the space to something smaller than
1687 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1688 * so that we can consume any remaining space.
1690 if (space > 0 && space < SPA_MINBLOCKSIZE)
1691 space = SPA_MINBLOCKSIZE;
1696 * Modern disks have uniform bit density and constant angular velocity.
1697 * Therefore, the outer recording zones are faster (higher bandwidth)
1698 * than the inner zones by the ratio of outer to inner track diameter,
1699 * which is typically around 2:1. We account for this by assigning
1700 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1701 * In effect, this means that we'll select the metaslab with the most
1702 * free bandwidth rather than simply the one with the most free space.
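/*
 * Illustrative numbers: with the formula below, metaslab 0 (the outermost)
 * gets weight = 2 * space, the last metaslab gets roughly weight = space,
 * and the multiplier decreases linearly with ms_id in between.
 */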
1704 if (metaslab_lba_weighting_enabled) {
1705 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1706 ASSERT(weight >= space && weight <= 2 * space);
1710 * If this metaslab is one we're actively using, adjust its
1711 * weight to make it preferable to any inactive metaslab so
1712 * we'll polish it off. If the fragmentation on this metaslab
1713 * has exceeded our threshold, then don't mark it active.
1715 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1716 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1717 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1724 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1726 ASSERT(MUTEX_HELD(&msp->ms_lock));
1728 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1729 metaslab_load_wait(msp);
1730 if (!msp->ms_loaded) {
1731 int error = metaslab_load(msp);
1733 metaslab_group_sort(msp->ms_group, msp, 0);
1738 metaslab_group_sort(msp->ms_group, msp,
1739 msp->ms_weight | activation_weight);
1741 ASSERT(msp->ms_loaded);
1742 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1748 metaslab_passivate(metaslab_t *msp, uint64_t size)
1751 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1752 * this metaslab again. In that case, it had better be empty,
1753 * or we would be leaving space on the table.
1755 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1756 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1757 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1761 metaslab_preload(void *arg)
1763 metaslab_t *msp = arg;
1764 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1766 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1768 mutex_enter(&msp->ms_lock);
1769 metaslab_load_wait(msp);
1770 if (!msp->ms_loaded)
1771 (void) metaslab_load(msp);
1774 * Set the ms_access_txg value so that we don't unload it right away.
1776 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1777 mutex_exit(&msp->ms_lock);
1781 metaslab_group_preload(metaslab_group_t *mg)
1783 spa_t *spa = mg->mg_vd->vdev_spa;
1785 avl_tree_t *t = &mg->mg_metaslab_tree;
1788 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1789 taskq_wait(mg->mg_taskq);
1793 mutex_enter(&mg->mg_lock);
1795 * Load the next potential metaslabs
1798 while (msp != NULL) {
1799 metaslab_t *msp_next = AVL_NEXT(t, msp);
1802 * We preload only the maximum number of metaslabs specified
1803 * by metaslab_preload_limit. If a metaslab is being forced
1804 * to condense then we preload it too. This will ensure
1805 * that force condensing happens in the next txg.
1807 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
1813 * We must drop the metaslab group lock here to preserve
1814 * lock ordering with the ms_lock (when grabbing both
1815 * the mg_lock and the ms_lock, the ms_lock must be taken
1816 * first). As a result, it is possible that the ordering
1817 * of the metaslabs within the avl tree may change before
1818 * we reacquire the lock. The metaslab cannot be removed from
1819 * the tree while we're in syncing context so it is safe to
1820 * drop the mg_lock here. If the metaslabs are reordered
1821 * nothing will break -- we just may end up loading a
1822 * less than optimal one.
1824 mutex_exit(&mg->mg_lock);
1825 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1826 msp, TQ_SLEEP) != 0);
1827 mutex_enter(&mg->mg_lock);
1830 mutex_exit(&mg->mg_lock);
1834 * Determine if the space map's on-disk footprint is past our tolerance
1835 * for inefficiency. We would like to use the following criteria to make
1838 * 1. The size of the space map object should not dramatically increase as a
1839 * result of writing out the free space range tree.
1841 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1842 * times the size of the free space range tree representation
1843 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1845 * 3. The on-disk size of the space map should actually decrease.
1847 * Checking the first condition is tricky since we don't want to walk
1848 * the entire AVL tree calculating the estimated on-disk size. Instead we
1849 * use the size-ordered range tree in the metaslab and calculate the
1850 * size required to write out the largest segment in our free tree. If the
1851 * size required to represent that segment on disk is larger than the space
1852 * map object then we avoid condensing this map.
1854 * To determine the second criterion we use a best-case estimate and assume
1855 * each segment can be represented on-disk as a single 64-bit entry. We refer
1856 * to this best-case estimate as the space map's minimal form.
1858 * Unfortunately, we cannot compute the on-disk size of the space map in this
1859 * context because we cannot accurately compute the effects of compression, etc.
1860 * Instead, we apply the heuristic described in the block comment for
1861 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1862 * is greater than a threshold number of blocks.
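/*
 * Taken together (illustrative): with the defaults, zfs_condense_pct = 200
 * and zfs_metaslab_condense_block_threshold = 4, a metaslab is condensed
 * only when its largest free segment, written out on its own, would not
 * exceed the current on-disk size, the on-disk size is at least twice the
 * minimal form, and the space map spans more than four blocks on disk.
 */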
1865 metaslab_should_condense(metaslab_t *msp)
1867 space_map_t *sm = msp->ms_sm;
1869 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
1870 dmu_object_info_t doi;
1871 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
1873 ASSERT(MUTEX_HELD(&msp->ms_lock));
1874 ASSERT(msp->ms_loaded);
1877 * Use the ms_size_tree range tree, which is ordered by size, to
1878 * obtain the largest segment in the free tree. We always condense
1879 * metaslabs that are empty and metaslabs for which a condense
1880 * request has been made.
1882 rs = avl_last(&msp->ms_size_tree);
1883 if (rs == NULL || msp->ms_condense_wanted)
1887 * Calculate the number of 64-bit entries this segment would
1888 * require when written to disk. If this single segment would be
1889 * larger on-disk than the entire current on-disk structure, then
1890 * clearly condensing will increase the on-disk structure size.
1892 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1893 entries = size / (MIN(size, SM_RUN_MAX));
1894 segsz = entries * sizeof (uint64_t);
1896 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
1897 object_size = space_map_length(msp->ms_sm);
1899 dmu_object_info_from_db(sm->sm_dbuf, &doi);
1900 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
1902 return (segsz <= object_size &&
1903 object_size >= (optimal_size * zfs_condense_pct / 100) &&
1904 object_size > zfs_metaslab_condense_block_threshold * record_size);
1908 * Condense the on-disk space map representation to its minimized form.
1909 * The minimized form consists of a small number of allocations followed by
1910 * the entries of the free range tree.
1913 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1915 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1916 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1917 range_tree_t *condense_tree;
1918 space_map_t *sm = msp->ms_sm;
1920 ASSERT(MUTEX_HELD(&msp->ms_lock));
1921 ASSERT3U(spa_sync_pass(spa), ==, 1);
1922 ASSERT(msp->ms_loaded);
1925 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
1926 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
1927 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
1928 msp->ms_group->mg_vd->vdev_spa->spa_name,
1929 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
1930 msp->ms_condense_wanted ? "TRUE" : "FALSE");
1932 msp->ms_condense_wanted = B_FALSE;
1935 * Create a range tree that is 100% allocated. We remove segments
1936 * that have been freed in this txg, any deferred frees that exist,
1937 * and any allocation in the future. Removing segments should be
1938 * a relatively inexpensive operation since we expect these trees to
1939 * have a small number of nodes.
1941 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1942 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1945 * Remove what's been freed in this txg from the condense_tree.
1946 * Since we're in sync_pass 1, we know that all the frees from
1947 * this txg are in the freetree.
1949 range_tree_walk(freetree, range_tree_remove, condense_tree);
1951 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1952 range_tree_walk(msp->ms_defertree[t],
1953 range_tree_remove, condense_tree);
1956 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1957 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1958 range_tree_remove, condense_tree);
1962 * We're about to drop the metaslab's lock thus allowing
1963	 * other consumers to change its content. Set the
1964 * metaslab's ms_condensing flag to ensure that
1965 * allocations on this metaslab do not occur while we're
1966 * in the middle of committing it to disk. This is only critical
1967 * for the ms_tree as all other range trees use per txg
1968 * views of their content.
1970 msp->ms_condensing = B_TRUE;
1972 mutex_exit(&msp->ms_lock);
1973 space_map_truncate(sm, tx);
1974 mutex_enter(&msp->ms_lock);
1977 * While we would ideally like to create a space_map representation
1978 * that consists only of allocation records, doing so can be
1979 * prohibitively expensive because the in-core free tree can be
1980 * large, and therefore computationally expensive to subtract
1981 * from the condense_tree. Instead we sync out two trees, a cheap
1982 * allocation only tree followed by the in-core free tree. While not
1983	 * optimal, this is typically close to optimal, and much cheaper to compute.
1986 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1987 range_tree_vacate(condense_tree, NULL, NULL);
1988 range_tree_destroy(condense_tree);
1990 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1991 msp->ms_condensing = B_FALSE;
1995 * Write a metaslab to disk in the context of the specified transaction group.
1998 metaslab_sync(metaslab_t *msp, uint64_t txg)
2000 metaslab_group_t *mg = msp->ms_group;
2001 vdev_t *vd = mg->mg_vd;
2002 spa_t *spa = vd->vdev_spa;
2003 objset_t *mos = spa_meta_objset(spa);
2004 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2005 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
2006 range_tree_t **freed_tree =
2007 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2009 uint64_t object = space_map_object(msp->ms_sm);
2011 ASSERT(!vd->vdev_ishole);
2014 * This metaslab has just been added so there's no work to do now.
2016 if (*freetree == NULL) {
2017 ASSERT3P(alloctree, ==, NULL);
2021 ASSERT3P(alloctree, !=, NULL);
2022 ASSERT3P(*freetree, !=, NULL);
2023 ASSERT3P(*freed_tree, !=, NULL);
2026 * Normally, we don't want to process a metaslab if there
2027 * are no allocations or frees to perform. However, if the metaslab
2028 * is being forced to condense we need to let it through.
2030 if (range_tree_space(alloctree) == 0 &&
2031 range_tree_space(*freetree) == 0 &&
2032 !msp->ms_condense_wanted)
2036 * The only state that can actually be changing concurrently with
2037 * metaslab_sync() is the metaslab's ms_tree. No other thread can
2038 * be modifying this txg's alloctree, freetree, freed_tree, or
2039	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2040 * space_map ASSERTs. We drop it whenever we call into the DMU,
2041	 * because the DMU can call down to us (e.g. via zio_free()) at any time.
2045 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2047 if (msp->ms_sm == NULL) {
2048 uint64_t new_object;
2050 new_object = space_map_alloc(mos, tx);
2051 VERIFY3U(new_object, !=, 0);
2053 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2054 msp->ms_start, msp->ms_size, vd->vdev_ashift,
2056 ASSERT(msp->ms_sm != NULL);
2059 mutex_enter(&msp->ms_lock);
2062 * Note: metaslab_condense() clears the space_map's histogram.
2063	 * Therefore we must verify and remove this histogram before condensing.
2066 metaslab_group_histogram_verify(mg);
2067 metaslab_class_histogram_verify(mg->mg_class);
2068 metaslab_group_histogram_remove(mg, msp);
2070 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2071 metaslab_should_condense(msp)) {
2072 metaslab_condense(msp, txg, tx);
2074 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2075 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
2078 if (msp->ms_loaded) {
2080	 * When the space map is loaded, we have an accurate
2081 * histogram in the range tree. This gives us an opportunity
2082 * to bring the space map's histogram up-to-date so we clear
2083 * it first before updating it.
2085 space_map_histogram_clear(msp->ms_sm);
2086 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2089 * Since the space map is not loaded we simply update the
2090	 * existing histogram with what was freed in this txg. This
2091 * means that the on-disk histogram may not have an accurate
2092 * view of the free space but it's close enough to allow
2093 * us to make allocation decisions.
2095 space_map_histogram_add(msp->ms_sm, *freetree, tx);
2097 metaslab_group_histogram_add(mg, msp);
2098 metaslab_group_histogram_verify(mg);
2099 metaslab_class_histogram_verify(mg->mg_class);
2102 * For sync pass 1, we avoid traversing this txg's free range tree
2103 * and instead will just swap the pointers for freetree and
2104 * freed_tree. We can safely do this since the freed_tree is
2105 * guaranteed to be empty on the initial pass.
2107 if (spa_sync_pass(spa) == 1) {
2108 range_tree_swap(freetree, freed_tree);
2110 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
2112 range_tree_vacate(alloctree, NULL, NULL);
2114 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2115 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2117 mutex_exit(&msp->ms_lock);
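	/*
	 * If the space map object number changed during this sync (e.g. the
	 * space map was just created above), record the new object number in
	 * the vdev's metaslab array so the space map can be located again
	 * when the vdev is reloaded.
	 */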
2119 if (object != space_map_object(msp->ms_sm)) {
2120 object = space_map_object(msp->ms_sm);
2121 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2122 msp->ms_id, sizeof (uint64_t), &object, tx);
2128 * Called after a transaction group has completely synced to mark
2129 * all of the metaslab's free space as usable.
2132 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2134 metaslab_group_t *mg = msp->ms_group;
2135 vdev_t *vd = mg->mg_vd;
2136 range_tree_t **freed_tree;
2137 range_tree_t **defer_tree;
2138 int64_t alloc_delta, defer_delta;
2140 ASSERT(!vd->vdev_ishole);
2142 mutex_enter(&msp->ms_lock);
2145 * If this metaslab is just becoming available, initialize its
2146	 * alloctrees, freetrees, and defertree and add its capacity to the vdev.
2149 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
2150 for (int t = 0; t < TXG_SIZE; t++) {
2151 ASSERT(msp->ms_alloctree[t] == NULL);
2152 ASSERT(msp->ms_freetree[t] == NULL);
2154 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2156 msp->ms_freetree[t] = range_tree_create(NULL, msp,
2160 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2161 ASSERT(msp->ms_defertree[t] == NULL);
2163 msp->ms_defertree[t] = range_tree_create(NULL, msp,
2167 vdev_space_update(vd, 0, 0, msp->ms_size);
2170 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2171 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2173 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2174 defer_delta = range_tree_space(*freed_tree) -
2175 range_tree_space(*defer_tree);
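	/*
	 * defer_delta is the net change in deferred free space: the frees
	 * from the txg that just synced (which move to the defer tree below)
	 * minus the space coming back out of the defer tree. It is applied
	 * to ms_deferspace and reported to the vdev as deferred space.
	 */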
2177 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2179 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2180 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2183 * If there's a metaslab_load() in progress, wait for it to complete
2184 * so that we have a consistent view of the in-core space map.
2186 metaslab_load_wait(msp);
2189 * Move the frees from the defer_tree back to the free
2190 * range tree (if it's loaded). Swap the freed_tree and the
2191	 * defer_tree -- this is safe to do because we've just emptied out the defer_tree.
2194 range_tree_vacate(*defer_tree,
2195 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2196 range_tree_swap(freed_tree, defer_tree);
2198 space_map_update(msp->ms_sm);
2200 msp->ms_deferspace += defer_delta;
2201 ASSERT3S(msp->ms_deferspace, >=, 0);
2202 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2203 if (msp->ms_deferspace != 0) {
2205 * Keep syncing this metaslab until all deferred frees
2206 * are back in circulation.
2208 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2211 if (msp->ms_loaded && msp->ms_access_txg < txg) {
2212 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2213 VERIFY0(range_tree_space(
2214 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2217 if (!metaslab_debug_unload)
2218 metaslab_unload(msp);
2221 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2222 mutex_exit(&msp->ms_lock);
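/*
 * Re-evaluate a metaslab group: refresh its allocatability and fragmentation
 * metrics and preload the metaslabs that are most likely to be used next.
 */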
2226 metaslab_sync_reassess(metaslab_group_t *mg)
2228 metaslab_group_alloc_update(mg);
2229 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2232 * Preload the next potential metaslabs
2234 metaslab_group_preload(mg);
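/*
 * Return the distance, in bytes, between this metaslab and the offset of the
 * given DVA on the same vdev; a DVA on a different vdev is treated as
 * maximally distant (1ULL << 63). Used to spread a block's DVAs apart.
 */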
2238 metaslab_distance(metaslab_t *msp, dva_t *dva)
2240 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2241 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2242 uint64_t start = msp->ms_id;
2244 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2245 return (1ULL << 63);
2248 return ((start - offset) << ms_shift);
2250 return ((offset - start) << ms_shift);
2255 * ==========================================================================
2256 * Metaslab block operations
2257 * ==========================================================================
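/*
 * Track the number of in-flight asynchronous allocations charged against a
 * metaslab group so the allocation throttle can bound the queue depth of each
 * top-level vdev. Both calls are no-ops for unthrottled or synchronous
 * allocations.
 */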
2261 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2263 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2264 flags & METASLAB_DONT_THROTTLE)
2267 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2268 if (!mg->mg_class->mc_alloc_throttle_enabled)
2271 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2275 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2277 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2278 flags & METASLAB_DONT_THROTTLE)
2281 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2282 if (!mg->mg_class->mc_alloc_throttle_enabled)
2285 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
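/*
 * Debug check: verify that the given tag no longer holds a queue depth
 * reference on any metaslab group referenced by the block pointer's DVAs.
 */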
2289 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2292 const dva_t *dva = bp->blk_dva;
2293 int ndvas = BP_GET_NDVAS(bp);
2295 for (int d = 0; d < ndvas; d++) {
2296 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2297 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2298 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2304 metaslab_group_alloc(metaslab_group_t *mg, uint64_t asize,
2305 uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2307 spa_t *spa = mg->mg_vd->vdev_spa;
2308 metaslab_t *msp = NULL;
2309 uint64_t offset = -1ULL;
2310 avl_tree_t *t = &mg->mg_metaslab_tree;
2311 uint64_t activation_weight;
2312 uint64_t target_distance;
2315 activation_weight = METASLAB_WEIGHT_PRIMARY;
2316 for (i = 0; i < d; i++) {
2317 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2318 activation_weight = METASLAB_WEIGHT_SECONDARY;
2324 boolean_t was_active;
2326 mutex_enter(&mg->mg_lock);
2327 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
2328 if (msp->ms_weight < asize) {
2329 spa_dbgmsg(spa, "%s: failed to meet weight "
2330 "requirement: vdev %llu, txg %llu, mg %p, "
2331 "msp %p, asize %llu, "
2332 "weight %llu", spa_name(spa),
2333 mg->mg_vd->vdev_id, txg,
2334 mg, msp, asize, msp->ms_weight);
2335 mutex_exit(&mg->mg_lock);
2340 * If the selected metaslab is condensing, skip it.
2342 if (msp->ms_condensing)
2345 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2346 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2349 target_distance = min_distance +
2350 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2353 for (i = 0; i < d; i++)
2354 if (metaslab_distance(msp, &dva[i]) <
2360 mutex_exit(&mg->mg_lock);
2364 mutex_enter(&msp->ms_lock);
2367 * Ensure that the metaslab we have selected is still
2368 * capable of handling our request. It's possible that
2369 * another thread may have changed the weight while we
2370 * were blocked on the metaslab lock.
2372 if (msp->ms_weight < asize || (was_active &&
2373 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
2374 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
2375 mutex_exit(&msp->ms_lock);
2379 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2380 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2381 metaslab_passivate(msp,
2382 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2383 mutex_exit(&msp->ms_lock);
2387 if (metaslab_activate(msp, activation_weight) != 0) {
2388 mutex_exit(&msp->ms_lock);
2393 * If this metaslab is currently condensing then pick again as
2394 * we can't manipulate this metaslab until it's committed
2397 if (msp->ms_condensing) {
2398 mutex_exit(&msp->ms_lock);
2402 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
2405 metaslab_passivate(msp, metaslab_block_maxsize(msp));
2406 mutex_exit(&msp->ms_lock);
2409 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2410 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2412 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
2413 msp->ms_access_txg = txg + metaslab_unload_delay;
2415 mutex_exit(&msp->ms_lock);
2420 * Allocate a block for the specified i/o.
2423 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2424 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
2426 metaslab_group_t *mg, *rotor;
2430 int zio_lock = B_FALSE;
2431 boolean_t allocatable;
2435 ASSERT(!DVA_IS_VALID(&dva[d]));
2438 * For testing, make some blocks above a certain size be gang blocks.
2440 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
2441 return (SET_ERROR(ENOSPC));
2444 * Start at the rotor and loop through all mgs until we find something.
2445 * Note that there's no locking on mc_rotor or mc_aliquot because
2446 * nothing actually breaks if we miss a few updates -- we just won't
2447 * allocate quite as evenly. It all balances out over time.
2449 * If we are doing ditto or log blocks, try to spread them across
2450 * consecutive vdevs. If we're forced to reuse a vdev before we've
2451 * allocated all of our ditto blocks, then try and spread them out on
2452 * that vdev as much as possible. If it turns out to not be possible,
2453	 * that vdev as much as possible. If it turns out not to be possible,
2454 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2455 * gives us hope of containing our fault domains to something we're
2456 * able to reason about. Otherwise, any two top-level vdev failures
2457 * will guarantee the loss of data. With consecutive allocation,
2458 * only two adjacent top-level vdev failures will result in data loss.
2460 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2461 * ourselves on the same vdev as our gang block header. That
2462 * way, we can hope for locality in vdev_cache, plus it makes our
2463 * fault domains something tractable.
2466 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2469 * It's possible the vdev we're using as the hint no
2470	 * longer exists (i.e. removed). Consult the rotor when that happens.
2476 if (flags & METASLAB_HINTBP_AVOID &&
2477 mg->mg_next != NULL)
2482 } else if (d != 0) {
2483 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2484 mg = vd->vdev_mg->mg_next;
2490 * If the hint put us into the wrong metaslab class, or into a
2491 * metaslab group that has been passivated, just follow the rotor.
2493 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2500 ASSERT(mg->mg_activation_count == 1);
2504 * Don't allocate from faulted devices.
2507 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2508 allocatable = vdev_allocatable(vd);
2509 spa_config_exit(spa, SCL_ZIO, FTAG);
2511 allocatable = vdev_allocatable(vd);
2515 * Determine if the selected metaslab group is eligible
2516 * for allocations. If we're ganging then don't allow
2517 * this metaslab group to skip allocations since that would
2518 * inadvertently return ENOSPC and suspend the pool
2519 * even though space is still available.
2521 if (allocatable && !GANG_ALLOCATION(flags) && !zio_lock) {
2522 allocatable = metaslab_group_allocatable(mg, rotor,
2529 ASSERT(mg->mg_initialized);
2532 * Avoid writing single-copy data to a failing vdev.
2534 if ((vd->vdev_stat.vs_write_errors > 0 ||
2535 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2536 d == 0 && dshift == 3 && vd->vdev_children == 0) {
2541 ASSERT(mg->mg_class == mc);
2543 distance = vd->vdev_asize >> dshift;
2544 if (distance <= (1ULL << vd->vdev_ms_shift))
2549 asize = vdev_psize_to_asize(vd, psize);
2550 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2552 uint64_t offset = metaslab_group_alloc(mg, asize, txg,
2555 mutex_enter(&mg->mg_lock);
2556 if (offset == -1ULL) {
2557 mg->mg_failed_allocations++;
2558 if (asize == SPA_GANGBLOCKSIZE) {
2560 * This metaslab group was unable to allocate
2561 * the minimum gang block size so it must be
2562 * out of space. We must notify the allocation
2563 * throttle to start skipping allocation
2564 * attempts to this metaslab group until more
2565 * space becomes available.
2567 * Note: this failure cannot be caused by the
2568 * allocation throttle since the allocation
2569 * throttle is only responsible for skipping
2570 * devices and not failing block allocations.
2572 mg->mg_no_free_space = B_TRUE;
2575 mg->mg_allocations++;
2576 mutex_exit(&mg->mg_lock);
2578 if (offset != -1ULL) {
2580 * If we've just selected this metaslab group,
2581 * figure out whether the corresponding vdev is
2582 * over- or under-used relative to the pool,
2583 * and set an allocation bias to even it out.
2585 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
2586 vdev_stat_t *vs = &vd->vdev_stat;
2589 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2590 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2593 * Calculate how much more or less we should
2594 * try to allocate from this device during
2595 * this iteration around the rotor.
2596 * For example, if a device is 80% full
2597 * and the pool is 20% full then we should
2598 * reduce allocations by 60% on this device.
2600 * mg_bias = (20 - 80) * 512K / 100 = -307K
2602	 * This reduces allocations by 307K for this iteration.
2605 mg->mg_bias = ((cu - vu) *
2606 (int64_t)mg->mg_aliquot) / 100;
2607 } else if (!metaslab_bias_enabled) {
2611 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2612 mg->mg_aliquot + mg->mg_bias) {
2613 mc->mc_rotor = mg->mg_next;
2617 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2618 DVA_SET_OFFSET(&dva[d], offset);
2619 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2620 DVA_SET_ASIZE(&dva[d], asize);
2625 mc->mc_rotor = mg->mg_next;
2627 } while ((mg = mg->mg_next) != rotor);
2631 ASSERT(dshift < 64);
2635 if (!allocatable && !zio_lock) {
2641 bzero(&dva[d], sizeof (dva_t));
2643 return (SET_ERROR(ENOSPC));
2647 * Free the block represented by DVA in the context of the specified
2648 * transaction group.
2651 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2653 uint64_t vdev = DVA_GET_VDEV(dva);
2654 uint64_t offset = DVA_GET_OFFSET(dva);
2655 uint64_t size = DVA_GET_ASIZE(dva);
2659 ASSERT(DVA_IS_VALID(dva));
2661 if (txg > spa_freeze_txg(spa))
2664 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2665 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2666 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2667 (u_longlong_t)vdev, (u_longlong_t)offset);
2672 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2674 if (DVA_GET_GANG(dva))
2675 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2677 mutex_enter(&msp->ms_lock);
2680 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2683 VERIFY(!msp->ms_condensing);
2684 VERIFY3U(offset, >=, msp->ms_start);
2685 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2686 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2688 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2689 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2690 range_tree_add(msp->ms_tree, offset, size);
2692 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2693 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2694 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2698 mutex_exit(&msp->ms_lock);
2702 * Intent log support: upon opening the pool after a crash, notify the SPA
2703 * of blocks that the intent log has allocated for immediate write, but
2704 * which are still considered free by the SPA because the last transaction
2705 * group didn't commit yet.
2708 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2710 uint64_t vdev = DVA_GET_VDEV(dva);
2711 uint64_t offset = DVA_GET_OFFSET(dva);
2712 uint64_t size = DVA_GET_ASIZE(dva);
2717 ASSERT(DVA_IS_VALID(dva));
2719 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2720 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2721 return (SET_ERROR(ENXIO));
2723 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2725 if (DVA_GET_GANG(dva))
2726 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2728 mutex_enter(&msp->ms_lock);
2730 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2731 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2733 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2734 error = SET_ERROR(ENOENT);
2736 if (error || txg == 0) { /* txg == 0 indicates dry run */
2737 mutex_exit(&msp->ms_lock);
2741 VERIFY(!msp->ms_condensing);
2742 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2743 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2744 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2745 range_tree_remove(msp->ms_tree, offset, size);
2747 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2748 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2749 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2750 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2753 mutex_exit(&msp->ms_lock);
2759 * Reserve some allocation slots. The reservation system must be called
2760 * before we call into the allocator. If there aren't any available slots
2761 * then the I/O will be throttled until an I/O completes and its slots are
2762	 * freed up. The function returns true if it was successful in placing the reservation.
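 *
 * For illustration (hypothetical numbers): with mc_alloc_max_slots at 100 and
 * 97 slots already reserved, a request for 4 slots would normally be refused,
 * but a gang allocation is always granted so that ganging can make forward
 * progress.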
2766 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
2769 uint64_t available_slots = 0;
2770 boolean_t slot_reserved = B_FALSE;
2772 ASSERT(mc->mc_alloc_throttle_enabled);
2773 mutex_enter(&mc->mc_lock);
2775 uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
2776 if (reserved_slots < mc->mc_alloc_max_slots)
2777 available_slots = mc->mc_alloc_max_slots - reserved_slots;
2779 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
2781 * We reserve the slots individually so that we can unreserve
2782 * them individually when an I/O completes.
2784 for (int d = 0; d < slots; d++) {
2785 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
2787 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
2788 slot_reserved = B_TRUE;
2791 mutex_exit(&mc->mc_lock);
2792 return (slot_reserved);
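/*
 * Release the allocation slots that were previously reserved for this zio.
 */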
2796 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
2798 ASSERT(mc->mc_alloc_throttle_enabled);
2799 mutex_enter(&mc->mc_lock);
2800 for (int d = 0; d < slots; d++) {
2801 (void) refcount_remove(&mc->mc_alloc_slots, zio);
2803 mutex_exit(&mc->mc_lock);
2807 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2808 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, zio_t *zio)
2810 dva_t *dva = bp->blk_dva;
2811 dva_t *hintdva = hintbp->blk_dva;
2814 ASSERT(bp->blk_birth == 0);
2815 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2817 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2819 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2820 spa_config_exit(spa, SCL_ALLOC, FTAG);
2821 return (SET_ERROR(ENOSPC));
2824 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2825 ASSERT(BP_GET_NDVAS(bp) == 0);
2826 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2828 for (int d = 0; d < ndvas; d++) {
2829 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2832 for (d--; d >= 0; d--) {
2833 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2834 metaslab_group_alloc_decrement(spa,
2835 DVA_GET_VDEV(&dva[d]), zio, flags);
2836 bzero(&dva[d], sizeof (dva_t));
2838 spa_config_exit(spa, SCL_ALLOC, FTAG);
2842 * Update the metaslab group's queue depth
2843 * based on the newly allocated dva.
2845 metaslab_group_alloc_increment(spa,
2846 DVA_GET_VDEV(&dva[d]), zio, flags);
2851 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2853 spa_config_exit(spa, SCL_ALLOC, FTAG);
2855 BP_SET_BIRTH(bp, txg, txg);
2861 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2863 const dva_t *dva = bp->blk_dva;
2864 int ndvas = BP_GET_NDVAS(bp);
2866 ASSERT(!BP_IS_HOLE(bp));
2867 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2869 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2871 for (int d = 0; d < ndvas; d++)
2872 metaslab_free_dva(spa, &dva[d], txg, now);
2874 spa_config_exit(spa, SCL_FREE, FTAG);
2878 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2880 const dva_t *dva = bp->blk_dva;
2881 int ndvas = BP_GET_NDVAS(bp);
2884 ASSERT(!BP_IS_HOLE(bp));
2888 * First do a dry run to make sure all DVAs are claimable,
2889 * so we don't have to unwind from partial failures below.
2891 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2895 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2897 for (int d = 0; d < ndvas; d++)
2898 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2901 spa_config_exit(spa, SCL_ALLOC, FTAG);
2903 ASSERT(error == 0 || txg == 0);
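/*
 * Debug check (ZFS_DEBUG_ZIO_FREE): verify that a block being freed is not
 * currently recorded as free in any of its metaslab's range trees, which
 * would indicate a double free.
 */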
2909 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2911 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2914 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2915 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2916 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2917 vdev_t *vd = vdev_lookup_top(spa, vdev);
2918 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2919 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2920 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2923 range_tree_verify(msp->ms_tree, offset, size);
2925 for (int j = 0; j < TXG_SIZE; j++)
2926 range_tree_verify(msp->ms_freetree[j], offset, size);
2927 for (int j = 0; j < TXG_DEFER_SIZE; j++)
2928 range_tree_verify(msp->ms_defertree[j], offset, size);
2930 spa_config_exit(spa, SCL_VDEV, FTAG);