 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/zfs_context.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, force_ganging, CTLFLAG_RWTUN,
    &metaslab_force_ganging, 0,
    "Force gang block allocation for blocks larger than or equal to this value");
/*
 * Since we can touch multiple metaslabs (and their respective space maps)
 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * over the disk.
 */
int zfs_metaslab_sm_blksz = (1 << 12);
SYSCTL_INT(_vfs_zfs, OID_AUTO, metaslab_sm_blksz, CTLFLAG_RDTUN,
    &zfs_metaslab_sm_blksz, 0,
    "Block size for metaslab space map. Power of 2 and greater than 4096.");
/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
    &zfs_condense_pct, 0,
    "Condense on-disk space map when it is more than this percentage"
    " of the in-memory counterpart");
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a space map unless its
 * uncondensed size consumes more than zfs_metaslab_condense_block_threshold
 * space map blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
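
/*
 * Worked example (illustrative): with the default threshold of 4 and a 4K
 * space map block size, a space map is only considered for condensing once
 * its uncondensed form occupies more than 4 blocks, i.e. more than 16K.
 */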
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
    &zfs_mg_noalloc_threshold, 0,
    "Percentage of metaslab group size that should be free"
    " to make it eligible for allocation");
/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
    &zfs_mg_fragmentation_threshold, 0,
    "Maximum fragmentation percentage at which a metaslab group remains "
    "eligible for allocations, unless all metaslab groups within the "
    "metaslab class have also crossed this threshold");
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
    &zfs_metaslab_fragmentation_threshold, 0,
    "Maximum fragmentation percentage at which a metaslab keeps its "
    "active state");
/*
 * When set, load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = 0;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
    &metaslab_debug_load, 0,
    "Load all metaslabs when pool is first opened");

/*
 * When set, prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
    &metaslab_debug_unload, 0,
    "Prevent metaslabs from being unloaded");
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
    &metaslab_df_alloc_threshold, 0,
    "Minimum size which forces the dynamic allocator to change its "
    "allocation strategy");
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
    &metaslab_df_free_pct, 0,
    "The minimum free space, in percent, which must be available in a "
    "space map to continue allocations in a first-fit fashion");
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
    &metaslab_min_alloc_size, 0,
    "A metaslab is considered \"free\" if it contains a contiguous "
    "segment which is greater than vfs.zfs.metaslab.min_alloc_size");

/*
 * Percentage of all CPUs that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
    &metaslab_load_pct, 0,
    "Percentage of CPUs that can be used by the metaslab taskq");
/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
    &metaslab_unload_delay, 0,
    "Number of TXGs that an unused metaslab can be kept in memory");
/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
    &metaslab_preload_limit, 0,
    "Max number of metaslabs per group to preload");

/*
 * Enable/disable preloading of metaslabs.
 */
boolean_t metaslab_preload_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
    &metaslab_preload_enabled, 0,
    "Enable metaslab preloading");
/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled,
    CTLFLAG_RWTUN, &metaslab_fragmentation_factor_enabled, 0,
    "Enable fragmentation weighting on metaslabs");

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
boolean_t metaslab_lba_weighting_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
    &metaslab_lba_weighting_enabled, 0,
    "Enable LBA weighting (i.e. outer tracks are given preference)");

/*
 * Enable/disable metaslab group biasing.
 */
boolean_t metaslab_bias_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
    &metaslab_bias_enabled, 0,
    "Enable metaslab group biasing");
/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
boolean_t metaslab_trace_enabled = B_TRUE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
uint64_t metaslab_trace_max_entries = 5000;
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);

kmem_cache_t *metaslab_alloc_trace_cache;
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
        metaslab_class_t *mc;

        mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

        mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
        mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
            sizeof (refcount_t), KM_SLEEP);
        mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
            sizeof (uint64_t), KM_SLEEP);
        for (int i = 0; i < spa->spa_alloc_count; i++)
                refcount_create_tracked(&mc->mc_alloc_slots[i]);
void
metaslab_class_destroy(metaslab_class_t *mc)
{
        ASSERT(mc->mc_rotor == NULL);
        ASSERT(mc->mc_alloc == 0);
        ASSERT(mc->mc_deferred == 0);
        ASSERT(mc->mc_space == 0);
        ASSERT(mc->mc_dspace == 0);

        for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
                refcount_destroy(&mc->mc_alloc_slots[i]);
        kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
            sizeof (refcount_t));
        kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
            sizeof (uint64_t));
        mutex_destroy(&mc->mc_lock);
        kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
        metaslab_group_t *mg;
        vdev_t *vd;

        /*
         * Must hold one of the spa_config locks.
         */
        ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
            spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

        if ((mg = mc->mc_rotor) == NULL)
                return (0);

        do {
                vd = mg->mg_vd;
                ASSERT(vd->vdev_mg != NULL);
                ASSERT3P(vd->vdev_top, ==, vd);
                ASSERT3P(mg->mg_class, ==, mc);
                ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
        } while ((mg = mg->mg_next) != mc->mc_rotor);

        return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
        atomic_add_64(&mc->mc_alloc, alloc_delta);
        atomic_add_64(&mc->mc_deferred, defer_delta);
        atomic_add_64(&mc->mc_space, space_delta);
        atomic_add_64(&mc->mc_dspace, dspace_delta);
}
void
metaslab_class_minblocksize_update(metaslab_class_t *mc)
{
        metaslab_group_t *mg;
        vdev_t *vd;
        uint64_t minashift = UINT64_MAX;

        if ((mg = mc->mc_rotor) == NULL) {
                mc->mc_minblocksize = SPA_MINBLOCKSIZE;
                return;
        }

        do {
                vd = mg->mg_vd;
                if (vd->vdev_ashift < minashift)
                        minashift = vd->vdev_ashift;
        } while ((mg = mg->mg_next) != mc->mc_rotor);

        mc->mc_minblocksize = 1ULL << minashift;
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
        return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
        return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
        return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
        return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

uint64_t
metaslab_class_get_minblocksize(metaslab_class_t *mc)
{
        return (mc->mc_minblocksize);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
        vdev_t *rvd = mc->mc_spa->spa_root_vdev;
        uint64_t *mc_hist;
        int i;

        if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
                return;

        mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
            KM_SLEEP);

        for (int c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;
                /*
                 * Skip any holes, uninitialized top-levels, or
                 * vdevs that are not in this metaslab class.
                 */
                if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
                    mg->mg_class != mc) {
                        continue;
                }
                for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
                        mc_hist[i] += mg->mg_histogram[i];
        }

        for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
                VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

        kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
        vdev_t *rvd = mc->mc_spa->spa_root_vdev;
        uint64_t fragmentation = 0;

        spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

        for (int c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                /*
                 * Skip any holes, uninitialized top-levels,
                 * or vdevs that are not in this metaslab class.
                 */
                if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
                    mg->mg_class != mc) {
                        continue;
                }

                /*
                 * If a metaslab group does not contain a fragmentation
                 * metric then just bail out.
                 */
                if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
                        spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
                        return (ZFS_FRAG_INVALID);
                }

                /*
                 * Determine how much this metaslab_group is contributing
                 * to the overall pool fragmentation metric.
                 */
                fragmentation += mg->mg_fragmentation *
                    metaslab_group_get_space(mg);
        }
        fragmentation /= metaslab_class_get_space(mc);

        ASSERT3U(fragmentation, <=, 100);
        spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
        return (fragmentation);
}
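
/*
 * Worked example (illustrative): a pool with one 75%-fragmented group
 * contributing 1/4 of the class's space and one 15%-fragmented group
 * contributing the remaining 3/4 averages out to
 * 0.25 * 75 + 0.75 * 15 = 30% class fragmentation.
 */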
/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
        vdev_t *rvd = mc->mc_spa->spa_root_vdev;
        uint64_t space = 0;

        spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
        for (int c = 0; c < rvd->vdev_children; c++) {
                uint64_t tspace;
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
                    mg->mg_class != mc) {
                        continue;
                }

                /*
                 * Calculate if we have enough space to add additional
                 * metaslabs. We report the expandable space in terms
                 * of the metaslab size since that's the unit of expansion.
                 * Adjust by the EFI system partition size.
                 */
                tspace = tvd->vdev_max_asize - tvd->vdev_asize;
                if (tspace > mc->mc_spa->spa_bootsize) {
                        tspace -= mc->mc_spa->spa_bootsize;
                }
                space += P2ALIGN(tspace, 1ULL << tvd->vdev_ms_shift);
        }
        spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
        return (space);
}
static int
metaslab_compare(const void *x1, const void *x2)
{
        const metaslab_t *m1 = (const metaslab_t *)x1;
        const metaslab_t *m2 = (const metaslab_t *)x2;

        int sort1 = 0;
        int sort2 = 0;
        if (m1->ms_allocator != -1 && m1->ms_primary)
                sort1 = 1;
        else if (m1->ms_allocator != -1 && !m1->ms_primary)
                sort1 = 2;
        if (m2->ms_allocator != -1 && m2->ms_primary)
                sort2 = 1;
        else if (m2->ms_allocator != -1 && !m2->ms_primary)
                sort2 = 2;

        /*
         * Sort inactive metaslabs first, then primaries, then secondaries. When
         * selecting a metaslab to allocate from, an allocator first tries its
         * primary, then secondary active metaslab. If it doesn't have active
         * metaslabs, or can't allocate from them, it searches for an inactive
         * metaslab to activate. If it can't find a suitable one, it will steal
         * a primary or secondary metaslab from another allocator.
         */
        if (sort1 < sort2)
                return (-1);
        if (sort1 > sort2)
                return (1);

        int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
        if (cmp != 0)
                return (cmp);

        IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

        return (AVL_CMP(m1->ms_start, m2->ms_start));
}
/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
        spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
        uint64_t allocated = 0;
        uint64_t sm_free_space, msp_free_space;

        ASSERT(MUTEX_HELD(&msp->ms_lock));

        if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
                return;

        /*
         * We can only verify the metaslab space when we're called
         * from syncing context with a loaded metaslab that has an allocated
         * space map. Calling this in non-syncing context does not
         * provide a consistent view of the metaslab since we're performing
         * allocations in the future.
         */
        if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
            !msp->ms_loaded)
                return;

        sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
            space_map_alloc_delta(msp->ms_sm);

        /*
         * Account for future allocations since we would have already
         * deducted that space from the ms_freetree.
         */
        for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
                allocated +=
                    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
        }

        msp_free_space = range_tree_space(msp->ms_allocatable) + allocated +
            msp->ms_deferspace + range_tree_space(msp->ms_freed);

        VERIFY3U(sm_free_space, ==, msp_free_space);
}
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is above
 * the zfs_mg_noalloc_threshold and the fragmentation value is not
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
        vdev_t *vd = mg->mg_vd;
        metaslab_class_t *mc = mg->mg_class;
        vdev_stat_t *vs = &vd->vdev_stat;
        boolean_t was_allocatable;
        boolean_t was_initialized;

        ASSERT(vd == vd->vdev_top);
        ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
            SCL_ALLOC);

        mutex_enter(&mg->mg_lock);
        was_allocatable = mg->mg_allocatable;
        was_initialized = mg->mg_initialized;

        mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
            (vs->vs_space + 1);

        mutex_enter(&mc->mc_lock);

        /*
         * If the metaslab group was just added then it won't
         * have any space until we finish syncing out this txg.
         * At that point we will consider it initialized and available
         * for allocations. We also don't consider non-activated
         * metaslab groups (e.g. vdevs that are in the middle of being removed)
         * to be initialized, because they can't be used for allocation.
         */
        mg->mg_initialized = metaslab_group_initialized(mg);
        if (!was_initialized && mg->mg_initialized) {
                mc->mc_groups++;
        } else if (was_initialized && !mg->mg_initialized) {
                ASSERT3U(mc->mc_groups, >, 0);
                mc->mc_groups--;
        }
        if (mg->mg_initialized)
                mg->mg_no_free_space = B_FALSE;

        /*
         * A metaslab group is considered allocatable if it has plenty
         * of free space or is not heavily fragmented. We only take
         * fragmentation into account if the metaslab group has a valid
         * fragmentation metric (i.e. a value between 0 and 100).
         */
        mg->mg_allocatable = (mg->mg_activation_count > 0 &&
            mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
            (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
            mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

        /*
         * The mc_alloc_groups maintains a count of the number of
         * groups in this metaslab class that are still above the
         * zfs_mg_noalloc_threshold. This is used by the allocating
         * threads to determine if they should avoid allocations to
         * a given group. The allocator will avoid allocations to a group
         * if that group has reached or is below the zfs_mg_noalloc_threshold
         * and there are still other groups that are above the threshold.
         * When a group transitions from allocatable to non-allocatable or
         * vice versa we update the metaslab class to reflect that change.
         * When the mc_alloc_groups value drops to 0 that means that all
         * groups have reached the zfs_mg_noalloc_threshold making all groups
         * eligible for allocations. This effectively means that all devices
         * are balanced again.
         */
        if (was_allocatable && !mg->mg_allocatable)
                mc->mc_alloc_groups--;
        else if (!was_allocatable && mg->mg_allocatable)
                mc->mc_alloc_groups++;
        mutex_exit(&mc->mc_lock);

        mutex_exit(&mg->mg_lock);
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
        metaslab_group_t *mg;

        mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
        mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
        mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
            KM_SLEEP);
        mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
            KM_SLEEP);
        avl_create(&mg->mg_metaslab_tree, metaslab_compare,
            sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
        mg->mg_vd = vd;
        mg->mg_class = mc;
        mg->mg_activation_count = 0;
        mg->mg_initialized = B_FALSE;
        mg->mg_no_free_space = B_TRUE;
        mg->mg_allocators = allocators;

        mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
            sizeof (refcount_t), KM_SLEEP);
        mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
            sizeof (uint64_t), KM_SLEEP);
        for (int i = 0; i < allocators; i++) {
                refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
                mg->mg_cur_max_alloc_queue_depth[i] = 0;
        }

        mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
            minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

        return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
        ASSERT(mg->mg_prev == NULL);
        ASSERT(mg->mg_next == NULL);
        /*
         * We may have gone below zero with the activation count
         * either because we never activated in the first place or
         * because we're done, and possibly removing the vdev.
         */
        ASSERT(mg->mg_activation_count <= 0);

        taskq_destroy(mg->mg_taskq);
        avl_destroy(&mg->mg_metaslab_tree);
        kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
        kmem_free(mg->mg_secondaries, mg->mg_allocators *
            sizeof (metaslab_t *));
        mutex_destroy(&mg->mg_lock);
        mutex_destroy(&mg->mg_ms_initialize_lock);
        cv_destroy(&mg->mg_ms_initialize_cv);

        for (int i = 0; i < mg->mg_allocators; i++) {
                refcount_destroy(&mg->mg_alloc_queue_depth[i]);
                mg->mg_cur_max_alloc_queue_depth[i] = 0;
        }
        kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
            sizeof (refcount_t));
        kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
            sizeof (uint64_t));

        kmem_free(mg, sizeof (metaslab_group_t));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
        metaslab_class_t *mc = mg->mg_class;
        metaslab_group_t *mgprev, *mgnext;

        ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);

        ASSERT(mc->mc_rotor != mg);
        ASSERT(mg->mg_prev == NULL);
        ASSERT(mg->mg_next == NULL);
        ASSERT(mg->mg_activation_count <= 0);

        if (++mg->mg_activation_count <= 0)
                return;

        mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
        metaslab_group_alloc_update(mg);

        if ((mgprev = mc->mc_rotor) == NULL) {
                mg->mg_prev = mg;
                mg->mg_next = mg;
        } else {
                mgnext = mgprev->mg_next;
                mg->mg_prev = mgprev;
                mg->mg_next = mgnext;
                mgprev->mg_next = mg;
                mgnext->mg_prev = mg;
        }
        mc->mc_rotor = mg;
        metaslab_class_minblocksize_update(mc);
}
/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
        metaslab_class_t *mc = mg->mg_class;
        spa_t *spa = mc->mc_spa;
        metaslab_group_t *mgprev, *mgnext;
        int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

        ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
            (SCL_ALLOC | SCL_ZIO));

        if (--mg->mg_activation_count != 0) {
                ASSERT(mc->mc_rotor != mg);
                ASSERT(mg->mg_prev == NULL);
                ASSERT(mg->mg_next == NULL);
                ASSERT(mg->mg_activation_count < 0);
                return;
        }

        /*
         * The spa_config_lock is an array of rwlocks, ordered as
         * follows (from highest to lowest):
         *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
         *	SCL_ZIO > SCL_FREE > SCL_VDEV
         * (For more information about the spa_config_lock see spa_misc.c)
         * The higher the lock, the broader its coverage. When we passivate
         * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
         * config locks. However, the metaslab group's taskq might be trying
         * to preload metaslabs so we must drop the SCL_ZIO lock and any
         * lower locks to allow the I/O to complete. At a minimum,
         * we continue to hold the SCL_ALLOC lock, which prevents any future
         * allocations from taking place and any changes to the vdev tree.
         */
        spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
        taskq_wait(mg->mg_taskq);
        spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
        metaslab_group_alloc_update(mg);
        for (int i = 0; i < mg->mg_allocators; i++) {
                metaslab_t *msp = mg->mg_primaries[i];

                if (msp != NULL) {
                        mutex_enter(&msp->ms_lock);
                        metaslab_passivate(msp,
                            metaslab_weight_from_range_tree(msp));
                        mutex_exit(&msp->ms_lock);
                }
                msp = mg->mg_secondaries[i];
                if (msp != NULL) {
                        mutex_enter(&msp->ms_lock);
                        metaslab_passivate(msp,
                            metaslab_weight_from_range_tree(msp));
                        mutex_exit(&msp->ms_lock);
                }
        }

        mgprev = mg->mg_prev;
        mgnext = mg->mg_next;

        if (mg == mgnext) {
                mc->mc_rotor = NULL;
        } else {
                mc->mc_rotor = mgnext;
                mgprev->mg_next = mgnext;
                mgnext->mg_prev = mgprev;
        }

        mg->mg_prev = NULL;
        mg->mg_next = NULL;
        metaslab_class_minblocksize_update(mc);
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
        vdev_t *vd = mg->mg_vd;
        vdev_stat_t *vs = &vd->vdev_stat;

        return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
        return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
        uint64_t *mg_hist;
        vdev_t *vd = mg->mg_vd;
        uint64_t ashift = vd->vdev_ashift;
        int i;

        if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
                return;

        mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
            KM_SLEEP);

        ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
            SPACE_MAP_HISTOGRAM_SIZE + ashift);

        for (int m = 0; m < vd->vdev_ms_count; m++) {
                metaslab_t *msp = vd->vdev_ms[m];

                if (msp->ms_sm == NULL)
                        continue;

                for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
                        mg_hist[i + ashift] +=
                            msp->ms_sm->sm_phys->smp_histogram[i];
        }

        for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
                VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

        kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
        metaslab_class_t *mc = mg->mg_class;
        uint64_t ashift = mg->mg_vd->vdev_ashift;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        if (msp->ms_sm == NULL)
                return;

        mutex_enter(&mg->mg_lock);
        for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
                mg->mg_histogram[i + ashift] +=
                    msp->ms_sm->sm_phys->smp_histogram[i];
                mc->mc_histogram[i + ashift] +=
                    msp->ms_sm->sm_phys->smp_histogram[i];
        }
        mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
        metaslab_class_t *mc = mg->mg_class;
        uint64_t ashift = mg->mg_vd->vdev_ashift;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        if (msp->ms_sm == NULL)
                return;

        mutex_enter(&mg->mg_lock);
        for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
                ASSERT3U(mg->mg_histogram[i + ashift], >=,
                    msp->ms_sm->sm_phys->smp_histogram[i]);
                ASSERT3U(mc->mc_histogram[i + ashift], >=,
                    msp->ms_sm->sm_phys->smp_histogram[i]);

                mg->mg_histogram[i + ashift] -=
                    msp->ms_sm->sm_phys->smp_histogram[i];
                mc->mc_histogram[i + ashift] -=
                    msp->ms_sm->sm_phys->smp_histogram[i];
        }
        mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
        ASSERT(msp->ms_group == NULL);
        mutex_enter(&mg->mg_lock);
        msp->ms_group = mg;
        msp->ms_weight = 0;
        avl_add(&mg->mg_metaslab_tree, msp);
        mutex_exit(&mg->mg_lock);

        mutex_enter(&msp->ms_lock);
        metaslab_group_histogram_add(mg, msp);
        mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
        mutex_enter(&msp->ms_lock);
        metaslab_group_histogram_remove(mg, msp);
        mutex_exit(&msp->ms_lock);

        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == mg);
        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_group = NULL;
        mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
        ASSERT(MUTEX_HELD(&mg->mg_lock));
        ASSERT(msp->ms_group == mg);

        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_weight = weight;
        avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
        /*
         * Although in principle the weight can be any value, in
         * practice we do not use values in the range [1, 511].
         */
        ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
        ASSERT(MUTEX_HELD(&msp->ms_lock));

        mutex_enter(&mg->mg_lock);
        metaslab_group_sort_impl(mg, msp, weight);
        mutex_exit(&mg->mg_lock);
}
/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
        vdev_t *vd = mg->mg_vd;
        uint64_t fragmentation = 0;
        uint64_t valid_ms = 0;

        for (int m = 0; m < vd->vdev_ms_count; m++) {
                metaslab_t *msp = vd->vdev_ms[m];

                if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
                        continue;

                valid_ms++;
                fragmentation += msp->ms_fragmentation;
        }

        if (valid_ms <= vd->vdev_ms_count / 2)
                return (ZFS_FRAG_INVALID);

        fragmentation /= valid_ms;
        ASSERT3U(fragmentation, <=, 100);
        return (fragmentation);
}
/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    uint64_t psize, int allocator, int d)
{
        spa_t *spa = mg->mg_vd->vdev_spa;
        metaslab_class_t *mc = mg->mg_class;

        /*
         * We can only consider skipping this metaslab group if it's
         * in the normal metaslab class and there are other metaslab
         * groups to select from. Otherwise, we always consider it eligible
         * for allocations.
         */
        if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
                return (B_TRUE);

        /*
         * If the metaslab group's mg_allocatable flag is set (see comments
         * in metaslab_group_alloc_update() for more information) and
         * the allocation throttle is disabled then allow allocations to this
         * device. However, if the allocation throttle is enabled then
         * check if we have reached our allocation limit (mg_alloc_queue_depth)
         * to determine if we should allow allocations to this metaslab group.
         * If all metaslab groups are no longer considered allocatable
         * (mc_alloc_groups == 0) or we're trying to allocate the smallest
         * gang block size then we allow allocations on this metaslab group
         * regardless of the mg_allocatable or throttle settings.
         */
        if (mg->mg_allocatable) {
                metaslab_group_t *mgp;
                int64_t qdepth;
                uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];

                if (!mc->mc_alloc_throttle_enabled)
                        return (B_TRUE);

                /*
                 * If this metaslab group does not have any free space, then
                 * there is no point in looking further.
                 */
                if (mg->mg_no_free_space)
                        return (B_FALSE);

                /*
                 * Relax allocation throttling for ditto blocks. Due to
                 * random imbalances in allocation it tends to push copies
                 * to one vdev that momentarily looks slightly better than
                 * the others.
                 */
                qmax = qmax * (4 + d) / 4;
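
                /*
                 * Worked example (illustrative): for the third DVA of a
                 * ditto block (d == 2) the limit becomes qmax * 6 / 4,
                 * i.e. the queue depth is relaxed to 1.5x its configured
                 * maximum.
                 */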
                qdepth = refcount_count(&mg->mg_alloc_queue_depth[allocator]);

                /*
                 * If this metaslab group is below its qmax or it's
                 * the only allocatable metaslab group, then attempt
                 * to allocate from it.
                 */
                if (qdepth < qmax || mc->mc_alloc_groups == 1)
                        return (B_TRUE);
                ASSERT3U(mc->mc_alloc_groups, >, 1);

                /*
                 * Since this metaslab group is at or over its qmax, we
                 * need to determine if there are metaslab groups after this
                 * one that might be able to handle this allocation. This is
                 * racy since we can't hold the locks for all metaslab
                 * groups at the same time when we make this check.
                 */
                for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
                        qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
                        qmax = qmax * (4 + d) / 4;
                        qdepth = refcount_count(
                            &mgp->mg_alloc_queue_depth[allocator]);

                        /*
                         * If there is another metaslab group that
                         * might be able to handle the allocation, then
                         * we return false so that we skip this group.
                         */
                        if (qdepth < qmax && !mgp->mg_no_free_space)
                                return (B_FALSE);
                }

                /*
                 * We didn't find another group to handle the allocation
                 * so we can't skip this metaslab group even though
                 * we are at or over our qmax.
                 */
                return (B_TRUE);
        } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
                return (B_TRUE);
        }

        return (B_FALSE);
}
/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
        const range_seg_t *r1 = x1;
        const range_seg_t *r2 = x2;
        uint64_t rs_size1 = r1->rs_end - r1->rs_start;
        uint64_t rs_size2 = r2->rs_end - r2->rs_start;

        int cmp = AVL_CMP(rs_size1, rs_size2);
        if (cmp != 0)
                return (cmp);

        return (AVL_CMP(r1->rs_start, r2->rs_start));
}
/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
        avl_tree_t *t = &msp->ms_allocatable_by_size;
        range_seg_t *rs;

        if (t == NULL || (rs = avl_last(t)) == NULL)
                return (0ULL);

        return (rs->rs_end - rs->rs_start);
}

static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
        range_seg_t *rs, rsearch;
        avl_index_t where;

        rsearch.rs_start = start;
        rsearch.rs_end = start + size;

        rs = avl_find(t, &rsearch, &where);
        if (rs == NULL)
                rs = avl_nearest(t, where, AVL_AFTER);

        return (rs);
}
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
        range_seg_t *rs = metaslab_block_find(t, *cursor, size);

        while (rs != NULL) {
                uint64_t offset = P2ROUNDUP(rs->rs_start, align);

                if (offset + size <= rs->rs_end) {
                        *cursor = offset + size;
                        return (offset);
                }
                rs = AVL_NEXT(t, rs);
        }

        /*
         * If we know we've searched the whole map (*cursor == 0), give up.
         * Otherwise, reset the cursor to the beginning and try again.
         */
        if (*cursor == 0)
                return (-1ULL);

        *cursor = 0;
        return (metaslab_block_picker(t, cursor, size, align));
}
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
        /*
         * Find the largest power of 2 block size that evenly divides the
         * requested size. This is used to try to allocate blocks with similar
         * alignment from the same area of the metaslab (i.e. same cursor
         * bucket), but it does not guarantee that other allocation sizes
         * will not exist in the same region.
         */
        uint64_t align = size & -size;
        uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
        avl_tree_t *t = &msp->ms_allocatable->rt_root;

        return (metaslab_block_picker(t, cursor, size, align));
}
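
/*
 * Worked example (illustrative): size & -size isolates the lowest set bit,
 * i.e. the largest power of two that evenly divides size. For a 24K
 * (0x6000) request, align is 0x2000 (8K), so the search starts from the
 * 8K-alignment cursor bucket.
 */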
static metaslab_ops_t metaslab_ff_ops = {
        metaslab_ff_alloc
};

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
        /*
         * Find the largest power of 2 block size that evenly divides the
         * requested size. This is used to try to allocate blocks with similar
         * alignment from the same area of the metaslab (i.e. same cursor
         * bucket), but it does not guarantee that other allocation sizes
         * will not exist in the same region.
         */
        uint64_t align = size & -size;
        uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
        range_tree_t *rt = msp->ms_allocatable;
        avl_tree_t *t = &rt->rt_root;
        uint64_t max_size = metaslab_block_maxsize(msp);
        int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT3U(avl_numnodes(t), ==,
            avl_numnodes(&msp->ms_allocatable_by_size));

        if (max_size < size)
                return (-1ULL);

        /*
         * If we're running low on space switch to using the size
         * sorted AVL tree (best-fit).
         */
        if (max_size < metaslab_df_alloc_threshold ||
            free_pct < metaslab_df_free_pct) {
                t = &msp->ms_allocatable_by_size;
                *cursor = 0;
        }

        return (metaslab_block_picker(t, cursor, size, 1ULL));
}
static metaslab_ops_t metaslab_df_ops = {
        metaslab_df_alloc
};

/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
        range_tree_t *rt = msp->ms_allocatable;
        avl_tree_t *t = &msp->ms_allocatable_by_size;
        uint64_t *cursor = &msp->ms_lbas[0];
        uint64_t *cursor_end = &msp->ms_lbas[1];
        uint64_t offset = 0;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

        ASSERT3U(*cursor_end, >=, *cursor);

        if ((*cursor + size) > *cursor_end) {
                range_seg_t *rs;

                rs = avl_last(&msp->ms_allocatable_by_size);
                if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
                        return (-1ULL);

                *cursor = rs->rs_start;
                *cursor_end = rs->rs_end;
        }

        offset = *cursor;
        *cursor += size;

        return (offset);
}
static metaslab_ops_t metaslab_cf_ops = {
        metaslab_cf_alloc
};

/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
        avl_tree_t *t = &msp->ms_allocatable->rt_root;
        avl_index_t where;
        range_seg_t *rs, rsearch;
        uint64_t hbit = highbit64(size);
        uint64_t *cursor = &msp->ms_lbas[hbit - 1];
        uint64_t max_size = metaslab_block_maxsize(msp);

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT3U(avl_numnodes(t), ==,
            avl_numnodes(&msp->ms_allocatable_by_size));

        if (max_size < size)
                return (-1ULL);

        rsearch.rs_start = *cursor;
        rsearch.rs_end = *cursor + size;

        rs = avl_find(t, &rsearch, &where);
        if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
                t = &msp->ms_allocatable_by_size;

                rsearch.rs_start = 0;
                rsearch.rs_end = MIN(max_size,
                    1ULL << (hbit + metaslab_ndf_clump_shift));
                rs = avl_find(t, &rsearch, &where);
                if (rs == NULL)
                        rs = avl_nearest(t, where, AVL_AFTER);
                ASSERT(rs != NULL);
        }

        if ((rs->rs_end - rs->rs_start) >= size) {
                *cursor = rs->rs_start + size;
                return (rs->rs_start);
        }
        return (-1ULL);
}
static metaslab_ops_t metaslab_ndf_ops = {
        metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
/*
 * Wait for any in-progress metaslab loads to complete.
 */
static void
metaslab_load_wait(metaslab_t *msp)
{
        ASSERT(MUTEX_HELD(&msp->ms_lock));

        while (msp->ms_loading) {
                ASSERT(!msp->ms_loaded);
                cv_wait(&msp->ms_load_cv, &msp->ms_lock);
        }
}

int
metaslab_load(metaslab_t *msp)
{
        int error = 0;
        boolean_t success = B_FALSE;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT(!msp->ms_loaded);
        ASSERT(!msp->ms_loading);

        msp->ms_loading = B_TRUE;
        /*
         * Nobody else can manipulate a loading metaslab, so it's now safe
         * to drop the lock. This way we don't have to hold the lock while
         * reading the spacemap from disk.
         */
        mutex_exit(&msp->ms_lock);

        /*
         * If the space map has not been allocated yet, then treat
         * all the space in the metaslab as free and add it to ms_allocatable.
         */
        if (msp->ms_sm != NULL) {
                error = space_map_load(msp->ms_sm, msp->ms_allocatable,
                    SM_FREE);
        } else {
                range_tree_add(msp->ms_allocatable,
                    msp->ms_start, msp->ms_size);
        }

        success = (error == 0);

        mutex_enter(&msp->ms_lock);
        msp->ms_loading = B_FALSE;

        if (success) {
                ASSERT3P(msp->ms_group, !=, NULL);
                msp->ms_loaded = B_TRUE;

                /*
                 * If the metaslab already has a spacemap, then we need to
                 * remove all segments from the defer tree; otherwise, the
                 * metaslab is completely empty and we can skip this.
                 */
                if (msp->ms_sm != NULL) {
                        for (int t = 0; t < TXG_DEFER_SIZE; t++) {
                                range_tree_walk(msp->ms_defer[t],
                                    range_tree_remove, msp->ms_allocatable);
                        }
                }
                msp->ms_max_size = metaslab_block_maxsize(msp);
        }
        cv_broadcast(&msp->ms_load_cv);
        return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
        ASSERT(MUTEX_HELD(&msp->ms_lock));
        range_tree_vacate(msp->ms_allocatable, NULL, NULL);
        msp->ms_loaded = B_FALSE;
        msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
        msp->ms_max_size = 0;
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
        vdev_t *vd = mg->mg_vd;
        objset_t *mos = vd->vdev_spa->spa_meta_objset;
        metaslab_t *ms;
        int error;

        ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
        mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);

        ms->ms_id = id;
        ms->ms_start = id << vd->vdev_ms_shift;
        ms->ms_size = 1ULL << vd->vdev_ms_shift;
        ms->ms_allocator = -1;
        ms->ms_new = B_TRUE;

        /*
         * We only open space map objects that already exist. All others
         * will be opened when we finally allocate an object for it.
         */
        if (object != 0) {
                error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
                    ms->ms_size, vd->vdev_ashift);

                if (error != 0) {
                        kmem_free(ms, sizeof (metaslab_t));
                        return (error);
                }

                ASSERT(ms->ms_sm != NULL);
        }

        /*
         * We create the main range tree here, but we don't create the
         * other range trees until metaslab_sync_done(). This serves
         * two purposes: it allows metaslab_sync_done() to detect the
         * addition of new space; and for debugging, it ensures that we'd
         * data fault on any attempt to use this metaslab before it's ready.
         */
        ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
            &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
        metaslab_group_add(mg, ms);

        metaslab_set_fragmentation(ms);

        /*
         * If we're opening an existing pool (txg == 0) or creating
         * a new one (txg == TXG_INITIAL), all space is available now.
         * If we're adding space to an existing pool, the new space
         * does not become available until after this txg has synced.
         * The metaslab's weight will also be initialized when we sync
         * out this txg. This ensures that we don't attempt to allocate
         * from it before we have initialized it completely.
         */
        if (txg <= TXG_INITIAL)
                metaslab_sync_done(ms, 0);

        /*
         * If metaslab_debug_load is set and we're initializing a metaslab
         * that has an allocated space map object then load its space map
         * so that we can verify frees.
         */
        if (metaslab_debug_load && ms->ms_sm != NULL) {
                mutex_enter(&ms->ms_lock);
                VERIFY0(metaslab_load(ms));
                mutex_exit(&ms->ms_lock);
        }

        if (txg != 0) {
                vdev_dirty(vd, 0, NULL, txg);
                vdev_dirty(vd, VDD_METASLAB, ms, txg);
        }

        *msp = ms;

        return (0);
}
void
metaslab_fini(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;

        metaslab_group_remove(mg, msp);

        mutex_enter(&msp->ms_lock);
        VERIFY(msp->ms_group == NULL);
        vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
            0, -msp->ms_size);
        space_map_close(msp->ms_sm);

        metaslab_unload(msp);
        range_tree_destroy(msp->ms_allocatable);
        range_tree_destroy(msp->ms_freeing);
        range_tree_destroy(msp->ms_freed);

        for (int t = 0; t < TXG_SIZE; t++) {
                range_tree_destroy(msp->ms_allocating[t]);
        }

        for (int t = 0; t < TXG_DEFER_SIZE; t++) {
                range_tree_destroy(msp->ms_defer[t]);
        }
        ASSERT0(msp->ms_deferspace);

        range_tree_destroy(msp->ms_checkpointing);

        mutex_exit(&msp->ms_lock);
        cv_destroy(&msp->ms_load_cv);
        mutex_destroy(&msp->ms_lock);
        mutex_destroy(&msp->ms_sync_lock);
        ASSERT3U(msp->ms_allocator, ==, -1);

        kmem_free(msp, sizeof (metaslab_t));
}
#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
 */
static void
metaslab_set_fragmentation(metaslab_t *msp)
{
        spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
        uint64_t fragmentation = 0;
        uint64_t total = 0;
        boolean_t feature_enabled = spa_feature_is_enabled(spa,
            SPA_FEATURE_SPACEMAP_HISTOGRAM);

        if (!feature_enabled) {
                msp->ms_fragmentation = ZFS_FRAG_INVALID;
                return;
        }

        /*
         * A null space map means that the entire metaslab is free
         * and thus is not fragmented.
         */
        if (msp->ms_sm == NULL) {
                msp->ms_fragmentation = 0;
                return;
        }

        /*
         * If this metaslab's space map has not been upgraded, flag it
         * so that we upgrade next time we encounter it.
         */
        if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
                uint64_t txg = spa_syncing_txg(spa);
                vdev_t *vd = msp->ms_group->mg_vd;

                /*
                 * If we've reached the final dirty txg, then we must
                 * be shutting down the pool. We don't want to dirty
                 * any data past this point so skip setting the condense
                 * flag. We can retry this action the next time the pool
                 * is imported.
                 */
                if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
                        msp->ms_condense_wanted = B_TRUE;
                        vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
                        zfs_dbgmsg("txg %llu, requesting force condense: "
                            "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
                            vd->vdev_id);
                }
                msp->ms_fragmentation = ZFS_FRAG_INVALID;
                return;
        }

        for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
                uint64_t space = 0;
                uint8_t shift = msp->ms_sm->sm_shift;

                int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
                    FRAGMENTATION_TABLE_SIZE - 1);

                if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
                        continue;

                space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
                total += space;

                ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
                fragmentation += space * zfs_frag_table[idx];
        }

        if (total > 0)
                fragmentation /= total;
        ASSERT3U(fragmentation, <=, 100);

        msp->ms_fragmentation = fragmentation;
}
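
/*
 * Worked example (illustrative, with hypothetical table values): if half of
 * a metaslab's free space sat in histogram buckets the table scored at 95
 * and the other half in buckets scored at 30, the weighted sum above would
 * yield 0.5 * 95 + 0.5 * 30 = 62.5, i.e. roughly 62% fragmentation.
 */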
/*
 * Compute a weight -- a selection preference value -- for the given metaslab.
 * This is based on the amount of free space, the level of fragmentation,
 * the LBA range, and whether the metaslab is loaded.
 */
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;
        vdev_t *vd = mg->mg_vd;
        uint64_t weight, space;

        ASSERT(MUTEX_HELD(&msp->ms_lock));
        ASSERT(!vd->vdev_removing);

        /*
         * The baseline weight is the metaslab's free space.
         */
        space = msp->ms_size - space_map_allocated(msp->ms_sm);

        if (metaslab_fragmentation_factor_enabled &&
            msp->ms_fragmentation != ZFS_FRAG_INVALID) {
                /*
                 * Use the fragmentation information to inversely scale
                 * down the baseline weight. We need to ensure that we
                 * don't exclude this metaslab completely when it's 100%
                 * fragmented. To avoid this we reduce the fragmented value
                 * by 1.
                 */
                space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;

                /*
                 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
                 * this metaslab again. The fragmentation metric may have
                 * decreased the space to something smaller than
                 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
                 * so that we can consume any remaining space.
                 */
                if (space > 0 && space < SPA_MINBLOCKSIZE)
                        space = SPA_MINBLOCKSIZE;
        }
        weight = space;

        /*
         * Modern disks have uniform bit density and constant angular velocity.
         * Therefore, the outer recording zones are faster (higher bandwidth)
         * than the inner zones by the ratio of outer to inner track diameter,
         * which is typically around 2:1. We account for this by assigning
         * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
         * In effect, this means that we'll select the metaslab with the most
         * free bandwidth rather than simply the one with the most free space.
         */
        if (metaslab_lba_weighting_enabled) {
                weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
                ASSERT(weight >= space && weight <= 2 * space);
        }
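
        /*
         * Worked example (illustrative): on a vdev with 100 metaslabs, the
         * formula above scales the weight of metaslab 0 by 2.0 and that of
         * metaslab 99 by about 1.01, biasing allocation toward the faster
         * outer region of the disk.
         */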
        /*
         * If this metaslab is one we're actively using, adjust its
         * weight to make it preferable to any inactive metaslab so
         * we'll polish it off. If the fragmentation on this metaslab
         * has exceeded our threshold, then don't mark it active.
         */
        if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
            msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
                weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
        }

        WEIGHT_SET_SPACEBASED(weight);
        return (weight);
}
/*
 * Return the weight of the specified metaslab, according to the segment-based
 * weighting algorithm. The metaslab must be loaded. This function can
 * be called within a sync pass since it relies only on the metaslab's
 * range tree which is always accurate when the metaslab is loaded.
 */
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
        uint64_t weight = 0;
        uint32_t segments = 0;

        ASSERT(msp->ms_loaded);

        for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
            i--) {
                uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
                int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

                segments <<= 1;
                segments += msp->ms_allocatable->rt_histogram[i];

                /*
                 * The range tree provides more precision than the space map
                 * and must be downgraded so that all values fit within the
                 * space map's histogram. This allows us to compare loaded
                 * vs. unloaded metaslabs to determine which metaslab is
                 * considered "best".
                 */
                if (i > max_idx)
                        continue;

                if (segments != 0) {
                        WEIGHT_SET_COUNT(weight, segments);
                        WEIGHT_SET_INDEX(weight, i);
                        WEIGHT_SET_ACTIVE(weight, 0);
                        break;
                }
        }
        return (weight);
}
/*
 * Calculate the weight based on the on-disk histogram. This should only
 * be called after a sync pass has completely finished since the on-disk
 * information is updated in metaslab_sync().
 */
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
        uint64_t weight = 0;

        for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
                if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
                        WEIGHT_SET_COUNT(weight,
                            msp->ms_sm->sm_phys->smp_histogram[i]);
                        WEIGHT_SET_INDEX(weight, i +
                            msp->ms_sm->sm_shift);
                        WEIGHT_SET_ACTIVE(weight, 0);
                        break;
                }
        }
        return (weight);
}
1919 * Compute a segment-based weight for the specified metaslab. The weight
1920 * is determined by highest bucket in the histogram. The information
1921 * for the highest bucket is encoded into the weight value.
1924 metaslab_segment_weight(metaslab_t *msp)
1926 metaslab_group_t *mg = msp->ms_group;
1927 uint64_t weight = 0;
1928 uint8_t shift = mg->mg_vd->vdev_ashift;
1930 ASSERT(MUTEX_HELD(&msp->ms_lock));
1933 * The metaslab is completely free.
1935 if (space_map_allocated(msp->ms_sm) == 0) {
1936 int idx = highbit64(msp->ms_size) - 1;
1937 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1939 if (idx < max_idx) {
1940 WEIGHT_SET_COUNT(weight, 1ULL);
1941 WEIGHT_SET_INDEX(weight, idx);
1943 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1944 WEIGHT_SET_INDEX(weight, max_idx);
1946 WEIGHT_SET_ACTIVE(weight, 0);
1947 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
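/*
 * Worked example (illustrative, assuming SPACE_MAP_HISTOGRAM_SIZE
 * is 32): a completely free 8GB metaslab has
 * idx = highbit64(2^33) - 1 = 33. With ashift = 12,
 * max_idx = 32 + 12 - 1 = 43, so idx < max_idx and the weight
 * encodes a single segment (count = 1) at index 33: one maximally
 * sized contiguous region covering the whole metaslab.
 */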
1952 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1955 * If the metaslab is fully allocated then just make the weight 0.
1957 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1960 * If the metaslab is already loaded, then use the range tree to
1961 * determine the weight. Otherwise, we rely on the space map information
1962 * to generate the weight.
1964 if (msp->ms_loaded) {
1965 weight = metaslab_weight_from_range_tree(msp);
1967 weight = metaslab_weight_from_spacemap(msp);
1971 * If the metaslab was active the last time we calculated its weight
1972 * then keep it active. We want to consume the entire region that
1973 * is associated with this weight.
1975 if (msp->ms_activation_weight != 0 && weight != 0)
1976 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1981 * Determine if we should attempt to allocate from this metaslab. If the
1982 * metaslab has a maximum size then we can quickly determine if the desired
1983 * allocation size can be satisfied. Otherwise, if we're using segment-based
1984 * weighting then we can determine the maximum allocation that this metaslab
1985 * can accommodate based on the index encoded in the weight. If we're using
1986 * space-based weights then rely on the entire weight (excluding the weight
1990 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1992 boolean_t should_allocate;
1994 if (msp->ms_max_size != 0)
1995 return (msp->ms_max_size >= asize);
1997 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1999 * The metaslab segment weight indicates segments in the
2000 * range [2^i, 2^(i+1)), where i is the index in the weight.
2001 * Since the asize might be in the middle of the range, we
2002 * should attempt the allocation if asize < 2^(i+1).
2004 should_allocate = (asize <
2005 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
2007 should_allocate = (asize <=
2008 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2010 return (should_allocate);
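/*
 * Worked example (illustrative): a segment-based weight with
 * index i = 17 reports free segments somewhere in [128K, 256K).
 * An allocation with asize = 192K is attempted because
 * 192K < 2^18, even though the largest segment might turn out to
 * be only 130K; an asize of 256K is rejected outright since no
 * segment in that bucket can satisfy it.
 */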
2014 metaslab_weight(metaslab_t *msp)
2016 vdev_t *vd = msp->ms_group->mg_vd;
2017 spa_t *spa = vd->vdev_spa;
2020 ASSERT(MUTEX_HELD(&msp->ms_lock));
2023 * If this vdev is in the process of being removed, there is nothing
2024 * for us to do here.
2026 if (vd->vdev_removing)
2029 metaslab_set_fragmentation(msp);
2032 * Update the maximum size if the metaslab is loaded. This will
2033 * ensure that we get an accurate maximum size if newly freed space
2034 * has been added back into the free tree.
2037 msp->ms_max_size = metaslab_block_maxsize(msp);
2040 * Segment-based weighting requires space map histogram support.
2042 if (zfs_metaslab_segment_weight_enabled &&
2043 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2044 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2045 sizeof (space_map_phys_t))) {
2046 weight = metaslab_segment_weight(msp);
2048 weight = metaslab_space_weight(msp);
2054 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2055 int allocator, uint64_t activation_weight)
2058 * If we're activating for the claim code, we don't want to actually
2059 * set the metaslab up for a specific allocator.
2061 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2063 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2064 mg->mg_primaries : mg->mg_secondaries);
2066 ASSERT(MUTEX_HELD(&msp->ms_lock));
2067 mutex_enter(&mg->mg_lock);
2068 if (arr[allocator] != NULL) {
2069 mutex_exit(&mg->mg_lock);
2073 arr[allocator] = msp;
2074 ASSERT3S(msp->ms_allocator, ==, -1);
2075 msp->ms_allocator = allocator;
2076 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2077 mutex_exit(&mg->mg_lock);
2083 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2085 ASSERT(MUTEX_HELD(&msp->ms_lock));
2087 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
2089 metaslab_load_wait(msp);
2090 if (!msp->ms_loaded) {
2091 if ((error = metaslab_load(msp)) != 0) {
2092 metaslab_group_sort(msp->ms_group, msp, 0);
2096 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2098 * The metaslab was activated for another allocator
2099 * while we were waiting, so we should reselect.
2103 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2104 allocator, activation_weight)) != 0) {
2108 msp->ms_activation_weight = msp->ms_weight;
2109 metaslab_group_sort(msp->ms_group, msp,
2110 msp->ms_weight | activation_weight);
2112 ASSERT(msp->ms_loaded);
2113 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2119 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2122 ASSERT(MUTEX_HELD(&msp->ms_lock));
2123 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2124 metaslab_group_sort(mg, msp, weight);
2128 mutex_enter(&mg->mg_lock);
2129 ASSERT3P(msp->ms_group, ==, mg);
2130 if (msp->ms_primary) {
2131 ASSERT3U(0, <=, msp->ms_allocator);
2132 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2133 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2134 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2135 mg->mg_primaries[msp->ms_allocator] = NULL;
2137 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2138 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2139 mg->mg_secondaries[msp->ms_allocator] = NULL;
2141 msp->ms_allocator = -1;
2142 metaslab_group_sort_impl(mg, msp, weight);
2143 mutex_exit(&mg->mg_lock);
2147 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2149 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
2152 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2153 * this metaslab again. In that case, it had better be empty,
2154 * or we would be leaving space on the table.
2156 ASSERT(size >= SPA_MINBLOCKSIZE ||
2157 range_tree_is_empty(msp->ms_allocatable));
2158 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2160 msp->ms_activation_weight = 0;
2161 metaslab_passivate_allocator(msp->ms_group, msp, weight);
2162 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2166 * Segment-based metaslabs are activated once and remain active until
2167 * we either fail an allocation attempt (similar to space-based metaslabs)
2168 * or have exhausted the free space in zfs_metaslab_switch_threshold
2169 * buckets since the metaslab was activated. This function checks to see
2170 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2171 * metaslab and passivates it proactively. This will allow us to select a
2172 * metaslab with a larger contiguous region, if any remains within this
2173 * metaslab group. If we're in sync pass > 1, then we continue using this
2174 * metaslab so that we don't dirty more blocks and cause more sync passes.
2177 metaslab_segment_may_passivate(metaslab_t *msp)
2179 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2181 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2185 * Since we are in the middle of a sync pass, the most accurate
2186 * information that is accessible to us is the in-core range tree
2187 * histogram; calculate the new weight based on that information.
2189 uint64_t weight = metaslab_weight_from_range_tree(msp);
2190 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2191 int current_idx = WEIGHT_GET_INDEX(weight);
2193 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2194 metaslab_passivate(msp, weight);
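/*
 * Illustrative numbers: with zfs_metaslab_switch_threshold set to,
 * say, 2, a metaslab activated at histogram index 20 (segments of
 * 1MB-2MB) is proactively passivated once its highest occupied
 * bucket drops to index 18 or below, freeing us to select a group
 * sibling with larger contiguous regions.
 */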
2198 metaslab_preload(void *arg)
2200 metaslab_t *msp = arg;
2201 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2203 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2205 mutex_enter(&msp->ms_lock);
2206 metaslab_load_wait(msp);
2207 if (!msp->ms_loaded)
2208 (void) metaslab_load(msp);
2209 msp->ms_selected_txg = spa_syncing_txg(spa);
2210 mutex_exit(&msp->ms_lock);
2214 metaslab_group_preload(metaslab_group_t *mg)
2216 spa_t *spa = mg->mg_vd->vdev_spa;
2218 avl_tree_t *t = &mg->mg_metaslab_tree;
2221 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2222 taskq_wait(mg->mg_taskq);
2226 mutex_enter(&mg->mg_lock);
2229 * Load the next potential metaslabs
2231 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2232 ASSERT3P(msp->ms_group, ==, mg);
2235 * We preload only the maximum number of metaslabs specified
2236 * by metaslab_preload_limit. If a metaslab is being forced
2237 * to condense then we preload it too. This will ensure
2238 * that force condensing happens in the next txg.
2240 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2244 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2245 msp, TQ_SLEEP) != 0);
2247 mutex_exit(&mg->mg_lock);
2251 * Determine if the space map's on-disk footprint is past our tolerance
2252 * for inefficiency. We would like to use the following criteria to make
2255 * 1. The size of the space map object should not dramatically increase as a
2256 * result of writing out the free space range tree.
2258 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2259 * times the size of the free space range tree representation
2260 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2262 * 3. The on-disk size of the space map should actually decrease.
2264 * Unfortunately, we cannot compute the on-disk size of the space map in this
2265 * context because we cannot accurately compute the effects of compression, etc.
2266 * Instead, we apply the heuristic described in the block comment for
2267 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2268 * is greater than a threshold number of blocks.
2271 metaslab_should_condense(metaslab_t *msp)
2273 space_map_t *sm = msp->ms_sm;
2274 vdev_t *vd = msp->ms_group->mg_vd;
2275 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2276 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
2278 ASSERT(MUTEX_HELD(&msp->ms_lock));
2279 ASSERT(msp->ms_loaded);
2282 * Allocations and frees in early passes are generally more space
2283 * efficient (in terms of blocks described in space map entries)
2284 * than the ones in later passes (e.g. we don't compress after
2285 * sync pass 5) and condensing a metaslab multiple times in a txg
2286 * could degrade performance.
2288 * Thus we prefer condensing each metaslab at most once every txg at
2289 * the earliest sync pass possible. If a metaslab is eligible for
2290 * condensing again after being considered for condensing within the
2291 * same txg, it will hopefully be dirty in the next txg where it will
2292 * be condensed at an earlier pass.
2294 if (msp->ms_condense_checked_txg == current_txg)
2296 msp->ms_condense_checked_txg = current_txg;
2299 * We always condense metaslabs that are empty and metaslabs for
2300 * which a condense request has been made.
2302 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2303 msp->ms_condense_wanted)
2306 uint64_t object_size = space_map_length(msp->ms_sm);
2307 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2308 msp->ms_allocatable, SM_NO_VDEVID);
2310 dmu_object_info_t doi;
2311 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2312 uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2314 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2315 object_size > zfs_metaslab_condense_block_threshold * record_size);
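/*
 * Worked example (illustrative, assuming zfs_condense_pct = 200,
 * zfs_metaslab_condense_block_threshold = 4, and a 4K record size):
 * a space map whose on-disk length is 64K is condensed only if its
 * estimated optimal size is at most 32K (the 2x rule); the 64K
 * length already clears the 4-block (16K) floor.
 */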
2319 * Condense the on-disk space map representation to its minimized form.
2320 * The minimized form consists of a small number of allocations followed by
2321 * the entries of the free range tree.
2324 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2326 range_tree_t *condense_tree;
2327 space_map_t *sm = msp->ms_sm;
2329 ASSERT(MUTEX_HELD(&msp->ms_lock));
2330 ASSERT(msp->ms_loaded);
2332 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2333 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2334 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2335 msp->ms_group->mg_vd->vdev_spa->spa_name,
2336 space_map_length(msp->ms_sm),
2337 avl_numnodes(&msp->ms_allocatable->rt_root),
2338 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2340 msp->ms_condense_wanted = B_FALSE;
2343 * Create a range tree that is 100% allocated. We remove segments
2344 * that have been freed in this txg, any deferred frees that exist,
2345 * and any allocation in the future. Removing segments should be
2346 * a relatively inexpensive operation since we expect these trees to
2347 * have a small number of nodes.
2349 condense_tree = range_tree_create(NULL, NULL);
2350 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2352 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2353 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
2355 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2356 range_tree_walk(msp->ms_defer[t],
2357 range_tree_remove, condense_tree);
2360 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2361 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2362 range_tree_remove, condense_tree);
2366 * We're about to drop the metaslab's lock thus allowing
2367 * other consumers to change its content. Set the
2368 * metaslab's ms_condensing flag to ensure that
2369 * allocations on this metaslab do not occur while we're
2370 * in the middle of committing it to disk. This is only critical
2371 * for ms_allocatable as all other range trees use per txg
2372 * views of their content.
2374 msp->ms_condensing = B_TRUE;
2376 mutex_exit(&msp->ms_lock);
2377 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
2380 * While we would ideally like to create a space map representation
2381 * that consists only of allocation records, doing so can be
2382 * prohibitively expensive because the in-core free tree can be
2383 * large, and therefore computationally expensive to subtract
2384 * from the condense_tree. Instead we sync out two trees, a cheap
2385 * allocation only tree followed by the in-core free tree. While not
2386 * optimal, this is typically close to optimal, and much cheaper to
2389 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
2390 range_tree_vacate(condense_tree, NULL, NULL);
2391 range_tree_destroy(condense_tree);
2393 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
2394 mutex_enter(&msp->ms_lock);
2395 msp->ms_condensing = B_FALSE;
2399 * Write a metaslab to disk in the context of the specified transaction group.
2402 metaslab_sync(metaslab_t *msp, uint64_t txg)
2404 metaslab_group_t *mg = msp->ms_group;
2405 vdev_t *vd = mg->mg_vd;
2406 spa_t *spa = vd->vdev_spa;
2407 objset_t *mos = spa_meta_objset(spa);
2408 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
2410 uint64_t object = space_map_object(msp->ms_sm);
2412 ASSERT(!vd->vdev_ishole);
2415 * This metaslab has just been added so there's no work to do now.
2417 if (msp->ms_freeing == NULL) {
2418 ASSERT3P(alloctree, ==, NULL);
2422 ASSERT3P(alloctree, !=, NULL);
2423 ASSERT3P(msp->ms_freeing, !=, NULL);
2424 ASSERT3P(msp->ms_freed, !=, NULL);
2425 ASSERT3P(msp->ms_checkpointing, !=, NULL);
2428 * Normally, we don't want to process a metaslab if there are no
2429 * allocations or frees to perform. However, if the metaslab is being
2430 * forced to condense and it's loaded, we need to let it through.
2432 if (range_tree_is_empty(alloctree) &&
2433 range_tree_is_empty(msp->ms_freeing) &&
2434 range_tree_is_empty(msp->ms_checkpointing) &&
2435 !(msp->ms_loaded && msp->ms_condense_wanted))
2439 VERIFY(txg <= spa_final_dirty_txg(spa));
2442 * The only state that can actually be changing concurrently with
2443 * metaslab_sync() is the metaslab's ms_allocatable. No other
2444 * thread can be modifying this txg's alloc, freeing,
2445 * freed, or space_map_phys_t. We drop ms_lock whenever we
2446 * could call into the DMU, because the DMU can call down to us
2447 * (e.g. via zio_free()) at any time.
2449 * The spa_vdev_remove_thread() can be reading metaslab state
2450 * concurrently, and it is locked out by the ms_sync_lock. Note
2451 * that the ms_lock is insufficient for this, because it is dropped
2452 * by space_map_write().
2454 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2456 if (msp->ms_sm == NULL) {
2457 uint64_t new_object;
2459 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
2460 VERIFY3U(new_object, !=, 0);
2462 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2463 msp->ms_start, msp->ms_size, vd->vdev_ashift));
2464 ASSERT(msp->ms_sm != NULL);
2467 if (!range_tree_is_empty(msp->ms_checkpointing) &&
2468 vd->vdev_checkpoint_sm == NULL) {
2469 ASSERT(spa_has_checkpoint(spa));
2471 uint64_t new_object = space_map_alloc(mos,
2472 vdev_standard_sm_blksz, tx);
2473 VERIFY3U(new_object, !=, 0);
2475 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2476 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2477 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2480 * We save the space map object as an entry in vdev_top_zap
2481 * so it can be retrieved when the pool is reopened after an
2482 * export or through zdb.
2484 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2485 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2486 sizeof (new_object), 1, &new_object, tx));
2489 mutex_enter(&msp->ms_sync_lock);
2490 mutex_enter(&msp->ms_lock);
2493 * Note: metaslab_condense() clears the space map's histogram.
2494 * Therefore we must verify and remove this histogram before
2497 metaslab_group_histogram_verify(mg);
2498 metaslab_class_histogram_verify(mg->mg_class);
2499 metaslab_group_histogram_remove(mg, msp);
2501 if (msp->ms_loaded && metaslab_should_condense(msp)) {
2502 metaslab_condense(msp, txg, tx);
2504 mutex_exit(&msp->ms_lock);
2505 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
2507 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
2509 mutex_enter(&msp->ms_lock);
2512 if (!range_tree_is_empty(msp->ms_checkpointing)) {
2513 ASSERT(spa_has_checkpoint(spa));
2514 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2517 * Since we are doing writes to disk and the ms_checkpointing
2518 * tree won't be changing during that time, we drop the
2519 * ms_lock while writing to the checkpoint space map.
2521 mutex_exit(&msp->ms_lock);
2522 space_map_write(vd->vdev_checkpoint_sm,
2523 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
2524 mutex_enter(&msp->ms_lock);
2525 space_map_update(vd->vdev_checkpoint_sm);
2527 spa->spa_checkpoint_info.sci_dspace +=
2528 range_tree_space(msp->ms_checkpointing);
2529 vd->vdev_stat.vs_checkpoint_space +=
2530 range_tree_space(msp->ms_checkpointing);
2531 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2532 -vd->vdev_checkpoint_sm->sm_alloc);
2534 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2537 if (msp->ms_loaded) {
2539 * When the space map is loaded, we have an accurate
2540 * histogram in the range tree. This gives us an opportunity
2541 * to bring the space map's histogram up-to-date so we clear
2542 * it first before updating it.
2544 space_map_histogram_clear(msp->ms_sm);
2545 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
2548 * Since we've cleared the histogram we need to add back
2549 * any free space that has already been processed, plus
2550 * any deferred space. This allows the on-disk histogram
2551 * to accurately reflect all free space even if some space
2552 * is not yet available for allocation (i.e. deferred).
2554 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
2557 * Add back any deferred free space that has not been
2558 * added back into the in-core free tree yet. This will
2559 * ensure that we don't end up with a space map histogram
2560 * that is completely empty unless the metaslab is fully
2563 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2564 space_map_histogram_add(msp->ms_sm,
2565 msp->ms_defer[t], tx);
2570 * Always add the free space from this sync pass to the space
2571 * map histogram. We want to make sure that the on-disk histogram
2572 * accounts for all free space. If the space map is not loaded,
2573 * then we will lose some accuracy but will correct it the next
2574 * time we load the space map.
2576 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
2578 metaslab_group_histogram_add(mg, msp);
2579 metaslab_group_histogram_verify(mg);
2580 metaslab_class_histogram_verify(mg->mg_class);
2583 * For sync pass 1, we avoid traversing this txg's free range tree
2584 * and instead will just swap the pointers for freeing and
2585 * freed. We can safely do this since the freed_tree is
2586 * guaranteed to be empty on the initial pass.
2588 if (spa_sync_pass(spa) == 1) {
2589 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
2591 range_tree_vacate(msp->ms_freeing,
2592 range_tree_add, msp->ms_freed);
2594 range_tree_vacate(alloctree, NULL, NULL);
2596 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2597 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2599 ASSERT0(range_tree_space(msp->ms_freeing));
2600 ASSERT0(range_tree_space(msp->ms_checkpointing));
2602 mutex_exit(&msp->ms_lock);
2604 if (object != space_map_object(msp->ms_sm)) {
2605 object = space_map_object(msp->ms_sm);
2606 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2607 msp->ms_id, sizeof (uint64_t), &object, tx);
2609 mutex_exit(&msp->ms_sync_lock);
2614 * Called after a transaction group has completely synced to mark
2615 * all of the metaslab's free space as usable.
2618 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2620 metaslab_group_t *mg = msp->ms_group;
2621 vdev_t *vd = mg->mg_vd;
2622 spa_t *spa = vd->vdev_spa;
2623 range_tree_t **defer_tree;
2624 int64_t alloc_delta, defer_delta;
2625 boolean_t defer_allowed = B_TRUE;
2627 ASSERT(!vd->vdev_ishole);
2629 mutex_enter(&msp->ms_lock);
2632 * If this metaslab is just becoming available, initialize its
2633 * range trees and add its capacity to the vdev.
2635 if (msp->ms_freed == NULL) {
2636 for (int t = 0; t < TXG_SIZE; t++) {
2637 ASSERT(msp->ms_allocating[t] == NULL);
2639 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
2642 ASSERT3P(msp->ms_freeing, ==, NULL);
2643 msp->ms_freeing = range_tree_create(NULL, NULL);
2645 ASSERT3P(msp->ms_freed, ==, NULL);
2646 msp->ms_freed = range_tree_create(NULL, NULL);
2648 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2649 ASSERT(msp->ms_defer[t] == NULL);
2651 msp->ms_defer[t] = range_tree_create(NULL, NULL);
2654 ASSERT3P(msp->ms_checkpointing, ==, NULL);
2655 msp->ms_checkpointing = range_tree_create(NULL, NULL);
2657 vdev_space_update(vd, 0, 0, msp->ms_size);
2659 ASSERT0(range_tree_space(msp->ms_freeing));
2660 ASSERT0(range_tree_space(msp->ms_checkpointing));
2662 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
2664 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2665 metaslab_class_get_alloc(spa_normal_class(spa));
2666 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
2667 defer_allowed = B_FALSE;
2671 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2672 if (defer_allowed) {
2673 defer_delta = range_tree_space(msp->ms_freed) -
2674 range_tree_space(*defer_tree);
2676 defer_delta -= range_tree_space(*defer_tree);
2679 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2682 * If there's a metaslab_load() in progress, wait for it to complete
2683 * so that we have a consistent view of the in-core space map.
2685 metaslab_load_wait(msp);
2688 * Move the frees from the defer_tree back to the free
2689 * range tree (if it's loaded). Swap the freed_tree and
2690 * the defer_tree -- this is safe to do because we've
2691 * just emptied out the defer_tree.
2693 range_tree_vacate(*defer_tree,
2694 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
2695 if (defer_allowed) {
2696 range_tree_swap(&msp->ms_freed, defer_tree);
2698 range_tree_vacate(msp->ms_freed,
2699 msp->ms_loaded ? range_tree_add : NULL,
2700 msp->ms_allocatable);
2702 space_map_update(msp->ms_sm);
2704 msp->ms_deferspace += defer_delta;
2705 ASSERT3S(msp->ms_deferspace, >=, 0);
2706 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2707 if (msp->ms_deferspace != 0) {
2709 * Keep syncing this metaslab until all deferred frees
2710 * are back in circulation.
2712 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2716 msp->ms_new = B_FALSE;
2717 mutex_enter(&mg->mg_lock);
2719 mutex_exit(&mg->mg_lock);
2722 * Calculate the new weights before unloading any metaslabs.
2723 * This will give us the most accurate weighting.
2725 metaslab_group_sort(mg, msp, metaslab_weight(msp) |
2726 (msp->ms_weight & METASLAB_ACTIVE_MASK));
2729 * If the metaslab is loaded and we've not tried to load or allocate
2730 * from it in 'metaslab_unload_delay' txgs, then unload it.
2732 if (msp->ms_loaded &&
2733 msp->ms_initializing == 0 &&
2734 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2735 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2736 VERIFY0(range_tree_space(
2737 msp->ms_allocating[(txg + t) & TXG_MASK]));
2739 if (msp->ms_allocator != -1) {
2740 metaslab_passivate(msp, msp->ms_weight &
2741 ~METASLAB_ACTIVE_MASK);
2744 if (!metaslab_debug_unload)
2745 metaslab_unload(msp);
2748 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2749 ASSERT0(range_tree_space(msp->ms_freeing));
2750 ASSERT0(range_tree_space(msp->ms_freed));
2751 ASSERT0(range_tree_space(msp->ms_checkpointing));
2753 mutex_exit(&msp->ms_lock);
2757 metaslab_sync_reassess(metaslab_group_t *mg)
2759 spa_t *spa = mg->mg_class->mc_spa;
2761 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2762 metaslab_group_alloc_update(mg);
2763 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2766 * Preload the next potential metaslabs but only on active
2767 * metaslab groups. We can get into a state where the metaslab
2768 * is no longer active since we dirty metaslabs as we remove a
2769 * device, thus potentially making the metaslab group eligible
2772 if (mg->mg_activation_count > 0) {
2773 metaslab_group_preload(mg);
2775 spa_config_exit(spa, SCL_ALLOC, FTAG);
2779 metaslab_distance(metaslab_t *msp, dva_t *dva)
2781 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2782 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2783 uint64_t start = msp->ms_id;
2785 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2786 return (1ULL << 63);
2789 return ((start - offset) << ms_shift);
2791 return ((offset - start) << ms_shift);
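/*
 * Illustrative arithmetic: with vdev_ms_shift = 30 (1GB
 * metaslabs), a DVA whose offset lands in metaslab 10 is reported
 * as (14 - 10) << 30 = 4GB away from metaslab 14 on the same vdev;
 * a DVA on any other vdev is treated as maximally distant (2^63).
 */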
2796 * ==========================================================================
2797 * Metaslab allocation tracing facility
2798 * ==========================================================================
2800 kstat_t *metaslab_trace_ksp;
2801 kstat_named_t metaslab_trace_over_limit;
2804 metaslab_alloc_trace_init(void)
2806 ASSERT(metaslab_alloc_trace_cache == NULL);
2807 metaslab_alloc_trace_cache = kmem_cache_create(
2808 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2809 0, NULL, NULL, NULL, NULL, NULL, 0);
2810 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2811 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2812 if (metaslab_trace_ksp != NULL) {
2813 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2814 kstat_named_init(&metaslab_trace_over_limit,
2815 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2816 kstat_install(metaslab_trace_ksp);
2821 metaslab_alloc_trace_fini(void)
2823 if (metaslab_trace_ksp != NULL) {
2824 kstat_delete(metaslab_trace_ksp);
2825 metaslab_trace_ksp = NULL;
2827 kmem_cache_destroy(metaslab_alloc_trace_cache);
2828 metaslab_alloc_trace_cache = NULL;
2832 * Add an allocation trace element to the allocation tracing list.
2835 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2836 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
2839 if (!metaslab_trace_enabled)
2843 * When the tracing list reaches its maximum we remove
2844 * the second element in the list before adding a new one.
2845 * By removing the second element we preserve the original
2846 * entry as a clue to what allocation steps have already been
2849 if (zal->zal_size == metaslab_trace_max_entries) {
2850 metaslab_alloc_trace_t *mat_next;
2852 panic("too many entries in allocation list");
2854 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2856 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2857 list_remove(&zal->zal_list, mat_next);
2858 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2861 metaslab_alloc_trace_t *mat =
2862 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2863 list_link_init(&mat->mat_list_node);
2866 mat->mat_size = psize;
2867 mat->mat_dva_id = dva_id;
2868 mat->mat_offset = offset;
2869 mat->mat_weight = 0;
2870 mat->mat_allocator = allocator;
2873 mat->mat_weight = msp->ms_weight;
2876 * The list is part of the zio so locking is not required. Only
2877 * a single thread will perform allocations for a given zio.
2879 list_insert_tail(&zal->zal_list, mat);
2882 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2886 metaslab_trace_init(zio_alloc_list_t *zal)
2888 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2889 offsetof(metaslab_alloc_trace_t, mat_list_node));
2894 metaslab_trace_fini(zio_alloc_list_t *zal)
2896 metaslab_alloc_trace_t *mat;
2898 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2899 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2900 list_destroy(&zal->zal_list);
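/*
 * Typical lifecycle (illustrative sketch, not taken from the
 * original source): the list is embedded in the zio, so a single
 * allocating thread owns it and no locking is required:
 *
 *	zio_alloc_list_t zal;
 *
 *	metaslab_trace_init(&zal);
 *	... call metaslab_trace_add(&zal, mg, msp, psize, d,
 *	    offset, allocator) at each allocation step ...
 *	metaslab_trace_fini(&zal);
 */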
2905 * ==========================================================================
2906 * Metaslab block operations
2907 * ==========================================================================
2911 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
2914 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2915 (flags & METASLAB_DONT_THROTTLE))
2918 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2919 if (!mg->mg_class->mc_alloc_throttle_enabled)
2922 (void) refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
2926 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
2928 uint64_t max = mg->mg_max_alloc_queue_depth;
2929 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2931 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
2932 cur, cur + 1) == cur) {
2934 &mg->mg_class->mc_alloc_max_slots[allocator]);
2937 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
2942 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
2943 int allocator, boolean_t io_complete)
2945 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2946 (flags & METASLAB_DONT_THROTTLE))
2949 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2950 if (!mg->mg_class->mc_alloc_throttle_enabled)
2953 (void) refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
2955 metaslab_group_increment_qdepth(mg, allocator);
2959 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
2963 const dva_t *dva = bp->blk_dva;
2964 int ndvas = BP_GET_NDVAS(bp);
2966 for (int d = 0; d < ndvas; d++) {
2967 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2968 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2969 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth[allocator],
2976 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2979 range_tree_t *rt = msp->ms_allocatable;
2980 metaslab_class_t *mc = msp->ms_group->mg_class;
2982 VERIFY(!msp->ms_condensing);
2983 VERIFY0(msp->ms_initializing);
2985 start = mc->mc_ops->msop_alloc(msp, size);
2986 if (start != -1ULL) {
2987 metaslab_group_t *mg = msp->ms_group;
2988 vdev_t *vd = mg->mg_vd;
2990 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2991 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2992 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2993 range_tree_remove(rt, start, size);
2995 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
2996 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2998 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3000 /* Track the last successful allocation */
3001 msp->ms_alloc_txg = txg;
3002 metaslab_verify_space(msp, txg);
3006 * Now that we've attempted the allocation we need to update the
3007 * metaslab's maximum block size since it may have changed.
3009 msp->ms_max_size = metaslab_block_maxsize(msp);
3014 * Find the metaslab with the highest weight that is less than what we've
3015 * already tried. In the common case, this means that we will examine each
3016 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3017 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3018 * activated by another thread, and we fail to allocate from the metaslab we
3019 * have selected, we may not try the newly-activated metaslab, and instead
3020 * activate another metaslab. This is not optimal, but generally does not cause
3021 * any problems (a possible exception being if every metaslab is completely full
3022 * except for the newly-activated metaslab which we fail to examine).
3025 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
3026 dva_t *dva, int d, uint64_t min_distance, uint64_t asize, int allocator,
3027 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
3030 avl_tree_t *t = &mg->mg_metaslab_tree;
3031 metaslab_t *msp = avl_find(t, search, &idx);
3033 msp = avl_nearest(t, idx, AVL_AFTER);
3035 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
3037 if (!metaslab_should_allocate(msp, asize)) {
3038 metaslab_trace_add(zal, mg, msp, asize, d,
3039 TRACE_TOO_SMALL, allocator);
3044 * If the selected metaslab is condensing or being
3045 * initialized, skip it.
3047 if (msp->ms_condensing || msp->ms_initializing > 0)
3050 *was_active = msp->ms_allocator != -1;
3052 * If we're activating as primary, this is our first allocation
3053 * from this disk, so we don't need to check how close we are.
3054 * If the metaslab under consideration was already active,
3055 * we're getting desperate enough to steal another allocator's
3056 * metaslab, so we still don't care about distances.
3058 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
3061 uint64_t target_distance = min_distance
3062 + (space_map_allocated(msp->ms_sm) != 0 ? 0 :
3065 for (i = 0; i < d; i++) {
3066 if (metaslab_distance(msp, &dva[i]) < target_distance)
3074 search->ms_weight = msp->ms_weight;
3075 search->ms_start = msp->ms_start + 1;
3076 search->ms_allocator = msp->ms_allocator;
3077 search->ms_primary = msp->ms_primary;
3084 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
3085 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3088 metaslab_t *msp = NULL;
3089 uint64_t offset = -1ULL;
3090 uint64_t activation_weight;
3092 activation_weight = METASLAB_WEIGHT_PRIMARY;
3093 for (int i = 0; i < d; i++) {
3094 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3095 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3096 activation_weight = METASLAB_WEIGHT_SECONDARY;
3097 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3098 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3099 activation_weight = METASLAB_WEIGHT_CLAIM;
3105 * If we don't have enough metaslabs active to fill the entire array, we
3106 * just use the 0th slot.
3108 if (mg->mg_ms_ready < mg->mg_allocators * 3)
3111 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
3113 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
3114 search->ms_weight = UINT64_MAX;
3115 search->ms_start = 0;
3117 * At the end of the metaslab tree are the already-active metaslabs,
3118 * first the primaries, then the secondaries. When we resume searching
3119 * through the tree, we need to consider ms_allocator and ms_primary so
3120 * we start in the location right after where we left off, and don't
3121 * accidentally loop forever considering the same metaslabs.
3123 search->ms_allocator = -1;
3124 search->ms_primary = B_TRUE;
3126 boolean_t was_active = B_FALSE;
3128 mutex_enter(&mg->mg_lock);
3130 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3131 mg->mg_primaries[allocator] != NULL) {
3132 msp = mg->mg_primaries[allocator];
3133 was_active = B_TRUE;
3134 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3135 mg->mg_secondaries[allocator] != NULL) {
3136 msp = mg->mg_secondaries[allocator];
3137 was_active = B_TRUE;
3139 msp = find_valid_metaslab(mg, activation_weight, dva, d,
3140 min_distance, asize, allocator, zal, search,
3144 mutex_exit(&mg->mg_lock);
3146 kmem_free(search, sizeof (*search));
3150 mutex_enter(&msp->ms_lock);
3152 * Ensure that the metaslab we have selected is still
3153 * capable of handling our request. It's possible that
3154 * another thread may have changed the weight while we
3155 * were blocked on the metaslab lock. We check the
3156 * active status first to see if we need to reselect
3159 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
3160 mutex_exit(&msp->ms_lock);
3165 * If the metaslab is freshly activated for an allocator that
3166 * isn't the one we're allocating from, or if it's a primary and
3167 * we're seeking a secondary (or vice versa), we go back and
3168 * select a new metaslab.
3170 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
3171 (msp->ms_allocator != -1) &&
3172 (msp->ms_allocator != allocator || ((activation_weight ==
3173 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
3174 mutex_exit(&msp->ms_lock);
3178 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
3179 activation_weight != METASLAB_WEIGHT_CLAIM) {
3180 metaslab_passivate(msp, msp->ms_weight &
3181 ~METASLAB_WEIGHT_CLAIM);
3182 mutex_exit(&msp->ms_lock);
3186 if (metaslab_activate(msp, allocator, activation_weight) != 0) {
3187 mutex_exit(&msp->ms_lock);
3191 msp->ms_selected_txg = txg;
3194 * Now that we have the lock, recheck to see if we should
3196 * continue to use this metaslab for this allocation. The
3197 * metaslab is now loaded so metaslab_should_allocate() can
3197 * accurately determine if the allocation attempt should
3200 if (!metaslab_should_allocate(msp, asize)) {
3201 /* Passivate this metaslab and select a new one. */
3202 metaslab_trace_add(zal, mg, msp, asize, d,
3203 TRACE_TOO_SMALL, allocator);
3208 * If this metaslab is currently condensing then pick again as
3209 * we can't manipulate this metaslab until it's committed
3210 * to disk. If this metaslab is being initialized, we shouldn't
3211 * allocate from it since the allocated region might be
3212 * overwritten after allocation.
3214 if (msp->ms_condensing) {
3215 metaslab_trace_add(zal, mg, msp, asize, d,
3216 TRACE_CONDENSING, allocator);
3217 metaslab_passivate(msp, msp->ms_weight &
3218 ~METASLAB_ACTIVE_MASK);
3219 mutex_exit(&msp->ms_lock);
3221 } else if (msp->ms_initializing > 0) {
3222 metaslab_trace_add(zal, mg, msp, asize, d,
3223 TRACE_INITIALIZING, allocator);
3224 metaslab_passivate(msp, msp->ms_weight &
3225 ~METASLAB_ACTIVE_MASK);
3226 mutex_exit(&msp->ms_lock);
3230 offset = metaslab_block_alloc(msp, asize, txg);
3231 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
3233 if (offset != -1ULL) {
3234 /* Proactively passivate the metaslab, if needed */
3235 metaslab_segment_may_passivate(msp);
3239 ASSERT(msp->ms_loaded);
3242 * We were unable to allocate from this metaslab so determine
3243 * a new weight for this metaslab. Now that we have loaded
3244 * the metaslab we can provide a better hint to the metaslab
3247 * For space-based metaslabs, we use the maximum block size.
3248 * This information is only available when the metaslab
3249 * is loaded and is more accurate than the generic free
3250 * space weight that was calculated by metaslab_weight().
3251 * This information allows us to quickly compare the maximum
3252 * available allocation in the metaslab to the allocation
3253 * size being requested.
3255 * For segment-based metaslabs, determine the new weight
3256 * based on the highest bucket in the range tree. We
3257 * explicitly use the loaded segment weight (i.e. the range
3258 * tree histogram) since it contains the space that is
3259 * currently available for allocation and is accurate
3260 * even within a sync pass.
3262 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3263 uint64_t weight = metaslab_block_maxsize(msp);
3264 WEIGHT_SET_SPACEBASED(weight);
3265 metaslab_passivate(msp, weight);
3267 metaslab_passivate(msp,
3268 metaslab_weight_from_range_tree(msp));
3272 * We have just failed an allocation attempt, check
3273 * that metaslab_should_allocate() agrees. Otherwise,
3274 * we may end up in an infinite loop retrying the same
3277 ASSERT(!metaslab_should_allocate(msp, asize));
3278 mutex_exit(&msp->ms_lock);
3280 mutex_exit(&msp->ms_lock);
3281 kmem_free(search, sizeof (*search));
3286 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
3287 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d,
3291 ASSERT(mg->mg_initialized);
3293 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
3294 min_distance, dva, d, allocator);
3296 mutex_enter(&mg->mg_lock);
3297 if (offset == -1ULL) {
3298 mg->mg_failed_allocations++;
3299 metaslab_trace_add(zal, mg, NULL, asize, d,
3300 TRACE_GROUP_FAILURE, allocator);
3301 if (asize == SPA_GANGBLOCKSIZE) {
3303 * This metaslab group was unable to allocate
3304 * the minimum gang block size so it must be out of
3305 * space. We must notify the allocation throttle
3306 * to start skipping allocation attempts to this
3307 * metaslab group until more space becomes available.
3308 * Note: this failure cannot be caused by the
3309 * allocation throttle since the allocation throttle
3310 * is only responsible for skipping devices and
3311 * not failing block allocations.
3313 mg->mg_no_free_space = B_TRUE;
3316 mg->mg_allocations++;
3317 mutex_exit(&mg->mg_lock);
3322 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
3323 * on the same vdev as an existing DVA of this BP, then try to allocate it
3324 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
3327 int ditto_same_vdev_distance_shift = 3;
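/*
 * Illustrative arithmetic: with the default shift of 3, a ditto
 * DVA placed on a 1TB vdev is kept at least 1TB / 2^3 = 128GB away
 * from the existing DVA; if that distance would be no more than a
 * single metaslab, any offset is accepted instead.
 */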
3330 * Allocate a block for the specified i/o.
3333 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
3334 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3335 zio_alloc_list_t *zal, int allocator)
3337 metaslab_group_t *mg, *rotor;
3339 boolean_t try_hard = B_FALSE;
3341 ASSERT(!DVA_IS_VALID(&dva[d]));
3344 * For testing, make some blocks above a certain size be gang blocks.
3346 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
3347 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
3349 return (SET_ERROR(ENOSPC));
3353 * Start at the rotor and loop through all mgs until we find something.
3354 * Note that there's no locking on mc_rotor or mc_aliquot because
3355 * nothing actually breaks if we miss a few updates -- we just won't
3356 * allocate quite as evenly. It all balances out over time.
3358 * If we are doing ditto or log blocks, try to spread them across
3359 * consecutive vdevs. If we're forced to reuse a vdev before we've
3360 * allocated all of our ditto blocks, then try and spread them out on
3361 * that vdev as much as possible. If it turns out to not be possible,
3362 * gradually lower our standards until anything becomes acceptable.
3363 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3364 * gives us hope of containing our fault domains to something we're
3365 * able to reason about. Otherwise, any two top-level vdev failures
3366 * will guarantee the loss of data. With consecutive allocation,
3367 * only two adjacent top-level vdev failures will result in data loss.
3369 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3370 * ourselves on the same vdev as our gang block header. That
3371 * way, we can hope for locality in vdev_cache, plus it makes our
3372 * fault domains something tractable.
3375 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3378 * It's possible the vdev we're using as the hint no
3379 * longer exists or its mg has been closed (e.g. by
3380 * device removal). Consult the rotor when
3383 if (vd != NULL && vd->vdev_mg != NULL) {
3386 if (flags & METASLAB_HINTBP_AVOID &&
3387 mg->mg_next != NULL)
3392 } else if (d != 0) {
3393 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3394 mg = vd->vdev_mg->mg_next;
3400 * If the hint put us into the wrong metaslab class, or into a
3401 * metaslab group that has been passivated, just follow the rotor.
3403 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3409 boolean_t allocatable;
3411 ASSERT(mg->mg_activation_count == 1);
3415 * Don't allocate from faulted devices.
3418 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3419 allocatable = vdev_allocatable(vd);
3420 spa_config_exit(spa, SCL_ZIO, FTAG);
3422 allocatable = vdev_allocatable(vd);
3426 * Determine if the selected metaslab group is eligible
3427 * for allocations. If we're ganging then don't allow
3428 * this metaslab group to skip allocations since that would
3429 * inadvertently return ENOSPC and suspend the pool
3430 * even though space is still available.
3432 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3433 allocatable = metaslab_group_allocatable(mg, rotor,
3434 psize, allocator, d);
3438 metaslab_trace_add(zal, mg, NULL, psize, d,
3439 TRACE_NOT_ALLOCATABLE, allocator);
3443 ASSERT(mg->mg_initialized);
3446 * Avoid writing single-copy data to a failing,
3447 * non-redundant vdev, unless we've already tried all
3450 if ((vd->vdev_stat.vs_write_errors > 0 ||
3451 vd->vdev_state < VDEV_STATE_HEALTHY) &&
3452 d == 0 && !try_hard && vd->vdev_children == 0) {
3453 metaslab_trace_add(zal, mg, NULL, psize, d,
3454 TRACE_VDEV_ERROR, allocator);
3458 ASSERT(mg->mg_class == mc);
3461 * If we don't need to try hard, then require that the
3462 * block be 1/8th of the device away from any other DVAs
3463 * in this BP. If we are trying hard, allow any offset
3464 * to be used (distance=0).
3466 uint64_t distance = 0;
3468 distance = vd->vdev_asize >>
3469 ditto_same_vdev_distance_shift;
3470 if (distance <= (1ULL << vd->vdev_ms_shift))
3474 uint64_t asize = vdev_psize_to_asize(vd, psize);
3475 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3477 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3478 distance, dva, d, allocator);
3480 if (offset != -1ULL) {
3482 * If we've just selected this metaslab group,
3483 * figure out whether the corresponding vdev is
3484 * over- or under-used relative to the pool,
3485 * and set an allocation bias to even it out.
3487 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3488 vdev_stat_t *vs = &vd->vdev_stat;
3491 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3492 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3495 * Calculate how much more or less we should
3496 * try to allocate from this device during
3497 * this iteration around the rotor.
3498 * For example, if a device is 80% full
3499 * and the pool is 20% full then we should
3500 * reduce allocations by 60% on this device.
3502 * mg_bias = (20 - 80) * 512K / 100 = -307K
3504 * This reduces allocations by 307K for this
3507 mg->mg_bias = ((cu - vu) *
3508 (int64_t)mg->mg_aliquot) / 100;
3509 } else if (!metaslab_bias_enabled) {
3513 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3514 mg->mg_aliquot + mg->mg_bias) {
3515 mc->mc_rotor = mg->mg_next;
3519 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3520 DVA_SET_OFFSET(&dva[d], offset);
3521 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3522 DVA_SET_ASIZE(&dva[d], asize);
3527 mc->mc_rotor = mg->mg_next;
3529 } while ((mg = mg->mg_next) != rotor);
3532 * If we haven't tried hard, do so now.
3539 bzero(&dva[d], sizeof (dva_t));
3541 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
3542 return (SET_ERROR(ENOSPC));
3546 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
3547 boolean_t checkpoint)
3550 spa_t *spa = vd->vdev_spa;
3552 ASSERT(vdev_is_concrete(vd));
3553 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3554 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3556 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3558 VERIFY(!msp->ms_condensing);
3559 VERIFY3U(offset, >=, msp->ms_start);
3560 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3561 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3562 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3564 metaslab_check_free_impl(vd, offset, asize);
3566 mutex_enter(&msp->ms_lock);
3567 if (range_tree_is_empty(msp->ms_freeing) &&
3568 range_tree_is_empty(msp->ms_checkpointing)) {
3569 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3573 ASSERT(spa_has_checkpoint(spa));
3574 range_tree_add(msp->ms_checkpointing, offset, asize);
3576 range_tree_add(msp->ms_freeing, offset, asize);
3578 mutex_exit(&msp->ms_lock);
3583 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3584 uint64_t size, void *arg)
3586 boolean_t *checkpoint = arg;
3588 ASSERT3P(checkpoint, !=, NULL);
3590 if (vd->vdev_ops->vdev_op_remap != NULL)
3591 vdev_indirect_mark_obsolete(vd, offset, size);
3593 metaslab_free_impl(vd, offset, size, *checkpoint);
3597 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
3598 boolean_t checkpoint)
3600 spa_t *spa = vd->vdev_spa;
3602 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3604 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
3607 if (spa->spa_vdev_removal != NULL &&
3608 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
3609 vdev_is_concrete(vd)) {
3611 * Note: we check if the vdev is concrete because when
3612 * we complete the removal, we first change the vdev to be
3613 * an indirect vdev (in open context), and then (in syncing
3614 * context) clear spa_vdev_removal.
3616 free_from_removing_vdev(vd, offset, size);
3617 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
3618 vdev_indirect_mark_obsolete(vd, offset, size);
3619 vd->vdev_ops->vdev_op_remap(vd, offset, size,
3620 metaslab_free_impl_cb, &checkpoint);
3622 metaslab_free_concrete(vd, offset, size, checkpoint);
3626 typedef struct remap_blkptr_cb_arg {
3628 spa_remap_cb_t rbca_cb;
3629 vdev_t *rbca_remap_vd;
3630 uint64_t rbca_remap_offset;
3632 } remap_blkptr_cb_arg_t;
3635 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3636 uint64_t size, void *arg)
3638 remap_blkptr_cb_arg_t *rbca = arg;
3639 blkptr_t *bp = rbca->rbca_bp;
3641 /* We cannot remap split blocks. */
3642 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
3644 ASSERT0(inner_offset);
3646 if (rbca->rbca_cb != NULL) {
3648 * At this point we know that we are not handling split
3649 * blocks and we invoke the callback on the previous
3650 * vdev which must be indirect.
3652 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
3654 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
3655 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
3657 /* set up remap_blkptr_cb_arg for the next call */
3658 rbca->rbca_remap_vd = vd;
3659 rbca->rbca_remap_offset = offset;
3663 * The phys birth time is that of dva[0]. This ensures that we know
3664 * when each dva was written, so that resilver can determine which
3665 * blocks need to be scrubbed (i.e. those written during the time
3666 * the vdev was offline). It also ensures that the key used in
3667 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
3668 * we didn't change the phys_birth, a lookup in the ARC for a
3669 * remapped BP could find the data that was previously stored at
3670 * this vdev + offset.
3672 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
3673 DVA_GET_VDEV(&bp->blk_dva[0]));
3674 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
3675 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
3676 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
3678 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
3679 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
3683 * If the block pointer contains any indirect DVAs, modify them to refer to
3684 * concrete DVAs. Note that this will sometimes not be possible, leaving
3685 * the indirect DVA in place. This happens if the indirect DVA spans multiple
3686 * segments in the mapping (i.e. it is a "split block").
3688 * If the BP was remapped, calls the callback on the original dva (note the
3689 * callback can be called multiple times if the original indirect DVA refers
3690 * to another indirect DVA, etc).
3692 * Returns TRUE if the BP was remapped.
3695 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
3697 remap_blkptr_cb_arg_t rbca;
3699 if (!zfs_remap_blkptr_enable)
3702 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
3706 * Dedup BPs cannot be remapped, because ddt_phys_select() depends
3707 * on DVA[0] being the same in the BP as in the DDT (dedup table).
3709 if (BP_GET_DEDUP(bp))
3713 * Gang blocks cannot be remapped, because
3714 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
3715 * the BP used to read the gang block header (GBH) being the same
3716 * as the DVA[0] that we allocated for the GBH.
3722 * Embedded BPs have no DVA to remap.
3724 if (BP_GET_NDVAS(bp) < 1)
3728 * Note: we only remap dva[0]. If we remapped other dvas, we
3729 * would no longer know what their phys birth txg is.
3731 dva_t *dva = &bp->blk_dva[0];
3733 uint64_t offset = DVA_GET_OFFSET(dva);
3734 uint64_t size = DVA_GET_ASIZE(dva);
3735 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
3737 if (vd->vdev_ops->vdev_op_remap == NULL)
3741 rbca.rbca_cb = callback;
3742 rbca.rbca_remap_vd = vd;
3743 rbca.rbca_remap_offset = offset;
3744 rbca.rbca_cb_arg = arg;
3747 * remap_blkptr_cb() will be called in order for each level of
3748 * indirection, until a concrete vdev is reached or a split block is
3749 * encountered. old_vd and old_offset are updated within the callback
3750 * as we go from one indirect vdev to the next one (either concrete
3751 * or indirect again) in that order.
3753 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
3755 /* Check if the DVA wasn't remapped because it is a split block */
3756 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
3763 * Undo the allocation of a DVA which happened in the given transaction group.
3766 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3770 uint64_t vdev = DVA_GET_VDEV(dva);
3771 uint64_t offset = DVA_GET_OFFSET(dva);
3772 uint64_t size = DVA_GET_ASIZE(dva);
3774 ASSERT(DVA_IS_VALID(dva));
3775 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3777 if (txg > spa_freeze_txg(spa))
3780 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3781 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3782 cmn_err(CE_WARN, "metaslab_unalloc_dva(): bad DVA %llu:%llu",
3783 (u_longlong_t)vdev, (u_longlong_t)offset);
3788 ASSERT(!vd->vdev_removing);
3789 ASSERT(vdev_is_concrete(vd));
3790 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
3791 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
3793 if (DVA_GET_GANG(dva))
3794 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3796 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3798 mutex_enter(&msp->ms_lock);
3799 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
3802 VERIFY(!msp->ms_condensing);
3803 VERIFY3U(offset, >=, msp->ms_start);
3804 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3805 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
3807 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3808 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3809 range_tree_add(msp->ms_allocatable, offset, size);
3810 mutex_exit(&msp->ms_lock);
3814 * Free the block represented by the given DVA.
3817 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
3819 uint64_t vdev = DVA_GET_VDEV(dva);
3820 uint64_t offset = DVA_GET_OFFSET(dva);
3821 uint64_t size = DVA_GET_ASIZE(dva);
3822 vdev_t *vd = vdev_lookup_top(spa, vdev);
3824 ASSERT(DVA_IS_VALID(dva));
3825 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3827 if (DVA_GET_GANG(dva)) {
3828 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3831 metaslab_free_impl(vd, offset, size, checkpoint);
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mc->mc_alloc_max_slots[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots =
	    refcount_count(&mc->mc_alloc_slots[allocator]);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++) {
			reserved_slots =
			    refcount_add(&mc->mc_alloc_slots[allocator],
			    zio);
		}
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}
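/*
 * Usage sketch (not part of the original source), assuming a zio that
 * needs "slots" DVAs from class "mc" on allocator "allocator": each
 * successful reservation must be paired with an unreserve when the I/O
 * completes.
 *
 *	if (metaslab_class_throttle_reserve(mc, slots, allocator, zio,
 *	    flags)) {
 *		// proceed into the allocator; when the zio completes:
 *		metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
 *	} else {
 *		// no slots available: throttle the zio and retry later
 *	}
 *
 * Note that gang allocations bypass the slot limit above so that they can
 * always make forward progress.
 */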
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++) {
		(void) refcount_remove(&mc->mc_alloc_slots[allocator],
		    zio);
	}
	mutex_exit(&mc->mc_lock);
}
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (ENXIO);

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
	/*
	 * No need to fail in that case; someone else has activated the
	 * metaslab, but that doesn't preclude us from using it.
	 */
	if (error == EBUSY)
		error = 0;

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}
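/*
 * Illustration (not part of the original source): passing txg == 0 makes
 * metaslab_claim_concrete() a dry run.  The segment is checked against
 * ms_allocatable, but nothing is removed or dirtied:
 *
 *	// verify a segment is still free without consuming it
 *	error = metaslab_claim_concrete(vd, offset, size, 0);
 *
 * metaslab_claim() below relies on this to validate every DVA before
 * committing to any of them.
 */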
typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;

/* ARGSUSED */
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(1M) can claim on indirect vdevs. This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);

		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}
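/*
 * Illustration (illustrative values only): during intent-log claim at pool
 * open, each log block's DVAs funnel through here.  A hypothetical DVA
 * <0:0x2000:0x1000> is looked up on top-level vdev 0, and offset 0x2000 of
 * size 0x1000 is re-inserted into ms_allocating for the claiming txg so
 * the sync path accounts it as allocated.
 */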
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}
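/*
 * Usage sketch (not part of the original source), assuming "spa", "mc",
 * "zal", "zio", and "allocator" are already set up; allocate two DVAs for
 * a new block pointer:
 *
 *	blkptr_t bp = { 0 };
 *	int error = metaslab_alloc(spa, mc, psize, &bp, 2, txg,
 *	    NULL, 0, zal, zio, allocator);
 *	if (error == 0) {
 *		// bp now holds 2 DVAs and its birth txg is set to txg
 *	}
 *
 * On any per-DVA failure the loop above unwinds the DVAs it had already
 * placed, so the caller never sees a partially filled block pointer.
 */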
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT(!now);
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
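/*
 * Worked example (illustration only): suppose spa_checkpoint_txg == 100 and
 * we are syncing txg 120.  Freeing a block with blk_birth == 90 (it existed
 * when the checkpoint was taken) sets checkpoint = B_TRUE, so its space is
 * added to the checkpoint accounting instead of becoming reusable.  A block
 * with blk_birth == 110 was born after the checkpoint, is not referenced by
 * the checkpointed uberblock, and is freed normally.
 */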
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
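/*
 * Illustration (not part of the original source): the recursive dry run
 * makes a real claim effectively a two-pass operation:
 *
 *	error = metaslab_claim(spa, bp, txg);	// txg != 0
 *	// internally: metaslab_claim(spa, bp, 0) first validates every
 *	// DVA; only if that succeeds are the DVAs actually claimed.
 *
 * This avoids having to unwind a partially claimed block pointer.
 */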
/* ARGSUSED */
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;

	metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded)
		range_tree_verify(msp->ms_allocatable, offset, size);

	range_tree_verify(msp->ms_freeing, offset, size);
	range_tree_verify(msp->ms_checkpointing, offset, size);
	range_tree_verify(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify(msp->ms_defer[j], offset, size);
	mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
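/*
 * Note (hypothetical setup, for illustration): these checks are compiled in
 * but gated at run time; they only execute when the ZFS_DEBUG_ZIO_FREE bit
 * is set in the global zfs_flags debug mask:
 *
 *	zfs_flags |= ZFS_DEBUG_ZIO_FREE;	// enable free verification
 *
 * With the bit clear, metaslab_check_free() returns immediately.
 */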