4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2017, Intel Corporation.
29 #include <sys/zfs_context.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/vdev_draid.h>
37 #include <sys/spa_impl.h>
38 #include <sys/zfeature.h>
39 #include <sys/vdev_indirect_mapping.h>
41 #include <sys/btree.h>
43 #define WITH_DF_BLOCK_ALLOCATOR
45 #define GANG_ALLOCATION(flags) \
46 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
49 * Metaslab granularity, in bytes. This is roughly similar to what would be
50 * referred to as the "stripe size" in traditional RAID arrays. In normal
51 * operation, we will try to write this amount of data to a top-level vdev
52 * before moving on to the next one.
54 unsigned long metaslab_aliquot = 512 << 10;
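/*
 * (Illustrative note) With the default of 512K, allocations rotate to the
 * next top-level vdev after roughly 512K has been written to the current
 * one; the per-group aliquot is scaled by the vdev's child count when the
 * group is activated (see metaslab_group_activate()).
 */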
57 * For testing, make some blocks above a certain size be gang blocks.
59 unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
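/*
 * (Illustrative note) The default of SPA_MAXBLOCKSIZE + 1 is one byte larger
 * than the largest possible block, so forced ganging never triggers unless
 * this tunable is lowered for testing.
 */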
62 * In pools where the log space map feature is not enabled we touch
63 * multiple metaslabs (and their respective space maps) with each
64 * transaction group. Thus, we benefit from having a small space map
65 * block size since it allows us to issue more I/O operations scattered
66 * around the disk. So a sane default for the space map block size is something like 16K.
69 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
72 * When the log space map feature is enabled, we accumulate a lot of
73 * changes per metaslab that are flushed once in a while so we benefit
74 * from a bigger block size like 128K for the metaslab space maps.
76 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
79 * The in-core space map representation is more compact than its on-disk form.
80 * The zfs_condense_pct determines how much more compact the in-core
81 * space map representation must be before we compact it on-disk.
82 * Values should be greater than or equal to 100.
84 int zfs_condense_pct = 200;
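/*
 * (Illustrative note) With the default of 200, a space map is only condensed
 * once its on-disk representation is at least twice (200%) the size of its
 * in-core representation.
 */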
87 * Condensing a metaslab is not guaranteed to actually reduce the amount of
88 * space used on disk. In particular, a space map uses data in increments of
89 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
90 * same number of blocks after condensing. Since the goal of condensing is to
91 * reduce the number of IOPs required to read the space map, we only want to
92 * condense when we can be sure we will reduce the number of blocks used by the
93 * space map. Unfortunately, we cannot precisely compute whether or not this is
94 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
95 * we apply the following heuristic: do not condense a space map unless the
96 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold blocks.
99 int zfs_metaslab_condense_block_threshold = 4;
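/*
 * (Illustrative note) With the default threshold of 4 and a 16K space map
 * block size, condensing is only considered once the uncondensed space map
 * occupies more than 64K on disk.
 */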
102 * The zfs_mg_noalloc_threshold defines which metaslab groups should
103 * be eligible for allocation. The value is defined as a percentage of
104 * free space. Metaslab groups that have more free space than
105 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
106 * a metaslab group's free space is less than or equal to the
107 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
108 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
109 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
110 * groups are allowed to accept allocations. Gang blocks are always
111 * eligible to allocate on any metaslab group. The default value of 0 means
112 * no metaslab group will be excluded based on this criterion.
114 int zfs_mg_noalloc_threshold = 0;
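/*
 * (Illustrative note) For example, setting this to 10 means a metaslab group
 * whose free space falls to 10% or less is avoided for normal allocations
 * until every group in the pool is at or below 10% free.
 */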
117 * Metaslab groups are considered eligible for allocations if their
118 * fragmentation metric (measured as a percentage) is less than or
119 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
120 * exceeds this threshold then it will be skipped unless all metaslab
121 * groups within the metaslab class have also crossed this threshold.
123 * This tunable was introduced to avoid edge cases where we continue
124 * allocating from very fragmented disks in our pool while other, less
125 * fragmented disks exist. On the other hand, if all disks in the
126 * pool are uniformly approaching the threshold, the threshold can
127 * be a speed bump in performance, where we keep switching the disks
128 * that we allocate from (e.g. we allocate some segments from disk A,
129 * pushing it past the threshold, while freeing segments from disk
130 * B brings its fragmentation back below the threshold).
132 * Empirically, we've seen that our vdev selection for allocations is
133 * good enough that fragmentation increases uniformly across all vdevs
134 * the majority of the time. Thus we set the threshold percentage high
135 * enough to avoid hitting the speed bump on pools that are being pushed to their capacity.
138 int zfs_mg_fragmentation_threshold = 95;
141 * Allow metaslabs to keep their active state as long as their fragmentation
142 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
143 * active metaslab that exceeds this threshold will no longer keep its active
144 * status allowing better metaslabs to be selected.
146 int zfs_metaslab_fragmentation_threshold = 70;
149 * When set, load all metaslabs when the pool is first opened.
151 int metaslab_debug_load = 0;
154 * When set, prevent metaslabs from being unloaded.
156 int metaslab_debug_unload = 0;
159 * Minimum size which forces the dynamic allocator to change
160 * its allocation strategy. Once the space map cannot satisfy
161 * an allocation of this size, it switches to using a more
162 * aggressive strategy (i.e. search by size rather than offset).
164 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
167 * The minimum free space, in percent, which must be available
168 * in a space map to continue allocations in a first-fit fashion.
169 * Once the space map's free space drops below this level we dynamically
170 * switch to using best-fit allocations.
172 int metaslab_df_free_pct = 4;
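/*
 * (Illustrative note) With the defaults above, the dynamic-fit allocator
 * stops the first-fit (offset-based) search once the largest free segment in
 * the metaslab is smaller than SPA_OLD_MAXBLOCKSIZE (128K) or the metaslab's
 * free space drops below 4%, and falls back to a size-based lookup instead.
 */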
175 * Maximum distance to search forward from the last offset. Without this
176 * limit, fragmented pools can see >100,000 iterations and
177 * metaslab_block_picker() becomes the performance limiting factor on
178 * high-performance storage.
180 * With the default setting of 16MB, we typically see less than 500
181 * iterations, even with very fragmented, ashift=9 pools. The maximum number
182 * of iterations possible is:
183 * metaslab_df_max_search / (2 * (1<<ashift))
184 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
185 * 2048 (with ashift=12).
187 int metaslab_df_max_search = 16 * 1024 * 1024;
190 * Forces the metaslab_block_picker function to search for at least this many
191 * segments forwards until giving up on finding a segment that the allocation will fit into.
194 uint32_t metaslab_min_search_count = 100;
197 * If we are not searching forward (due to metaslab_df_max_search,
198 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
199 * controls what segment is used. If it is set, we will use the largest free
200 * segment. If it is not set, we will use a segment of exactly the requested size (or larger).
203 int metaslab_df_use_largest_segment = B_FALSE;
206 * Percentage of all cpus that can be used by the metaslab taskq.
208 int metaslab_load_pct = 50;
211 * These tunables control how long a metaslab will remain loaded after the
212 * last allocation from it. A metaslab can't be unloaded until at least
213 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
214 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
215 * unloaded sooner. These settings are intended to be generous -- to keep
216 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
218 int metaslab_unload_delay = 32;
219 int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
222 * Max number of metaslabs per group to preload.
224 int metaslab_preload_limit = 10;
227 * Enable/disable preloading of metaslabs.
229 int metaslab_preload_enabled = B_TRUE;
232 * Enable/disable fragmentation weighting on metaslabs.
234 int metaslab_fragmentation_factor_enabled = B_TRUE;
237 * Enable/disable lba weighting (i.e. outer tracks are given preference).
239 int metaslab_lba_weighting_enabled = B_TRUE;
242 * Enable/disable metaslab group biasing.
244 int metaslab_bias_enabled = B_TRUE;
247 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
249 boolean_t zfs_remap_blkptr_enable = B_TRUE;
252 * Enable/disable segment-based metaslab selection.
254 int zfs_metaslab_segment_weight_enabled = B_TRUE;
257 * When using segment-based metaslab selection, we will continue
258 * allocating from the active metaslab until we have exhausted
259 * zfs_metaslab_switch_threshold of its buckets.
261 int zfs_metaslab_switch_threshold = 2;
264 * Internal switch to enable/disable the metaslab allocation tracing facility.
267 boolean_t metaslab_trace_enabled = B_FALSE;
270 * Maximum entries that the metaslab allocation tracing facility will keep
271 * in a given list when running in non-debug mode. We limit the number
272 * of entries in non-debug mode to prevent us from using up too much memory.
273 * The limit should be sufficiently large that we don't expect any allocation
274 * to ever exceed this value. In debug mode, the system will panic if this
275 * limit is ever reached allowing for further investigation.
277 uint64_t metaslab_trace_max_entries = 5000;
280 * Maximum number of metaslabs per group that can be disabled simultaneously.
283 int max_disabled_ms = 3;
286 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
287 * To avoid 64-bit overflow, don't set above UINT32_MAX.
289 unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */
292 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
293 * a metaslab would take it over this percentage, the oldest selected metaslab
294 * is automatically unloaded.
296 int zfs_metaslab_mem_limit = 75;
299 * Force the per-metaslab range trees to use 64-bit integers to store
300 * segments. Used for debugging purposes.
302 boolean_t zfs_metaslab_force_large_segs = B_FALSE;
305 * By default we only store segments over a certain size in the size-sorted
306 * metaslab trees (ms_allocatable_by_size and
307 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
308 * improves load and unload times at the cost of causing us to use slightly
309 * larger segments than we would otherwise in some cases.
311 uint32_t metaslab_by_size_min_shift = 14;
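/*
 * (Illustrative note) With the default of 14, only segments of at least
 * 16K (1 << 14) are inserted into the size-sorted B-trees; smaller segments
 * remain tracked only in the offset-ordered range tree.
 */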
314 * If not set, we will first try normal allocation. If that fails then
315 * we will do a gang allocation. If that fails then we will do a "try hard"
316 * gang allocation. If that fails then we will have a multi-layer gang block.
319 * If set, we will first try normal allocation. If that fails then
320 * we will do a "try hard" allocation. If that fails we will do a gang
321 * allocation. If that fails we will do a "try hard" gang allocation. If
322 * that fails then we will have a multi-layer gang block.
324 int zfs_metaslab_try_hard_before_gang = B_FALSE;
327 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
328 * metaslabs. This improves performance, especially when there are many
329 * metaslabs per vdev and the allocation can't actually be satisfied (so we
330 * would otherwise iterate all the metaslabs). If there is a metaslab with a
331 * worse weight but it can actually satisfy the allocation, we won't find it
332 * until trying hard. This may happen if the worse metaslab is not loaded
333 * (and the true weight is better than we have calculated), or due to weight
334 * bucketization. E.g. we are looking for a 60K segment, and the best
335 * metaslabs all have free segments in the 32-63K bucket, but the best
336 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
337 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
338 * bucket, and therefore a lower weight).
340 int zfs_metaslab_find_max_tries = 100;
342 static uint64_t metaslab_weight(metaslab_t *, boolean_t);
343 static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
344 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
345 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
347 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
348 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
349 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
350 static unsigned int metaslab_idx_func(multilist_t *, void *);
351 static void metaslab_evict(metaslab_t *, uint64_t);
352 static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
353 kmem_cache_t *metaslab_alloc_trace_cache;
355 typedef struct metaslab_stats {
356 kstat_named_t metaslabstat_trace_over_limit;
357 kstat_named_t metaslabstat_reload_tree;
358 kstat_named_t metaslabstat_too_many_tries;
359 kstat_named_t metaslabstat_try_hard;
362 static metaslab_stats_t metaslab_stats = {
363 { "trace_over_limit", KSTAT_DATA_UINT64 },
364 { "reload_tree", KSTAT_DATA_UINT64 },
365 { "too_many_tries", KSTAT_DATA_UINT64 },
366 { "try_hard", KSTAT_DATA_UINT64 },
369 #define METASLABSTAT_BUMP(stat) \
370 atomic_inc_64(&metaslab_stats.stat.value.ui64);
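/*
 * (Illustrative note) For example, METASLABSTAT_BUMP(metaslabstat_reload_tree)
 * atomically increments the 64-bit counter backing the "reload_tree" kstat.
 */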
373 kstat_t *metaslab_ksp;
376 metaslab_stat_init(void)
378 ASSERT(metaslab_alloc_trace_cache == NULL);
379 metaslab_alloc_trace_cache = kmem_cache_create(
380 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
381 0, NULL, NULL, NULL, NULL, NULL, 0);
382 metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
383 "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
384 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
385 if (metaslab_ksp != NULL) {
386 metaslab_ksp->ks_data = &metaslab_stats;
387 kstat_install(metaslab_ksp);
392 metaslab_stat_fini(void)
394 if (metaslab_ksp != NULL) {
395 kstat_delete(metaslab_ksp);
399 kmem_cache_destroy(metaslab_alloc_trace_cache);
400 metaslab_alloc_trace_cache = NULL;
404 * ==========================================================================
406 * ==========================================================================
409 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
411 metaslab_class_t *mc;
413 mc = kmem_zalloc(offsetof(metaslab_class_t,
414 mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
418 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
419 mc->mc_metaslab_txg_list = multilist_create(sizeof (metaslab_t),
420 offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
421 for (int i = 0; i < spa->spa_alloc_count; i++) {
422 metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
423 mca->mca_rotor = NULL;
424 zfs_refcount_create_tracked(&mca->mca_alloc_slots);
431 metaslab_class_destroy(metaslab_class_t *mc)
433 spa_t *spa = mc->mc_spa;
435 ASSERT(mc->mc_alloc == 0);
436 ASSERT(mc->mc_deferred == 0);
437 ASSERT(mc->mc_space == 0);
438 ASSERT(mc->mc_dspace == 0);
440 for (int i = 0; i < spa->spa_alloc_count; i++) {
441 metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
442 ASSERT(mca->mca_rotor == NULL);
443 zfs_refcount_destroy(&mca->mca_alloc_slots);
445 mutex_destroy(&mc->mc_lock);
446 multilist_destroy(mc->mc_metaslab_txg_list);
447 kmem_free(mc, offsetof(metaslab_class_t,
448 mc_allocator[spa->spa_alloc_count]));
452 metaslab_class_validate(metaslab_class_t *mc)
454 metaslab_group_t *mg;
458 * Must hold one of the spa_config locks.
460 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
461 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
463 if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
468 ASSERT(vd->vdev_mg != NULL);
469 ASSERT3P(vd->vdev_top, ==, vd);
470 ASSERT3P(mg->mg_class, ==, mc);
471 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
472 } while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
478 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
479 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
481 atomic_add_64(&mc->mc_alloc, alloc_delta);
482 atomic_add_64(&mc->mc_deferred, defer_delta);
483 atomic_add_64(&mc->mc_space, space_delta);
484 atomic_add_64(&mc->mc_dspace, dspace_delta);
488 metaslab_class_get_alloc(metaslab_class_t *mc)
490 return (mc->mc_alloc);
494 metaslab_class_get_deferred(metaslab_class_t *mc)
496 return (mc->mc_deferred);
500 metaslab_class_get_space(metaslab_class_t *mc)
502 return (mc->mc_space);
506 metaslab_class_get_dspace(metaslab_class_t *mc)
508 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
512 metaslab_class_histogram_verify(metaslab_class_t *mc)
514 spa_t *spa = mc->mc_spa;
515 vdev_t *rvd = spa->spa_root_vdev;
519 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
522 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
525 mutex_enter(&mc->mc_lock);
526 for (int c = 0; c < rvd->vdev_children; c++) {
527 vdev_t *tvd = rvd->vdev_child[c];
528 metaslab_group_t *mg = vdev_get_mg(tvd, mc);
531 * Skip any holes, uninitialized top-levels, or
532 * vdevs that are not in this metaslab class.
534 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
535 mg->mg_class != mc) {
539 IMPLY(mg == mg->mg_vd->vdev_log_mg,
540 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
542 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
543 mc_hist[i] += mg->mg_histogram[i];
546 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
547 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
550 mutex_exit(&mc->mc_lock);
551 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
555 * Calculate the metaslab class's fragmentation metric. The metric
556 * is weighted based on the space contribution of each metaslab group.
557 * The return value will be a number between 0 and 100 (inclusive), or
558 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
559 * zfs_frag_table for more information about the metric.
562 metaslab_class_fragmentation(metaslab_class_t *mc)
564 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
565 uint64_t fragmentation = 0;
567 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
569 for (int c = 0; c < rvd->vdev_children; c++) {
570 vdev_t *tvd = rvd->vdev_child[c];
571 metaslab_group_t *mg = tvd->vdev_mg;
574 * Skip any holes, uninitialized top-levels,
575 * or vdevs that are not in this metaslab class.
577 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
578 mg->mg_class != mc) {
583 * If a metaslab group does not contain a fragmentation
584 * metric then just bail out.
586 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
587 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
588 return (ZFS_FRAG_INVALID);
592 * Determine how much this metaslab_group is contributing
593 * to the overall pool fragmentation metric.
595 fragmentation += mg->mg_fragmentation *
596 metaslab_group_get_space(mg);
598 fragmentation /= metaslab_class_get_space(mc);
600 ASSERT3U(fragmentation, <=, 100);
601 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
602 return (fragmentation);
606 * Calculate the amount of expandable space that is available in
607 * this metaslab class. If a device is expanded then its expandable
608 * space will be the amount of allocatable space that is currently not
609 * part of this metaslab class.
612 metaslab_class_expandable_space(metaslab_class_t *mc)
614 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
617 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
618 for (int c = 0; c < rvd->vdev_children; c++) {
619 vdev_t *tvd = rvd->vdev_child[c];
620 metaslab_group_t *mg = tvd->vdev_mg;
622 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
623 mg->mg_class != mc) {
628 * Calculate if we have enough space to add additional
629 * metaslabs. We report the expandable space in terms
630 * of the metaslab size since that's the unit of expansion.
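* For example (purely illustrative): with 1G metaslabs (a vdev_ms_shift of
* 30), a device whose max_asize exceeds its current asize by 2.5G would
* report 2G of expandable space here, since P2ALIGN rounds down to a
* whole number of metaslabs.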
632 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
633 1ULL << tvd->vdev_ms_shift);
635 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
640 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
642 multilist_t *ml = mc->mc_metaslab_txg_list;
643 for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
644 multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
645 metaslab_t *msp = multilist_sublist_head(mls);
646 multilist_sublist_unlock(mls);
647 while (msp != NULL) {
648 mutex_enter(&msp->ms_lock);
651 * If the metaslab has been removed from the list
652 * (which could happen if we were at the memory limit
653 * and it was evicted during this loop), then we can't
654 * proceed and we should restart the sublist.
656 if (!multilist_link_active(&msp->ms_class_txg_node)) {
657 mutex_exit(&msp->ms_lock);
661 mls = multilist_sublist_lock(ml, i);
662 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
663 multilist_sublist_unlock(mls);
665 msp->ms_selected_txg + metaslab_unload_delay &&
666 gethrtime() > msp->ms_selected_time +
667 (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
668 metaslab_evict(msp, txg);
671 * Once we've hit a metaslab selected too
672 * recently to evict, we're done evicting for
675 mutex_exit(&msp->ms_lock);
678 mutex_exit(&msp->ms_lock);
685 metaslab_compare(const void *x1, const void *x2)
687 const metaslab_t *m1 = (const metaslab_t *)x1;
688 const metaslab_t *m2 = (const metaslab_t *)x2;
692 if (m1->ms_allocator != -1 && m1->ms_primary)
694 else if (m1->ms_allocator != -1 && !m1->ms_primary)
696 if (m2->ms_allocator != -1 && m2->ms_primary)
698 else if (m2->ms_allocator != -1 && !m2->ms_primary)
702 * Sort inactive metaslabs first, then primaries, then secondaries. When
703 * selecting a metaslab to allocate from, an allocator first tries its
704 * primary, then secondary active metaslab. If it doesn't have active
705 * metaslabs, or can't allocate from them, it searches for an inactive
706 * metaslab to activate. If it can't find a suitable one, it will steal
707 * a primary or secondary metaslab from another allocator.
714 int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
718 IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
720 return (TREE_CMP(m1->ms_start, m2->ms_start));
724 * ==========================================================================
726 * ==========================================================================
729 * Update the allocatable flag and the metaslab group's capacity.
730 * The allocatable flag is set to true if the capacity is below
731 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
732 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
733 * transitions from allocatable to non-allocatable or vice versa then the
734 * metaslab group's class is updated to reflect the transition.
737 metaslab_group_alloc_update(metaslab_group_t *mg)
739 vdev_t *vd = mg->mg_vd;
740 metaslab_class_t *mc = mg->mg_class;
741 vdev_stat_t *vs = &vd->vdev_stat;
742 boolean_t was_allocatable;
743 boolean_t was_initialized;
745 ASSERT(vd == vd->vdev_top);
746 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
749 mutex_enter(&mg->mg_lock);
750 was_allocatable = mg->mg_allocatable;
751 was_initialized = mg->mg_initialized;
753 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
756 mutex_enter(&mc->mc_lock);
759 * If the metaslab group was just added then it won't
760 * have any space until we finish syncing out this txg.
761 * At that point we will consider it initialized and available
762 * for allocations. We also don't consider non-activated
763 * metaslab groups (e.g. vdevs that are in the middle of being removed)
764 * to be initialized, because they can't be used for allocation.
766 mg->mg_initialized = metaslab_group_initialized(mg);
767 if (!was_initialized && mg->mg_initialized) {
769 } else if (was_initialized && !mg->mg_initialized) {
770 ASSERT3U(mc->mc_groups, >, 0);
773 if (mg->mg_initialized)
774 mg->mg_no_free_space = B_FALSE;
777 * A metaslab group is considered allocatable if it has plenty
778 * of free space or is not heavily fragmented. We only take
779 * fragmentation into account if the metaslab group has a valid
780 * fragmentation metric (i.e. a value between 0 and 100).
782 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
783 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
784 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
785 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
788 * The mc_alloc_groups maintains a count of the number of
789 * groups in this metaslab class that are still above the
790 * zfs_mg_noalloc_threshold. This is used by the allocating
791 * threads to determine if they should avoid allocations to
792 * a given group. The allocator will avoid allocations to a group
793 * if that group has reached or is below the zfs_mg_noalloc_threshold
794 * and there are still other groups that are above the threshold.
795 * When a group transitions from allocatable to non-allocatable or
796 * vice versa we update the metaslab class to reflect that change.
797 * When the mc_alloc_groups value drops to 0 that means that all
798 * groups have reached the zfs_mg_noalloc_threshold making all groups
799 * eligible for allocations. This effectively means that all devices
800 * are balanced again.
802 if (was_allocatable && !mg->mg_allocatable)
803 mc->mc_alloc_groups--;
804 else if (!was_allocatable && mg->mg_allocatable)
805 mc->mc_alloc_groups++;
806 mutex_exit(&mc->mc_lock);
808 mutex_exit(&mg->mg_lock);
812 metaslab_sort_by_flushed(const void *va, const void *vb)
814 const metaslab_t *a = va;
815 const metaslab_t *b = vb;
817 int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
821 uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
822 uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
823 cmp = TREE_CMP(a_vdev_id, b_vdev_id);
827 return (TREE_CMP(a->ms_id, b->ms_id));
831 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
833 metaslab_group_t *mg;
835 mg = kmem_zalloc(offsetof(metaslab_group_t,
836 mg_allocator[allocators]), KM_SLEEP);
837 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
838 mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
839 cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
840 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
841 sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
844 mg->mg_activation_count = 0;
845 mg->mg_initialized = B_FALSE;
846 mg->mg_no_free_space = B_TRUE;
847 mg->mg_allocators = allocators;
849 for (int i = 0; i < allocators; i++) {
850 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
851 zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
854 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
855 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
861 metaslab_group_destroy(metaslab_group_t *mg)
863 ASSERT(mg->mg_prev == NULL);
864 ASSERT(mg->mg_next == NULL);
866 * We may have gone below zero with the activation count
867 * either because we never activated in the first place or
868 * because we're done, and possibly removing the vdev.
870 ASSERT(mg->mg_activation_count <= 0);
872 taskq_destroy(mg->mg_taskq);
873 avl_destroy(&mg->mg_metaslab_tree);
874 mutex_destroy(&mg->mg_lock);
875 mutex_destroy(&mg->mg_ms_disabled_lock);
876 cv_destroy(&mg->mg_ms_disabled_cv);
878 for (int i = 0; i < mg->mg_allocators; i++) {
879 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
880 zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
882 kmem_free(mg, offsetof(metaslab_group_t,
883 mg_allocator[mg->mg_allocators]));
887 metaslab_group_activate(metaslab_group_t *mg)
889 metaslab_class_t *mc = mg->mg_class;
890 spa_t *spa = mc->mc_spa;
891 metaslab_group_t *mgprev, *mgnext;
893 ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
895 ASSERT(mg->mg_prev == NULL);
896 ASSERT(mg->mg_next == NULL);
897 ASSERT(mg->mg_activation_count <= 0);
899 if (++mg->mg_activation_count <= 0)
902 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
903 metaslab_group_alloc_update(mg);
905 if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
909 mgnext = mgprev->mg_next;
910 mg->mg_prev = mgprev;
911 mg->mg_next = mgnext;
912 mgprev->mg_next = mg;
913 mgnext->mg_prev = mg;
915 for (int i = 0; i < spa->spa_alloc_count; i++) {
916 mc->mc_allocator[i].mca_rotor = mg;
922 * Passivate a metaslab group and remove it from the allocation rotor.
923 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
924 * a metaslab group. This function will momentarily drop spa_config_locks
925 * that are lower than the SCL_ALLOC lock (see comment below).
928 metaslab_group_passivate(metaslab_group_t *mg)
930 metaslab_class_t *mc = mg->mg_class;
931 spa_t *spa = mc->mc_spa;
932 metaslab_group_t *mgprev, *mgnext;
933 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
935 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
936 (SCL_ALLOC | SCL_ZIO));
938 if (--mg->mg_activation_count != 0) {
939 for (int i = 0; i < spa->spa_alloc_count; i++)
940 ASSERT(mc->mc_allocator[i].mca_rotor != mg);
941 ASSERT(mg->mg_prev == NULL);
942 ASSERT(mg->mg_next == NULL);
943 ASSERT(mg->mg_activation_count < 0);
948 * The spa_config_lock is an array of rwlocks, ordered as
949 * follows (from highest to lowest):
950 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
951 * SCL_ZIO > SCL_FREE > SCL_VDEV
952 * (For more information about the spa_config_lock see spa_misc.c)
953 * The higher the lock, the broader its coverage. When we passivate
954 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
955 * config locks. However, the metaslab group's taskq might be trying
956 * to preload metaslabs so we must drop the SCL_ZIO lock and any
957 * lower locks to allow the I/O to complete. At a minimum,
958 * we continue to hold the SCL_ALLOC lock, which prevents any future
959 * allocations from taking place and any changes to the vdev tree.
961 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
962 taskq_wait_outstanding(mg->mg_taskq, 0);
963 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
964 metaslab_group_alloc_update(mg);
965 for (int i = 0; i < mg->mg_allocators; i++) {
966 metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
967 metaslab_t *msp = mga->mga_primary;
969 mutex_enter(&msp->ms_lock);
970 metaslab_passivate(msp,
971 metaslab_weight_from_range_tree(msp));
972 mutex_exit(&msp->ms_lock);
974 msp = mga->mga_secondary;
976 mutex_enter(&msp->ms_lock);
977 metaslab_passivate(msp,
978 metaslab_weight_from_range_tree(msp));
979 mutex_exit(&msp->ms_lock);
983 mgprev = mg->mg_prev;
984 mgnext = mg->mg_next;
989 mgprev->mg_next = mgnext;
990 mgnext->mg_prev = mgprev;
992 for (int i = 0; i < spa->spa_alloc_count; i++) {
993 if (mc->mc_allocator[i].mca_rotor == mg)
994 mc->mc_allocator[i].mca_rotor = mgnext;
1002 metaslab_group_initialized(metaslab_group_t *mg)
1004 vdev_t *vd = mg->mg_vd;
1005 vdev_stat_t *vs = &vd->vdev_stat;
1007 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1011 metaslab_group_get_space(metaslab_group_t *mg)
1014 * Note that the number of nodes in mg_metaslab_tree may be one less
1015 * than vdev_ms_count, due to the embedded log metaslab.
1017 mutex_enter(&mg->mg_lock);
1018 uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
1019 mutex_exit(&mg->mg_lock);
1020 return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
1024 metaslab_group_histogram_verify(metaslab_group_t *mg)
1027 avl_tree_t *t = &mg->mg_metaslab_tree;
1028 uint64_t ashift = mg->mg_vd->vdev_ashift;
1030 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1033 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
1036 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
1037 SPACE_MAP_HISTOGRAM_SIZE + ashift);
1039 mutex_enter(&mg->mg_lock);
1040 for (metaslab_t *msp = avl_first(t);
1041 msp != NULL; msp = AVL_NEXT(t, msp)) {
1042 VERIFY3P(msp->ms_group, ==, mg);
1043 /* skip if not active */
1044 if (msp->ms_sm == NULL)
1047 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1048 mg_hist[i + ashift] +=
1049 msp->ms_sm->sm_phys->smp_histogram[i];
1053 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
1054 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1056 mutex_exit(&mg->mg_lock);
1058 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
1062 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1064 metaslab_class_t *mc = mg->mg_class;
1065 uint64_t ashift = mg->mg_vd->vdev_ashift;
1067 ASSERT(MUTEX_HELD(&msp->ms_lock));
1068 if (msp->ms_sm == NULL)
1071 mutex_enter(&mg->mg_lock);
1072 mutex_enter(&mc->mc_lock);
1073 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1074 IMPLY(mg == mg->mg_vd->vdev_log_mg,
1075 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1076 mg->mg_histogram[i + ashift] +=
1077 msp->ms_sm->sm_phys->smp_histogram[i];
1078 mc->mc_histogram[i + ashift] +=
1079 msp->ms_sm->sm_phys->smp_histogram[i];
1081 mutex_exit(&mc->mc_lock);
1082 mutex_exit(&mg->mg_lock);
1086 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1088 metaslab_class_t *mc = mg->mg_class;
1089 uint64_t ashift = mg->mg_vd->vdev_ashift;
1091 ASSERT(MUTEX_HELD(&msp->ms_lock));
1092 if (msp->ms_sm == NULL)
1095 mutex_enter(&mg->mg_lock);
1096 mutex_enter(&mc->mc_lock);
1097 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1098 ASSERT3U(mg->mg_histogram[i + ashift], >=,
1099 msp->ms_sm->sm_phys->smp_histogram[i]);
1100 ASSERT3U(mc->mc_histogram[i + ashift], >=,
1101 msp->ms_sm->sm_phys->smp_histogram[i]);
1102 IMPLY(mg == mg->mg_vd->vdev_log_mg,
1103 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1105 mg->mg_histogram[i + ashift] -=
1106 msp->ms_sm->sm_phys->smp_histogram[i];
1107 mc->mc_histogram[i + ashift] -=
1108 msp->ms_sm->sm_phys->smp_histogram[i];
1110 mutex_exit(&mc->mc_lock);
1111 mutex_exit(&mg->mg_lock);
1115 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1117 ASSERT(msp->ms_group == NULL);
1118 mutex_enter(&mg->mg_lock);
1121 avl_add(&mg->mg_metaslab_tree, msp);
1122 mutex_exit(&mg->mg_lock);
1124 mutex_enter(&msp->ms_lock);
1125 metaslab_group_histogram_add(mg, msp);
1126 mutex_exit(&msp->ms_lock);
1130 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1132 mutex_enter(&msp->ms_lock);
1133 metaslab_group_histogram_remove(mg, msp);
1134 mutex_exit(&msp->ms_lock);
1136 mutex_enter(&mg->mg_lock);
1137 ASSERT(msp->ms_group == mg);
1138 avl_remove(&mg->mg_metaslab_tree, msp);
1140 metaslab_class_t *mc = msp->ms_group->mg_class;
1141 multilist_sublist_t *mls =
1142 multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
1143 if (multilist_link_active(&msp->ms_class_txg_node))
1144 multilist_sublist_remove(mls, msp);
1145 multilist_sublist_unlock(mls);
1147 msp->ms_group = NULL;
1148 mutex_exit(&mg->mg_lock);
1152 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1154 ASSERT(MUTEX_HELD(&msp->ms_lock));
1155 ASSERT(MUTEX_HELD(&mg->mg_lock));
1156 ASSERT(msp->ms_group == mg);
1158 avl_remove(&mg->mg_metaslab_tree, msp);
1159 msp->ms_weight = weight;
1160 avl_add(&mg->mg_metaslab_tree, msp);
1165 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1168 * Although in principle the weight can be any value, in
1169 * practice we do not use values in the range [1, 511].
1171 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1172 ASSERT(MUTEX_HELD(&msp->ms_lock));
1174 mutex_enter(&mg->mg_lock);
1175 metaslab_group_sort_impl(mg, msp, weight);
1176 mutex_exit(&mg->mg_lock);
1180 * Calculate the fragmentation for a given metaslab group. We can use
1181 * a simple average here since all metaslabs within the group must have
1182 * the same size. The return value will be a value between 0 and 100
1183 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
1184 * group have a fragmentation metric.
1187 metaslab_group_fragmentation(metaslab_group_t *mg)
1189 vdev_t *vd = mg->mg_vd;
1190 uint64_t fragmentation = 0;
1191 uint64_t valid_ms = 0;
1193 for (int m = 0; m < vd->vdev_ms_count; m++) {
1194 metaslab_t *msp = vd->vdev_ms[m];
1196 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1198 if (msp->ms_group != mg)
1202 fragmentation += msp->ms_fragmentation;
1205 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1206 return (ZFS_FRAG_INVALID);
1208 fragmentation /= valid_ms;
1209 ASSERT3U(fragmentation, <=, 100);
1210 return (fragmentation);
1214 * Determine if a given metaslab group should skip allocations. A metaslab
1215 * group should avoid allocations if its free capacity is less than the
1216 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1217 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1218 * that can still handle allocations. If the allocation throttle is enabled
1219 * then we skip allocations to devices that have reached their maximum
1220 * allocation queue depth unless the selected metaslab group is the only
1221 * eligible group remaining.
1224 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1225 uint64_t psize, int allocator, int d)
1227 spa_t *spa = mg->mg_vd->vdev_spa;
1228 metaslab_class_t *mc = mg->mg_class;
1231 * We can only consider skipping this metaslab group if it's
1232 * in the normal metaslab class and there are other metaslab
1233 * groups to select from. Otherwise, we always consider it eligible
1236 if ((mc != spa_normal_class(spa) &&
1237 mc != spa_special_class(spa) &&
1238 mc != spa_dedup_class(spa)) ||
1243 * If the metaslab group's mg_allocatable flag is set (see comments
1244 * in metaslab_group_alloc_update() for more information) and
1245 * the allocation throttle is disabled then allow allocations to this
1246 * device. However, if the allocation throttle is enabled then
1247 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1248 * to determine if we should allow allocations to this metaslab group.
1249 * If all metaslab groups are no longer considered allocatable
1250 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1251 * gang block size then we allow allocations on this metaslab group
1252 * regardless of the mg_allocatable or throttle settings.
1254 if (mg->mg_allocatable) {
1255 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1257 uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1259 if (!mc->mc_alloc_throttle_enabled)
1263 * If this metaslab group does not have any free space, then
1264 * there is no point in looking further.
1266 if (mg->mg_no_free_space)
1270 * Relax allocation throttling for ditto blocks. Due to
1271 * random imbalances in allocation, copies tend to be pushed
1272 * to the one vdev that looks a bit better at the moment.
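* For example, the second copy (d == 1) gets a 25% higher queue-depth limit
* and the third copy (d == 2) gets 50% higher.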
1274 qmax = qmax * (4 + d) / 4;
1276 qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1279 * If this metaslab group is below its qmax or it's
1280 * the only allocatable metaslab group, then attempt
1281 * to allocate from it.
1283 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1285 ASSERT3U(mc->mc_alloc_groups, >, 1);
1288 * Since this metaslab group is at or over its qmax, we
1289 * need to determine if there are metaslab groups after this
1290 * one that might be able to handle this allocation. This is
1291 * racy since we can't hold the locks for all metaslab
1292 * groups at the same time when we make this check.
1294 for (metaslab_group_t *mgp = mg->mg_next;
1295 mgp != rotor; mgp = mgp->mg_next) {
1296 metaslab_group_allocator_t *mgap =
1297 &mgp->mg_allocator[allocator];
1298 qmax = mgap->mga_cur_max_alloc_queue_depth;
1299 qmax = qmax * (4 + d) / 4;
1301 zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1304 * If there is another metaslab group that
1305 * might be able to handle the allocation, then
1306 * we return false so that we skip this group.
1308 if (qdepth < qmax && !mgp->mg_no_free_space)
1313 * We didn't find another group to handle the allocation
1314 * so we can't skip this metaslab group even though
1315 * we are at or over our qmax.
1319 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1326 * ==========================================================================
1327 * Range tree callbacks
1328 * ==========================================================================
1332 * Comparison function for the private size-ordered tree using 32-bit
1333 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1336 metaslab_rangesize32_compare(const void *x1, const void *x2)
1338 const range_seg32_t *r1 = x1;
1339 const range_seg32_t *r2 = x2;
1341 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1342 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1344 int cmp = TREE_CMP(rs_size1, rs_size2);
1348 return (TREE_CMP(r1->rs_start, r2->rs_start));
1352 * Comparison function for the private size-ordered tree using 64-bit
1353 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1356 metaslab_rangesize64_compare(const void *x1, const void *x2)
1358 const range_seg64_t *r1 = x1;
1359 const range_seg64_t *r2 = x2;
1361 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1362 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1364 int cmp = TREE_CMP(rs_size1, rs_size2);
1368 return (TREE_CMP(r1->rs_start, r2->rs_start));
1370 typedef struct metaslab_rt_arg {
1371 zfs_btree_t *mra_bt;
1372 uint32_t mra_floor_shift;
1373 } metaslab_rt_arg_t;
1377 metaslab_rt_arg_t *mra;
1381 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1383 struct mssa_arg *mssap = arg;
1384 range_tree_t *rt = mssap->rt;
1385 metaslab_rt_arg_t *mrap = mssap->mra;
1386 range_seg_max_t seg = {0};
1387 rs_set_start(&seg, rt, start);
1388 rs_set_end(&seg, rt, start + size);
1389 metaslab_rt_add(rt, &seg, mrap);
1393 metaslab_size_tree_full_load(range_tree_t *rt)
1395 metaslab_rt_arg_t *mrap = rt->rt_arg;
1396 METASLABSTAT_BUMP(metaslabstat_reload_tree);
1397 ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1398 mrap->mra_floor_shift = 0;
1399 struct mssa_arg arg = {0};
1402 range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1406 * Create any block allocator specific components. The current allocators
1407 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1411 metaslab_rt_create(range_tree_t *rt, void *arg)
1413 metaslab_rt_arg_t *mrap = arg;
1414 zfs_btree_t *size_tree = mrap->mra_bt;
1417 int (*compare) (const void *, const void *);
1418 switch (rt->rt_type) {
1420 size = sizeof (range_seg32_t);
1421 compare = metaslab_rangesize32_compare;
1424 size = sizeof (range_seg64_t);
1425 compare = metaslab_rangesize64_compare;
1428 panic("Invalid range seg type %d", rt->rt_type);
1430 zfs_btree_create(size_tree, compare, size);
1431 mrap->mra_floor_shift = metaslab_by_size_min_shift;
1436 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1438 metaslab_rt_arg_t *mrap = arg;
1439 zfs_btree_t *size_tree = mrap->mra_bt;
1441 zfs_btree_destroy(size_tree);
1442 kmem_free(mrap, sizeof (*mrap));
1447 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1449 metaslab_rt_arg_t *mrap = arg;
1450 zfs_btree_t *size_tree = mrap->mra_bt;
1452 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
1453 (1 << mrap->mra_floor_shift))
1456 zfs_btree_add(size_tree, rs);
1461 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1463 metaslab_rt_arg_t *mrap = arg;
1464 zfs_btree_t *size_tree = mrap->mra_bt;
1466 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
1467 mrap->mra_floor_shift))
1470 zfs_btree_remove(size_tree, rs);
1475 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1477 metaslab_rt_arg_t *mrap = arg;
1478 zfs_btree_t *size_tree = mrap->mra_bt;
1479 zfs_btree_clear(size_tree);
1480 zfs_btree_destroy(size_tree);
1482 metaslab_rt_create(rt, arg);
1485 static range_tree_ops_t metaslab_rt_ops = {
1486 .rtop_create = metaslab_rt_create,
1487 .rtop_destroy = metaslab_rt_destroy,
1488 .rtop_add = metaslab_rt_add,
1489 .rtop_remove = metaslab_rt_remove,
1490 .rtop_vacate = metaslab_rt_vacate
1494 * ==========================================================================
1495 * Common allocator routines
1496 * ==========================================================================
1500 * Return the maximum contiguous segment within the metaslab.
1503 metaslab_largest_allocatable(metaslab_t *msp)
1505 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1510 if (zfs_btree_numnodes(t) == 0)
1511 metaslab_size_tree_full_load(msp->ms_allocatable);
1513 rs = zfs_btree_last(t, NULL);
1517 return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
1518 msp->ms_allocatable));
1522 * Return the maximum contiguous segment within the unflushed frees of this
1526 metaslab_largest_unflushed_free(metaslab_t *msp)
1528 ASSERT(MUTEX_HELD(&msp->ms_lock));
1530 if (msp->ms_unflushed_frees == NULL)
1533 if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1534 metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1535 range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1541 * When a range is freed from the metaslab, that range is added to
1542 * both the unflushed frees and the deferred frees. While the block
1543 * will eventually be usable, if the metaslab were loaded the range
1544 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1545 * txgs had passed. As a result, when attempting to estimate an upper
1546 * bound for the largest currently-usable free segment in the
1547 * metaslab, we need to not consider any ranges currently in the defer
1548 * trees. This algorithm approximates the largest available chunk in
1549 * the largest range in the unflushed_frees tree by taking the first
1550 * chunk. While this may be a poor estimate, it should only remain so
1551 * briefly and should eventually self-correct as frees are no longer
1552 * deferred. Similar logic applies to the ms_freed tree. See
1553 * metaslab_load() for more details.
1555 * There are two primary sources of inaccuracy in this estimate. Both
1556 * are tolerated for performance reasons. The first source is that we
1557 * only check the largest segment for overlaps. Smaller segments may
1558 * have more favorable overlaps with the other trees, resulting in
1559 * larger usable chunks. Second, we only look at the first chunk in
1560 * the largest segment; there may be other usable chunks in the
1561 * largest segment, but we ignore them.
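* For example (illustrative): if the largest unflushed-free segment spans
* [1M, 2M) and a deferred free overlapping it starts at 1.5M, the estimate
* computed below is clipped to 512K.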
1563 uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
1564 uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1565 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1568 boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1569 rsize, &start, &size);
1571 if (rstart == start)
1573 rsize = start - rstart;
1579 boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
1580 rsize, &start, &size);
1582 rsize = start - rstart;
1587 static range_seg_t *
1588 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1589 uint64_t size, zfs_btree_index_t *where)
1592 range_seg_max_t rsearch;
1594 rs_set_start(&rsearch, rt, start);
1595 rs_set_end(&rsearch, rt, start + size);
1597 rs = zfs_btree_find(t, &rsearch, where);
1599 rs = zfs_btree_next(t, where, where);
1605 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1606 defined(WITH_CF_BLOCK_ALLOCATOR)
1609 * This is a helper function that can be used by the allocator to find a
1610 * suitable block to allocate. This will search the specified B-tree looking
1611 * for a block that matches the specified criteria.
1614 metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
1615 uint64_t max_search)
1618 *cursor = rt->rt_start;
1619 zfs_btree_t *bt = &rt->rt_root;
1620 zfs_btree_index_t where;
1621 range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
1622 uint64_t first_found;
1623 int count_searched = 0;
1626 first_found = rs_get_start(rs, rt);
1628 while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
1629 max_search || count_searched < metaslab_min_search_count)) {
1630 uint64_t offset = rs_get_start(rs, rt);
1631 if (offset + size <= rs_get_end(rs, rt)) {
1632 *cursor = offset + size;
1635 rs = zfs_btree_next(bt, &where, &where);
1642 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1644 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1646 * ==========================================================================
1647 * Dynamic Fit (df) block allocator
1649 * Search for a free chunk of at least this size, starting from the last
1650 * offset (for this alignment of block) looking for up to
1651 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1652 * found within 16MB, then return a free chunk of exactly the requested size (or larger).
1655 * If it seems like searching from the last offset will be unproductive, skip
1656 * that and just return a free chunk of exactly the requested size (or larger).
1657 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1658 * mechanism is probably not very useful and may be removed in the future.
1660 * The behavior when not searching can be changed to return the largest free
1661 * chunk, instead of a free chunk of exactly the requested size, by setting
1662 * metaslab_df_use_largest_segment.
1663 * ==========================================================================
1666 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1669 * Find the largest power of 2 block size that evenly divides the
1670 * requested size. This is used to try to allocate blocks with similar
1671 * alignment from the same area of the metaslab (i.e. same cursor
1672 * bucket), but it does not prevent allocations of other sizes from
1673 * being made in the same region.
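* For example, a 24K request yields align == 8K (the largest power of two
* dividing 24K), so it shares a cursor bucket with other 8K-aligned
* allocations.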
1675 uint64_t align = size & -size;
1676 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1677 range_tree_t *rt = msp->ms_allocatable;
1678 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1681 ASSERT(MUTEX_HELD(&msp->ms_lock));
1684 * If we're running low on space, find a segment based on size,
1685 * rather than iterating based on offset.
1687 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1688 free_pct < metaslab_df_free_pct) {
1691 offset = metaslab_block_picker(rt,
1692 cursor, size, metaslab_df_max_search);
1697 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1698 metaslab_size_tree_full_load(msp->ms_allocatable);
1700 if (metaslab_df_use_largest_segment) {
1701 /* use largest free segment */
1702 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1704 zfs_btree_index_t where;
1705 /* use segment of this size, or next largest */
1706 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1707 rt, msp->ms_start, size, &where);
1709 if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1711 offset = rs_get_start(rs, rt);
1712 *cursor = offset + size;
1719 static metaslab_ops_t metaslab_df_ops = {
1723 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1724 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1726 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1728 * ==========================================================================
1729 * Cursor fit block allocator -
1730 * Select the largest region in the metaslab, set the cursor to the beginning
1731 * of the range and the cursor_end to the end of the range. As allocations
1732 * are made advance the cursor. Continue allocating from the cursor until
1733 * the range is exhausted and then find a new range.
1734 * ==========================================================================
1737 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1739 range_tree_t *rt = msp->ms_allocatable;
1740 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1741 uint64_t *cursor = &msp->ms_lbas[0];
1742 uint64_t *cursor_end = &msp->ms_lbas[1];
1743 uint64_t offset = 0;
1745 ASSERT(MUTEX_HELD(&msp->ms_lock));
1747 ASSERT3U(*cursor_end, >=, *cursor);
1749 if ((*cursor + size) > *cursor_end) {
1752 if (zfs_btree_numnodes(t) == 0)
1753 metaslab_size_tree_full_load(msp->ms_allocatable);
1754 rs = zfs_btree_last(t, NULL);
1755 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1759 *cursor = rs_get_start(rs, rt);
1760 *cursor_end = rs_get_end(rs, rt);
1769 static metaslab_ops_t metaslab_cf_ops = {
1773 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1774 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1776 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1778 * ==========================================================================
1779 * New dynamic fit allocator -
1780 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1781 * contiguous blocks. If no region is found then just use the largest segment
1783 * ==========================================================================
1787 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1788 * to request from the allocator.
1790 uint64_t metaslab_ndf_clump_shift = 4;
1793 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1795 zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1796 range_tree_t *rt = msp->ms_allocatable;
1797 zfs_btree_index_t where;
1799 range_seg_max_t rsearch;
1800 uint64_t hbit = highbit64(size);
1801 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1802 uint64_t max_size = metaslab_largest_allocatable(msp);
1804 ASSERT(MUTEX_HELD(&msp->ms_lock));
1806 if (max_size < size)
1809 rs_set_start(&rsearch, rt, *cursor);
1810 rs_set_end(&rsearch, rt, *cursor + size);
1812 rs = zfs_btree_find(t, &rsearch, &where);
1813 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1814 t = &msp->ms_allocatable_by_size;
1816 rs_set_start(&rsearch, rt, 0);
1817 rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1818 metaslab_ndf_clump_shift)));
1820 rs = zfs_btree_find(t, &rsearch, &where);
1822 rs = zfs_btree_next(t, &where, &where);
1826 if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1827 *cursor = rs_get_start(rs, rt) + size;
1828 return (rs_get_start(rs, rt));
1833 static metaslab_ops_t metaslab_ndf_ops = {
1837 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1838 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1842 * ==========================================================================
1844 * ==========================================================================
1848 * Wait for any in-progress metaslab loads to complete.
1851 metaslab_load_wait(metaslab_t *msp)
1853 ASSERT(MUTEX_HELD(&msp->ms_lock));
1855 while (msp->ms_loading) {
1856 ASSERT(!msp->ms_loaded);
1857 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1862 * Wait for any in-progress flushing to complete.
1865 metaslab_flush_wait(metaslab_t *msp)
1867 ASSERT(MUTEX_HELD(&msp->ms_lock));
1869 while (msp->ms_flushing)
1870 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1874 metaslab_idx_func(multilist_t *ml, void *arg)
1876 metaslab_t *msp = arg;
1877 return (msp->ms_id % multilist_get_num_sublists(ml));
1881 metaslab_allocated_space(metaslab_t *msp)
1883 return (msp->ms_allocated_space);
1887 * Verify that the space accounting on disk matches the in-core range_trees.
1890 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1892 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1893 uint64_t allocating = 0;
1894 uint64_t sm_free_space, msp_free_space;
1896 ASSERT(MUTEX_HELD(&msp->ms_lock));
1897 ASSERT(!msp->ms_condensing);
1899 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1903 * We can only verify the metaslab space when we're called
1904 * from syncing context with a loaded metaslab that has an
1905 * allocated space map. Calling this in non-syncing context
1906 * does not provide a consistent view of the metaslab since
1907 * we're performing allocations in the future.
1909 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1914 * Even though the smp_alloc field can get negative,
1915 * when it comes to a metaslab's space map, that should
1916 * never be the case.
1918 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1920 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1921 range_tree_space(msp->ms_unflushed_frees));
1923 ASSERT3U(metaslab_allocated_space(msp), ==,
1924 space_map_allocated(msp->ms_sm) +
1925 range_tree_space(msp->ms_unflushed_allocs) -
1926 range_tree_space(msp->ms_unflushed_frees));
1928 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1931 * Account for future allocations since we would have
1932 * already deducted that space from the ms_allocatable.
1934 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1936 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1938 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
1939 msp->ms_allocating_total);
1941 ASSERT3U(msp->ms_deferspace, ==,
1942 range_tree_space(msp->ms_defer[0]) +
1943 range_tree_space(msp->ms_defer[1]));
1945 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1946 msp->ms_deferspace + range_tree_space(msp->ms_freed);
1948 VERIFY3U(sm_free_space, ==, msp_free_space);
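/*
 * Illustrative sketch (not part of the build): the accounting identity that
 * metaslab_verify_space() asserts, reduced to plain integers. The example_*
 * struct and function are hypothetical and only mirror the relationships
 * between the fields used above.
 */
#if 0
#include <stdint.h>
#include <assert.h>

struct example_ms_space {
	uint64_t ms_size;	/* total metaslab size */
	uint64_t allocated;	/* metaslab_allocated_space() */
	uint64_t allocatable;	/* ms_allocatable */
	uint64_t allocating;	/* sum of ms_allocating[] for open TXGs */
	uint64_t deferspace;	/* ms_defer[0] + ms_defer[1] */
	uint64_t freed;		/* ms_freed */
};

static void
example_verify_space(const struct example_ms_space *s)
{
	uint64_t sm_free = s->ms_size - s->allocated;
	uint64_t ms_free = s->allocatable + s->allocating +
	    s->deferspace + s->freed;

	/* Free space per the space map must match the range trees. */
	assert(sm_free == ms_free);
}
#endif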
1952 metaslab_aux_histograms_clear(metaslab_t *msp)
1955 * Auxiliary histograms are only cleared when resetting them,
1956 * which can only happen while the metaslab is loaded.
1958 ASSERT(msp->ms_loaded);
1960 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1961 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1962 bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
1966 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1970 * This is modeled after space_map_histogram_add(), so refer to that
1971 * function for implementation details. We want this to work like
1972 * the space map histogram, and not the range tree histogram, as we
1973 * are essentially constructing a delta that will be later subtracted
1974 * from the space map histogram.
1977 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1978 ASSERT3U(i, >=, idx + shift);
1979 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1981 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1982 ASSERT3U(idx + shift, ==, i);
1984 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
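/*
 * Illustrative sketch (not part of the build): how the range-tree histogram
 * is folded into the coarser, space-map-sized histogram above. Buckets beyond
 * the last space map bucket are scaled (left-shifted) so that one large
 * segment counts as the equivalent number of top-bucket segments. The
 * example_* names and sizes are hypothetical.
 */
#if 0
#include <stdint.h>

#define	EXAMPLE_RT_HIST_SIZE	64
#define	EXAMPLE_SM_HIST_SIZE	32

static void
example_aux_histogram_add(uint64_t *histogram, uint64_t shift,
    const uint64_t *rt_histogram)
{
	int idx = 0;

	for (int i = shift; i < EXAMPLE_RT_HIST_SIZE; i++) {
		/* While idx tracks i - shift exactly, the shift below is 0. */
		histogram[idx] += rt_histogram[i] << (i - idx - shift);
		if (idx < EXAMPLE_SM_HIST_SIZE - 1)
			idx++;
	}
}
#endif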
1990 * Called at every sync pass in which the metaslab gets synced.
1992 * The reason is that we want our auxiliary histograms to be updated
1993 * whenever the metaslab's space map histogram is updated. This way
1994 * we stay consistent on which parts of the metaslab space map's
1995 * histogram are currently not available for allocations (e.g. because
1996 * they are in the defer, freed, and freeing trees).
1999 metaslab_aux_histograms_update(metaslab_t *msp)
2001 space_map_t *sm = msp->ms_sm;
2005 * This is similar to the metaslab's space map histogram updates
2006 * that take place in metaslab_sync(). The only difference is that
2007 * we only care about segments that haven't made it into the
2008 * ms_allocatable tree yet.
2010 if (msp->ms_loaded) {
2011 metaslab_aux_histograms_clear(msp);
2013 metaslab_aux_histogram_add(msp->ms_synchist,
2014 sm->sm_shift, msp->ms_freed);
2016 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2017 metaslab_aux_histogram_add(msp->ms_deferhist[t],
2018 sm->sm_shift, msp->ms_defer[t]);
2022 metaslab_aux_histogram_add(msp->ms_synchist,
2023 sm->sm_shift, msp->ms_freeing);
2027 * Called every time we are done syncing (writing to) the metaslab,
2028 * i.e. at the end of each sync pass.
2029 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2032 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2034 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2035 space_map_t *sm = msp->ms_sm;
2039 * We came here from metaslab_init() when creating/opening a
2040 * pool, looking at a metaslab that hasn't had any allocations yet.
2047 * This is similar to the actions that we take for the ms_freed
2048 * and ms_defer trees in metaslab_sync_done().
2050 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2051 if (defer_allowed) {
2052 bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
2053 sizeof (msp->ms_synchist));
2055 bzero(msp->ms_deferhist[hist_index],
2056 sizeof (msp->ms_deferhist[hist_index]));
2058 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
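/*
 * Illustrative sketch (not part of the build): the rotation performed above
 * at the end of each sync. The defer-history slot for the syncing TXG either
 * takes over this TXG's sync histogram or is cleared, and the sync histogram
 * is reset for the next TXG. All example_* names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define	EXAMPLE_TXG_DEFER_SIZE	2
#define	EXAMPLE_SM_HIST_SIZE	32

static void
example_histograms_update_done(uint64_t syncing_txg,
    uint64_t synchist[EXAMPLE_SM_HIST_SIZE],
    uint64_t deferhist[EXAMPLE_TXG_DEFER_SIZE][EXAMPLE_SM_HIST_SIZE],
    bool defer_allowed)
{
	uint64_t hist_index = syncing_txg % EXAMPLE_TXG_DEFER_SIZE;

	if (defer_allowed) {
		memcpy(deferhist[hist_index], synchist,
		    sizeof (uint64_t) * EXAMPLE_SM_HIST_SIZE);
	} else {
		memset(deferhist[hist_index], 0,
		    sizeof (uint64_t) * EXAMPLE_SM_HIST_SIZE);
	}
	memset(synchist, 0, sizeof (uint64_t) * EXAMPLE_SM_HIST_SIZE);
}
#endif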
2062 * Ensure that the metaslab's weight and fragmentation are consistent
2063 * with the contents of the histogram (either the range tree's histogram
2064 * or the space map's depending whether the metaslab is loaded).
2067 metaslab_verify_weight_and_frag(metaslab_t *msp)
2069 ASSERT(MUTEX_HELD(&msp->ms_lock));
2071 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2075 * We can end up here from vdev_remove_complete(), in which case we
2076 * cannot do these assertions because we hold spa config locks and
2077 * thus we are not allowed to read from the DMU.
2079 * We check if the metaslab group has been removed and if that's
2080 * the case we return immediately as that would mean that we are
2081 * here from the aforementioned code path.
2083 if (msp->ms_group == NULL)
2087 * Devices being removed always return a weight of 0 and leave
2088 * fragmentation and ms_max_size as is - there is nothing for
2089 * us to verify here.
2091 vdev_t *vd = msp->ms_group->mg_vd;
2092 if (vd->vdev_removing)
2096 * If the metaslab is dirty it probably means that we've done
2097 * some allocations or frees that have changed our histograms
2098 * and thus the weight.
2100 for (int t = 0; t < TXG_SIZE; t++) {
2101 if (txg_list_member(&vd->vdev_ms_list, msp, t))
2106 * This verification checks that our in-memory state is consistent
2107 * with what's on disk. If the pool is read-only then there aren't
2108 * any changes and we just have the initially-loaded state.
2110 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2113 /* Do some extra verification on the in-core tree if possible. */
2114 if (msp->ms_loaded) {
2115 range_tree_stat_verify(msp->ms_allocatable);
2116 VERIFY(space_map_histogram_verify(msp->ms_sm,
2117 msp->ms_allocatable));
2120 uint64_t weight = msp->ms_weight;
2121 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2122 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2123 uint64_t frag = msp->ms_fragmentation;
2124 uint64_t max_segsize = msp->ms_max_size;
2127 msp->ms_fragmentation = 0;
2130 * This function is used for verification purposes and thus should
2131 * not introduce any side-effects/mutations on the system's state.
2133 * Regardless of whether metaslab_weight() thinks this metaslab
2134 * should be active or not, we want to ensure that the actual weight
2135 * (and therefore the value of ms_weight) would be the same if it
2136 * was to be recalculated at this point.
2138 * In addition we set the nodirty flag so metaslab_weight() does
2139 * not dirty the metaslab for future TXGs (e.g. when trying to
2140 * force condensing to upgrade the metaslab spacemaps).
2142 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2144 VERIFY3U(max_segsize, ==, msp->ms_max_size);
2147 * If the weight type changed then there is no point in doing
2148 * verification. Revert fields to their original values.
2150 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2151 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2152 msp->ms_fragmentation = frag;
2153 msp->ms_weight = weight;
2157 VERIFY3U(msp->ms_fragmentation, ==, frag);
2158 VERIFY3U(msp->ms_weight, ==, weight);
2162 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2163 * this class that was used longest ago, and attempt to unload it. We don't
2164 * want to spend too much time in this loop to prevent performance
2165 * degradation, and we expect that most of the time this operation will
2166 * succeed. Between that and the normal unloading processing during txg sync,
2167 * we expect this to keep the metaslab memory usage under control.
2170 metaslab_potentially_evict(metaslab_class_t *mc)
2173 uint64_t allmem = arc_all_memory();
2174 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2175 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2177 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2178 tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2;
2180 unsigned int idx = multilist_get_random_index(
2181 mc->mc_metaslab_txg_list);
2182 multilist_sublist_t *mls =
2183 multilist_sublist_lock(mc->mc_metaslab_txg_list, idx);
2184 metaslab_t *msp = multilist_sublist_head(mls);
2185 multilist_sublist_unlock(mls);
2186 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2188 VERIFY3P(mls, ==, multilist_sublist_lock(
2189 mc->mc_metaslab_txg_list, idx));
2191 metaslab_idx_func(mc->mc_metaslab_txg_list, msp));
2193 if (!multilist_link_active(&msp->ms_class_txg_node)) {
2194 multilist_sublist_unlock(mls);
2197 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2198 multilist_sublist_unlock(mls);
2200 * If the metaslab is currently loading there are two
2201 * cases. If it's the metaslab we're evicting, we
2202 * can't continue on or we'll panic when we attempt to
2203 * recursively lock the mutex. If it's another
2204 * metaslab that's loading, it can be safely skipped,
2205 * since we know it's very new and therefore not a
2206 * good eviction candidate. We check later once the
2207 * lock is held that the metaslab is fully loaded
2208 * before actually unloading it.
2210 if (msp->ms_loading) {
2213 spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2217 * We can't unload metaslabs with no spacemap because
2218 * they're not ready to be unloaded yet. We can't
2219 * unload metaslabs with outstanding allocations
2220 * because doing so could cause the metaslab's weight
2221 * to decrease while it's unloaded, which violates an
2222 * invariant that we use to prevent unnecessary
2223 * loading. We also don't unload metaslabs that are
2224 * currently active because they are high-weight
2225 * metaslabs that are likely to be used in the near future.
2228 mutex_enter(&msp->ms_lock);
2229 if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2230 msp->ms_allocating_total == 0) {
2231 metaslab_unload(msp);
2233 mutex_exit(&msp->ms_lock);
2235 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
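/*
 * Illustrative sketch (not part of the build): the memory-pressure test that
 * drives the eviction loop above. Eviction proceeds while the range-tree
 * btree leaves consume more than zfs_metaslab_mem_limit percent of system
 * memory. example_over_mem_limit() is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
example_over_mem_limit(uint64_t allmem, uint64_t limit_pct,
    uint64_t leaves_inuse, uint64_t leaf_size)
{
	/* True when btree leaf memory exceeds limit_pct% of all memory. */
	return (allmem * limit_pct / 100 < leaves_inuse * leaf_size);
}
#endif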
2242 metaslab_load_impl(metaslab_t *msp)
2246 ASSERT(MUTEX_HELD(&msp->ms_lock));
2247 ASSERT(msp->ms_loading);
2248 ASSERT(!msp->ms_condensing);
2251 * We temporarily drop the lock to unblock other operations while we
2252 * are reading the space map. Therefore, metaslab_sync() and
2253 * metaslab_sync_done() can run at the same time as we do.
2255 * If we are using the log space maps, metaslab_sync() can't write to
2256 * the metaslab's space map while we are loading as we only write to
2257 * it when we are flushing the metaslab, and that can't happen while
2258 * we are loading it.
2260 * If we are not using log space maps though, metaslab_sync() can
2261 * append to the space map while we are loading. Therefore we load
2262 * only entries that existed when we started the load. Additionally,
2263 * metaslab_sync_done() has to wait for the load to complete because
2264 * there are potential races like metaslab_load() loading parts of the
2265 * space map that are currently being appended by metaslab_sync(). If
2266 * we didn't, the ms_allocatable would have entries that
2267 * metaslab_sync_done() would try to re-add later.
2269 * That's why before dropping the lock we remember the synced length
2270 * of the metaslab and read up to that point of the space map,
2271 * ignoring entries appended by metaslab_sync() that happen after we drop the lock.
2274 uint64_t length = msp->ms_synced_length;
2275 mutex_exit(&msp->ms_lock);
2277 hrtime_t load_start = gethrtime();
2278 metaslab_rt_arg_t *mrap;
2279 if (msp->ms_allocatable->rt_arg == NULL) {
2280 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2282 mrap = msp->ms_allocatable->rt_arg;
2283 msp->ms_allocatable->rt_ops = NULL;
2284 msp->ms_allocatable->rt_arg = NULL;
2286 mrap->mra_bt = &msp->ms_allocatable_by_size;
2287 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2289 if (msp->ms_sm != NULL) {
2290 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2293 /* Now, populate the size-sorted tree. */
2294 metaslab_rt_create(msp->ms_allocatable, mrap);
2295 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2296 msp->ms_allocatable->rt_arg = mrap;
2298 struct mssa_arg arg = {0};
2299 arg.rt = msp->ms_allocatable;
2301 range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2305 * Add the size-sorted tree first, since we don't need to load
2306 * the metaslab from the spacemap.
2308 metaslab_rt_create(msp->ms_allocatable, mrap);
2309 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2310 msp->ms_allocatable->rt_arg = mrap;
2312 * The space map has not been allocated yet, so treat
2313 * all the space in the metaslab as free and add it to the
2314 * ms_allocatable tree.
2316 range_tree_add(msp->ms_allocatable,
2317 msp->ms_start, msp->ms_size);
2321 * If the ms_sm doesn't exist, this means that this
2322 * metaslab hasn't gone through metaslab_sync() and
2323 * thus has never been dirtied. So we shouldn't
2324 * expect any unflushed allocs or frees from previous
2327 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2328 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2333 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2334 * changing the ms_sm (or log_sm) and the metaslab's range trees
2335 * while we are about to use them and populate the ms_allocatable.
2336 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2337 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2339 mutex_enter(&msp->ms_sync_lock);
2340 mutex_enter(&msp->ms_lock);
2342 ASSERT(!msp->ms_condensing);
2343 ASSERT(!msp->ms_flushing);
2346 mutex_exit(&msp->ms_sync_lock);
2350 ASSERT3P(msp->ms_group, !=, NULL);
2351 msp->ms_loaded = B_TRUE;
2354 * Apply all the unflushed changes to ms_allocatable right
2355 * away so any manipulations we do below have a clear view
2356 * of what is allocated and what is free.
2358 range_tree_walk(msp->ms_unflushed_allocs,
2359 range_tree_remove, msp->ms_allocatable);
2360 range_tree_walk(msp->ms_unflushed_frees,
2361 range_tree_add, msp->ms_allocatable);
2363 ASSERT3P(msp->ms_group, !=, NULL);
2364 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2365 if (spa_syncing_log_sm(spa) != NULL) {
2366 ASSERT(spa_feature_is_enabled(spa,
2367 SPA_FEATURE_LOG_SPACEMAP));
2370 * If we use a log space map we add all the segments
2371 * that are in ms_unflushed_frees so they are available for allocation.
2374 * ms_allocatable needs to contain all free segments
2375 * that are ready for allocations (thus not segments
2376 * from ms_freeing, ms_freed, and the ms_defer trees).
2377 * But if we grab the lock in this code path at a sync
2378 * pass later than 1, then it also contains the
2379 * segments of ms_freed (they were added to it earlier
2380 * in this path through ms_unflushed_frees). So we
2381 * need to remove all the segments that exist in
2382 * ms_freed from ms_allocatable as they will be added
2383 * later in metaslab_sync_done().
2385 * When there's no log space map, the ms_allocatable
2386 * correctly doesn't contain any segments that exist
2387 * in ms_freed [see ms_synced_length].
2389 range_tree_walk(msp->ms_freed,
2390 range_tree_remove, msp->ms_allocatable);
2394 * If we are not using the log space map, ms_allocatable
2395 * contains the segments that exist in the ms_defer trees
2396 * [see ms_synced_length]. Thus we need to remove them
2397 * from ms_allocatable as they will be added again in
2398 * metaslab_sync_done().
2400 * If we are using the log space map, ms_allocatable still
2401 * contains the segments that exist in the ms_defer trees,
2402 * not because it read them through the ms_sm, but because
2403 * these segments are part of ms_unflushed_frees, whose
2404 * segments we added to ms_allocatable earlier in this code path.
2407 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2408 range_tree_walk(msp->ms_defer[t],
2409 range_tree_remove, msp->ms_allocatable);
2413 * Call metaslab_recalculate_weight_and_sort() now that the
2414 * metaslab is loaded so we get the metaslab's real weight.
2416 * Unless this metaslab was created with older software and
2417 * has not yet been converted to use segment-based weight, we
2418 * expect the new weight to be better or equal to the weight
2419 * that the metaslab had while it was not loaded. This is
2420 * because the old weight does not take into account the
2421 * consolidation of adjacent segments between TXGs. [see
2422 * comment for ms_synchist and ms_deferhist[] for more info]
2424 uint64_t weight = msp->ms_weight;
2425 uint64_t max_size = msp->ms_max_size;
2426 metaslab_recalculate_weight_and_sort(msp);
2427 if (!WEIGHT_IS_SPACEBASED(weight))
2428 ASSERT3U(weight, <=, msp->ms_weight);
2429 msp->ms_max_size = metaslab_largest_allocatable(msp);
2430 ASSERT3U(max_size, <=, msp->ms_max_size);
2431 hrtime_t load_end = gethrtime();
2432 msp->ms_load_time = load_end;
2433 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2434 "ms_id %llu, smp_length %llu, "
2435 "unflushed_allocs %llu, unflushed_frees %llu, "
2436 "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2437 "loading_time %lld ms, ms_max_size %llu, "
2438 "max size error %lld, "
2439 "old_weight %llx, new_weight %llx",
2440 spa_syncing_txg(spa), spa_name(spa),
2441 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
2442 space_map_length(msp->ms_sm),
2443 range_tree_space(msp->ms_unflushed_allocs),
2444 range_tree_space(msp->ms_unflushed_frees),
2445 range_tree_space(msp->ms_freed),
2446 range_tree_space(msp->ms_defer[0]),
2447 range_tree_space(msp->ms_defer[1]),
2448 (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2449 (longlong_t)((load_end - load_start) / 1000000),
2450 msp->ms_max_size, msp->ms_max_size - max_size,
2451 weight, msp->ms_weight);
2453 metaslab_verify_space(msp, spa_syncing_txg(spa));
2454 mutex_exit(&msp->ms_sync_lock);
2459 metaslab_load(metaslab_t *msp)
2461 ASSERT(MUTEX_HELD(&msp->ms_lock));
2464 * There may be another thread loading the same metaslab, if that's
2465 * the case just wait until the other thread is done and return.
2467 metaslab_load_wait(msp);
2470 VERIFY(!msp->ms_loading);
2471 ASSERT(!msp->ms_condensing);
2474 * We set the loading flag BEFORE potentially dropping the lock to
2475 * wait for an ongoing flush (see ms_flushing below). This way other
2476 * threads know that there is already a thread that is loading this
2479 msp->ms_loading = B_TRUE;
2482 * Wait for any in-progress flushing to finish as we drop the ms_lock
2483 * both here (during space_map_load()) and in metaslab_flush() (when
2484 * we flush our changes to the ms_sm).
2486 if (msp->ms_flushing)
2487 metaslab_flush_wait(msp);
2490 * In case we were waiting for the metaslab to be
2491 * flushed (where we temporarily dropped the ms_lock), ensure that
2492 * no one else loaded the metaslab somehow.
2494 ASSERT(!msp->ms_loaded);
2497 * If we're loading a metaslab in the normal class, consider evicting
2498 * another one to keep our memory usage under the limit defined by the
2499 * zfs_metaslab_mem_limit tunable.
2501 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2502 msp->ms_group->mg_class) {
2503 metaslab_potentially_evict(msp->ms_group->mg_class);
2506 int error = metaslab_load_impl(msp);
2508 ASSERT(MUTEX_HELD(&msp->ms_lock));
2509 msp->ms_loading = B_FALSE;
2510 cv_broadcast(&msp->ms_load_cv);
2516 metaslab_unload(metaslab_t *msp)
2518 ASSERT(MUTEX_HELD(&msp->ms_lock));
2521 * This can happen if a metaslab is selected for eviction (in
2522 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2523 * metaslab_class_evict_old).
2525 if (!msp->ms_loaded)
2528 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2529 msp->ms_loaded = B_FALSE;
2530 msp->ms_unload_time = gethrtime();
2532 msp->ms_activation_weight = 0;
2533 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2535 if (msp->ms_group != NULL) {
2536 metaslab_class_t *mc = msp->ms_group->mg_class;
2537 multilist_sublist_t *mls =
2538 multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
2539 if (multilist_link_active(&msp->ms_class_txg_node))
2540 multilist_sublist_remove(mls, msp);
2541 multilist_sublist_unlock(mls);
2543 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2544 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2545 "ms_id %llu, weight %llx, "
2546 "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2547 "loaded %llu ms ago, max_size %llu",
2548 spa_syncing_txg(spa), spa_name(spa),
2549 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
2551 msp->ms_selected_txg,
2552 (msp->ms_unload_time - msp->ms_selected_time) / 1000 / 1000,
2554 (msp->ms_unload_time - msp->ms_load_time) / 1000 / 1000,
2559 * We explicitly recalculate the metaslab's weight based on its space
2560 * map (as it is now not loaded). We want unloaded metaslabs to always
2561 * have their weights calculated from the space map histograms, while
2562 * loaded ones have it calculated from their in-core range tree
2563 * [see metaslab_load()]. This way, the weight reflects the information
2564 * available in-core, whether it is loaded or not.
2566 * If ms_group == NULL, it means that we came here from metaslab_fini(),
2567 * at which point it doesn't make sense for us to do the recalculation
2570 if (msp->ms_group != NULL)
2571 metaslab_recalculate_weight_and_sort(msp);
2575 * We want to optimize the memory use of the per-metaslab range
2576 * trees. To do this, we store the segments in the range trees in
2577 * units of sectors, zero-indexed from the start of the metaslab. If
2578 * vdev_ms_shift minus vdev_ashift is less than 32, we can store
2579 * the ranges using two uint32_ts, rather than two uint64_ts.
2582 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2583 uint64_t *start, uint64_t *shift)
2585 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2586 !zfs_metaslab_force_large_segs) {
2587 *shift = vdev->vdev_ashift;
2588 *start = msp->ms_start;
2589 return (RANGE_SEG32);
2593 return (RANGE_SEG64);
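/*
 * Illustrative sketch (not part of the build): the decision made by
 * metaslab_calculate_range_tree_type(), reduced to the shift arithmetic.
 * When a metaslab spans fewer than 2^32 sectors, segment offsets (relative
 * to ms_start and expressed in ashift-sized units) fit in 32 bits. The
 * example_* names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
example_use_32bit_segments(uint64_t ms_shift, uint64_t ashift,
    bool force_large_segs)
{
	/* A metaslab holds 2^(ms_shift - ashift) sectors. */
	return (ms_shift - ashift < 32 && !force_large_segs);
}
#endif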
2598 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2600 ASSERT(MUTEX_HELD(&msp->ms_lock));
2601 metaslab_class_t *mc = msp->ms_group->mg_class;
2602 multilist_sublist_t *mls =
2603 multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
2604 if (multilist_link_active(&msp->ms_class_txg_node))
2605 multilist_sublist_remove(mls, msp);
2606 msp->ms_selected_txg = txg;
2607 msp->ms_selected_time = gethrtime();
2608 multilist_sublist_insert_tail(mls, msp);
2609 multilist_sublist_unlock(mls);
2613 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2614 int64_t defer_delta, int64_t space_delta)
2616 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2618 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2619 ASSERT(vd->vdev_ms_count != 0);
2621 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2622 vdev_deflated_space(vd, space_delta));
2626 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2627 uint64_t txg, metaslab_t **msp)
2629 vdev_t *vd = mg->mg_vd;
2630 spa_t *spa = vd->vdev_spa;
2631 objset_t *mos = spa->spa_meta_objset;
2635 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2636 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2637 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2638 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2639 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2640 multilist_link_init(&ms->ms_class_txg_node);
2643 ms->ms_start = id << vd->vdev_ms_shift;
2644 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2645 ms->ms_allocator = -1;
2646 ms->ms_new = B_TRUE;
2648 vdev_ops_t *ops = vd->vdev_ops;
2649 if (ops->vdev_op_metaslab_init != NULL)
2650 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2653 * We only open space map objects that already exist. All others
2654 * will be opened when we finally allocate an object for it.
2657 * When called from vdev_expand(), we can't call into the DMU as
2658 * we are holding the spa_config_lock as a writer and we would
2659 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2660 * that case, the object parameter is zero though, so we won't
2661 * call into the DMU.
2664 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2665 ms->ms_size, vd->vdev_ashift);
2668 kmem_free(ms, sizeof (metaslab_t));
2672 ASSERT(ms->ms_sm != NULL);
2673 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2676 uint64_t shift, start;
2677 range_seg_type_t type =
2678 metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2680 ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2681 for (int t = 0; t < TXG_SIZE; t++) {
2682 ms->ms_allocating[t] = range_tree_create(NULL, type,
2683 NULL, start, shift);
2685 ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2686 ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2687 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2688 ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2691 ms->ms_checkpointing =
2692 range_tree_create(NULL, type, NULL, start, shift);
2693 ms->ms_unflushed_allocs =
2694 range_tree_create(NULL, type, NULL, start, shift);
2696 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2697 mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2698 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2699 ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2700 type, mrap, start, shift);
2702 ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2704 metaslab_group_add(mg, ms);
2705 metaslab_set_fragmentation(ms, B_FALSE);
2708 * If we're opening an existing pool (txg == 0) or creating
2709 * a new one (txg == TXG_INITIAL), all space is available now.
2710 * If we're adding space to an existing pool, the new space
2711 * does not become available until after this txg has synced.
2712 * The metaslab's weight will also be initialized when we sync
2713 * out this txg. This ensures that we don't attempt to allocate
2714 * from it before we have initialized it completely.
2716 if (txg <= TXG_INITIAL) {
2717 metaslab_sync_done(ms, 0);
2718 metaslab_space_update(vd, mg->mg_class,
2719 metaslab_allocated_space(ms), 0, 0);
2723 vdev_dirty(vd, 0, NULL, txg);
2724 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2733 metaslab_fini_flush_data(metaslab_t *msp)
2735 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2737 if (metaslab_unflushed_txg(msp) == 0) {
2738 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2742 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2744 mutex_enter(&spa->spa_flushed_ms_lock);
2745 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2746 mutex_exit(&spa->spa_flushed_ms_lock);
2748 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2749 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2753 metaslab_unflushed_changes_memused(metaslab_t *ms)
2755 return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2756 range_tree_numsegs(ms->ms_unflushed_frees)) *
2757 ms->ms_unflushed_allocs->rt_root.bt_elem_size);
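/*
 * Illustrative sketch (not part of the build): the memory accounting
 * performed by metaslab_unflushed_changes_memused() above. Each unflushed
 * segment costs one btree element, so the total is simply the segment count
 * times the element size. The example_* name is hypothetical.
 */
#if 0
#include <stdint.h>

static uint64_t
example_unflushed_memused(uint64_t nsegs_allocs, uint64_t nsegs_frees,
    uint64_t btree_elem_size)
{
	return ((nsegs_allocs + nsegs_frees) * btree_elem_size);
}
#endif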
2761 metaslab_fini(metaslab_t *msp)
2763 metaslab_group_t *mg = msp->ms_group;
2764 vdev_t *vd = mg->mg_vd;
2765 spa_t *spa = vd->vdev_spa;
2767 metaslab_fini_flush_data(msp);
2769 metaslab_group_remove(mg, msp);
2771 mutex_enter(&msp->ms_lock);
2772 VERIFY(msp->ms_group == NULL);
2775 * If this metaslab hasn't been through metaslab_sync_done() yet its
2776 * space hasn't been accounted for in its vdev and doesn't need to be
2780 metaslab_space_update(vd, mg->mg_class,
2781 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2784 space_map_close(msp->ms_sm);
2787 metaslab_unload(msp);
2789 range_tree_destroy(msp->ms_allocatable);
2790 range_tree_destroy(msp->ms_freeing);
2791 range_tree_destroy(msp->ms_freed);
2793 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2794 metaslab_unflushed_changes_memused(msp));
2795 spa->spa_unflushed_stats.sus_memused -=
2796 metaslab_unflushed_changes_memused(msp);
2797 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2798 range_tree_destroy(msp->ms_unflushed_allocs);
2799 range_tree_destroy(msp->ms_checkpointing);
2800 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2801 range_tree_destroy(msp->ms_unflushed_frees);
2803 for (int t = 0; t < TXG_SIZE; t++) {
2804 range_tree_destroy(msp->ms_allocating[t]);
2806 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2807 range_tree_destroy(msp->ms_defer[t]);
2809 ASSERT0(msp->ms_deferspace);
2811 for (int t = 0; t < TXG_SIZE; t++)
2812 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2814 range_tree_vacate(msp->ms_trim, NULL, NULL);
2815 range_tree_destroy(msp->ms_trim);
2817 mutex_exit(&msp->ms_lock);
2818 cv_destroy(&msp->ms_load_cv);
2819 cv_destroy(&msp->ms_flush_cv);
2820 mutex_destroy(&msp->ms_lock);
2821 mutex_destroy(&msp->ms_sync_lock);
2822 ASSERT3U(msp->ms_allocator, ==, -1);
2824 kmem_free(msp, sizeof (metaslab_t));
2827 #define FRAGMENTATION_TABLE_SIZE 17
2830 * This table defines a segment size based fragmentation metric that will
2831 * allow each metaslab to derive its own fragmentation value. This is done
2832 * by calculating the space in each bucket of the spacemap histogram and
2833 * multiplying that by the fragmentation metric in this table. Doing
2834 * this for all buckets and dividing it by the total amount of free
2835 * space in this metaslab (i.e. the total free space in all buckets) gives
2836 * us the fragmentation metric. This means that a high fragmentation metric
2837 * equates to most of the free space being comprised of small segments.
2838 * Conversely, if the metric is low, then most of the free space is in
2839 * large segments. A 10% change in fragmentation equates to approximately
2840 * double the number of segments.
2842 * This table defines 0% fragmented space using 16MB segments. Testing has
2843 * shown that segments that are greater than or equal to 16MB do not suffer
2844 * from drastic performance problems. Using this value, we derive the rest
2845 * of the table. Since the fragmentation value is never stored on disk, it
2846 * is possible to change these calculations in the future.
2848 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2868 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2869 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2870 * been upgraded and does not support this metric. Otherwise, the return
2871 * value should be in the range [0, 100].
2874 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2876 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2877 uint64_t fragmentation = 0;
2879 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2880 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2882 if (!feature_enabled) {
2883 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2888 * A null space map means that the entire metaslab is free
2889 * and thus is not fragmented.
2891 if (msp->ms_sm == NULL) {
2892 msp->ms_fragmentation = 0;
2897 * If this metaslab's space map has not been upgraded, flag it
2898 * so that we upgrade next time we encounter it.
2900 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2901 uint64_t txg = spa_syncing_txg(spa);
2902 vdev_t *vd = msp->ms_group->mg_vd;
2905 * If we've reached the final dirty txg, then we must
2906 * be shutting down the pool. We don't want to dirty
2907 * any data past this point so skip setting the condense
2908 * flag. We can retry this action the next time the pool
2909 * is imported. We also skip marking this metaslab for
2910 * condensing if the caller has explicitly set nodirty.
2913 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2914 msp->ms_condense_wanted = B_TRUE;
2915 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2916 zfs_dbgmsg("txg %llu, requesting force condense: "
2917 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
2920 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2924 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2926 uint8_t shift = msp->ms_sm->sm_shift;
2928 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2929 FRAGMENTATION_TABLE_SIZE - 1);
2931 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2934 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2937 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2938 fragmentation += space * zfs_frag_table[idx];
2942 fragmentation /= total;
2943 ASSERT3U(fragmentation, <=, 100);
2945 msp->ms_fragmentation = fragmentation;
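/*
 * Illustrative sketch (not part of the build): the fragmentation metric
 * computed above, as a standalone function. Each histogram bucket's space is
 * weighted by the table entry for its segment size and the result is
 * normalized by the total free space. The example_* names, table values, and
 * sizes are hypothetical stand-ins for the real ones; sm_shift is assumed to
 * be at least EXAMPLE_SPA_MINBLOCKSHIFT.
 */
#if 0
#include <stdint.h>

#define	EXAMPLE_SM_HIST_SIZE		32
#define	EXAMPLE_FRAG_TABLE_SIZE		17
#define	EXAMPLE_SPA_MINBLOCKSHIFT	9

/* 100 = all free space in tiny segments, 0 = all in large segments. */
static const int example_frag_table[EXAMPLE_FRAG_TABLE_SIZE] = {
	100, 100, 98, 95, 90, 80, 70, 60, 50, 40, 30, 20, 15, 10, 5, 0, 0
};

static uint64_t
example_fragmentation(const uint64_t *histogram, uint8_t sm_shift)
{
	uint64_t fragmentation = 0, total = 0;

	for (int i = 0; i < EXAMPLE_SM_HIST_SIZE; i++) {
		if (histogram[i] == 0)
			continue;

		int idx = sm_shift - EXAMPLE_SPA_MINBLOCKSHIFT + i;
		if (idx > EXAMPLE_FRAG_TABLE_SIZE - 1)
			idx = EXAMPLE_FRAG_TABLE_SIZE - 1;

		/* Space in this bucket: segment count times ~2^(i+shift). */
		uint64_t space = histogram[i] << (i + sm_shift);
		fragmentation += space * example_frag_table[idx];
		total += space;
	}

	if (total > 0)
		fragmentation /= total;
	return (fragmentation);
}
#endif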
2949 * Compute a weight -- a selection preference value -- for the given metaslab.
2950 * This is based on the amount of free space, the level of fragmentation,
2951 * the LBA range, and whether the metaslab is loaded.
2954 metaslab_space_weight(metaslab_t *msp)
2956 metaslab_group_t *mg = msp->ms_group;
2957 vdev_t *vd = mg->mg_vd;
2958 uint64_t weight, space;
2960 ASSERT(MUTEX_HELD(&msp->ms_lock));
2963 * The baseline weight is the metaslab's free space.
2965 space = msp->ms_size - metaslab_allocated_space(msp);
2967 if (metaslab_fragmentation_factor_enabled &&
2968 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2970 * Use the fragmentation information to inversely scale
2971 * down the baseline weight. We need to ensure that we
2972 * don't exclude this metaslab completely when it's 100%
2973 * fragmented. To avoid this we reduce the fragmented value by 1.
2976 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2979 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2980 * this metaslab again. The fragmentation metric may have
2981 * decreased the space to something smaller than
2982 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2983 * so that we can consume any remaining space.
2985 if (space > 0 && space < SPA_MINBLOCKSIZE)
2986 space = SPA_MINBLOCKSIZE;
2991 * Modern disks have uniform bit density and constant angular velocity.
2992 * Therefore, the outer recording zones are faster (higher bandwidth)
2993 * than the inner zones by the ratio of outer to inner track diameter,
2994 * which is typically around 2:1. We account for this by assigning
2995 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2996 * In effect, this means that we'll select the metaslab with the most
2997 * free bandwidth rather than simply the one with the most free space.
2999 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3000 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3001 ASSERT(weight >= space && weight <= 2 * space);
3005 * If this metaslab is one we're actively using, adjust its
3006 * weight to make it preferable to any inactive metaslab so
3007 * we'll polish it off. If the fragmentation on this metaslab
3008 * has exceeded our threshold, then don't mark it active.
3010 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3011 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3012 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3015 WEIGHT_SET_SPACEBASED(weight);
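/*
 * Illustrative sketch (not part of the build): the space-based weight
 * computed above, combining the fragmentation scaling and the LBA
 * (outer-track) bonus. The example_* names are hypothetical; fragmentation is
 * assumed to be a valid value in [0, 100] and ms_count to be non-zero.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

#define	EXAMPLE_SPA_MINBLOCKSIZE	512

static uint64_t
example_space_weight(uint64_t free_space, uint64_t fragmentation,
    uint64_t ms_id, uint64_t ms_count, bool lba_weighting)
{
	uint64_t space = free_space;

	/* Scale free space down as fragmentation approaches 100%. */
	if (fragmentation > 0)
		space = (space * (100 - (fragmentation - 1))) / 100;
	if (space > 0 && space < EXAMPLE_SPA_MINBLOCKSIZE)
		space = EXAMPLE_SPA_MINBLOCKSIZE;

	uint64_t weight = space;

	/* Give lower-LBA metaslabs up to a 2x bonus on rotating media. */
	if (lba_weighting)
		weight = 2 * weight - (ms_id * weight) / ms_count;

	return (weight);
}
#endif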
3020 * Return the weight of the specified metaslab, according to the segment-based
3021 * weighting algorithm. The metaslab must be loaded. This function can
3022 * be called within a sync pass since it relies only on the metaslab's
3023 * range tree which is always accurate when the metaslab is loaded.
3026 metaslab_weight_from_range_tree(metaslab_t *msp)
3028 uint64_t weight = 0;
3029 uint32_t segments = 0;
3031 ASSERT(msp->ms_loaded);
3033 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3035 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3036 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3039 segments += msp->ms_allocatable->rt_histogram[i];
3042 * The range tree provides more precision than the space map
3043 * and must be downgraded so that all values fit within the
3044 * space map's histogram. This allows us to compare loaded
3045 * vs. unloaded metaslabs to determine which metaslab is
3046 * considered "best".
3051 if (segments != 0) {
3052 WEIGHT_SET_COUNT(weight, segments);
3053 WEIGHT_SET_INDEX(weight, i);
3054 WEIGHT_SET_ACTIVE(weight, 0);
3062 * Calculate the weight based on the on-disk histogram. Should be applied
3063 * only to unloaded metaslabs (i.e. no incoming allocations) in order to
3064 * give results consistent with the on-disk state.
3067 metaslab_weight_from_spacemap(metaslab_t *msp)
3069 space_map_t *sm = msp->ms_sm;
3070 ASSERT(!msp->ms_loaded);
3072 ASSERT3U(space_map_object(sm), !=, 0);
3073 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3076 * Create a joint histogram from all the segments that have made
3077 * it to the metaslab's space map histogram, that are not yet
3078 * available for allocation because they are still in the freeing
3079 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3080 * these segments from the space map's histogram to get a more accurate weight.
3083 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3084 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3085 deferspace_histogram[i] += msp->ms_synchist[i];
3086 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3087 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3088 deferspace_histogram[i] += msp->ms_deferhist[t][i];
3092 uint64_t weight = 0;
3093 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3094 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3095 deferspace_histogram[i]);
3097 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3099 WEIGHT_SET_COUNT(weight, count);
3100 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3101 WEIGHT_SET_ACTIVE(weight, 0);
3109 * Compute a segment-based weight for the specified metaslab. The weight
3110 * is determined by highest bucket in the histogram. The information
3111 * for the highest bucket is encoded into the weight value.
3114 metaslab_segment_weight(metaslab_t *msp)
3116 metaslab_group_t *mg = msp->ms_group;
3117 uint64_t weight = 0;
3118 uint8_t shift = mg->mg_vd->vdev_ashift;
3120 ASSERT(MUTEX_HELD(&msp->ms_lock));
3123 * The metaslab is completely free.
3125 if (metaslab_allocated_space(msp) == 0) {
3126 int idx = highbit64(msp->ms_size) - 1;
3127 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3129 if (idx < max_idx) {
3130 WEIGHT_SET_COUNT(weight, 1ULL);
3131 WEIGHT_SET_INDEX(weight, idx);
3133 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3134 WEIGHT_SET_INDEX(weight, max_idx);
3136 WEIGHT_SET_ACTIVE(weight, 0);
3137 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3141 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3144 * If the metaslab is fully allocated then just make the weight 0.
3146 if (metaslab_allocated_space(msp) == msp->ms_size)
3149 * If the metaslab is already loaded, then use the range tree to
3150 * determine the weight. Otherwise, we rely on the space map information
3151 * to generate the weight.
3153 if (msp->ms_loaded) {
3154 weight = metaslab_weight_from_range_tree(msp);
3156 weight = metaslab_weight_from_spacemap(msp);
3160 * If the metaslab was active the last time we calculated its weight
3161 * then keep it active. We want to consume the entire region that
3162 * is associated with this weight.
3164 if (msp->ms_activation_weight != 0 && weight != 0)
3165 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3170 * Determine if we should attempt to allocate from this metaslab. If the
3171 * metaslab is loaded, then we can determine if the desired allocation
3172 * can be satisfied by looking at the size of the maximum free segment
3173 * on that metaslab. Otherwise, we make our decision based on the metaslab's
3174 * weight. For segment-based weighting we can determine the maximum
3175 * allocation based on the index encoded in its value. For space-based
3176 * weights we rely on the entire weight (excluding the weight-type bit).
3179 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3182 * If the metaslab is loaded, ms_max_size is definitive and we can use
3183 * the fast check. If it's not, the ms_max_size is a lower bound (once
3184 * set), and we should use the fast check as long as we're not in
3185 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3186 * seconds since the metaslab was unloaded.
3188 if (msp->ms_loaded ||
3189 (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3190 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3191 return (msp->ms_max_size >= asize);
3193 boolean_t should_allocate;
3194 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3196 * The metaslab segment weight indicates segments in the
3197 * range [2^i, 2^(i+1)), where i is the index in the weight.
3198 * Since the asize might be in the middle of the range, we
3199 * should attempt the allocation if asize < 2^(i+1).
3201 should_allocate = (asize <
3202 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3204 should_allocate = (asize <=
3205 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3208 return (should_allocate);
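/*
 * Illustrative sketch (not part of the build): the two decision rules
 * described above, once the fast ms_max_size path does not apply. For
 * segment-based weights the bucket index bounds the largest segment; for
 * space-based weights the weight value itself (minus the type bit) is the
 * amount of free space. All example_* names and bit layouts are hypothetical
 * simplifications of the real weight encoding.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical simplified weight layout: type bit in bit 63. */
#define	EXAMPLE_WEIGHT_TYPE		(1ULL << 63)

static bool
example_should_allocate(uint64_t weight, bool segment_based,
    uint64_t segment_index, uint64_t asize)
{
	if (segment_based) {
		/* Segments lie in [2^i, 2^(i+1)); try if asize < 2^(i+1). */
		return (asize < (1ULL << (segment_index + 1)));
	}
	/* Space-based: the weight (sans type bit) is the free space. */
	return (asize <= (weight & ~EXAMPLE_WEIGHT_TYPE));
}
#endif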
3212 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3214 vdev_t *vd = msp->ms_group->mg_vd;
3215 spa_t *spa = vd->vdev_spa;
3218 ASSERT(MUTEX_HELD(&msp->ms_lock));
3220 metaslab_set_fragmentation(msp, nodirty);
3223 * Update the maximum size. If the metaslab is loaded, this will
3224 * ensure that we get an accurate maximum size if newly freed space
3225 * has been added back into the free tree. If the metaslab is
3226 * unloaded, we check if there's a larger free segment in the
3227 * unflushed frees. This is a lower bound on the largest allocatable
3228 * segment size. Coalescing of adjacent entries may reveal larger
3229 * allocatable segments, but we aren't aware of those until loading
3230 * the space map into a range tree.
3232 if (msp->ms_loaded) {
3233 msp->ms_max_size = metaslab_largest_allocatable(msp);
3235 msp->ms_max_size = MAX(msp->ms_max_size,
3236 metaslab_largest_unflushed_free(msp));
3240 * Segment-based weighting requires space map histogram support.
3242 if (zfs_metaslab_segment_weight_enabled &&
3243 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3244 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3245 sizeof (space_map_phys_t))) {
3246 weight = metaslab_segment_weight(msp);
3248 weight = metaslab_space_weight(msp);
3254 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3256 ASSERT(MUTEX_HELD(&msp->ms_lock));
3258 /* note: we preserve the mask (e.g. indication of primary, etc..) */
3259 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3260 metaslab_group_sort(msp->ms_group, msp,
3261 metaslab_weight(msp, B_FALSE) | was_active);
3265 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3266 int allocator, uint64_t activation_weight)
3268 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3269 ASSERT(MUTEX_HELD(&msp->ms_lock));
3272 * If we're activating for the claim code, we don't want to actually
3273 * set the metaslab up for a specific allocator.
3275 if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3276 ASSERT0(msp->ms_activation_weight);
3277 msp->ms_activation_weight = msp->ms_weight;
3278 metaslab_group_sort(mg, msp, msp->ms_weight |
3283 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3284 &mga->mga_primary : &mga->mga_secondary);
3286 mutex_enter(&mg->mg_lock);
3287 if (*mspp != NULL) {
3288 mutex_exit(&mg->mg_lock);
3293 ASSERT3S(msp->ms_allocator, ==, -1);
3294 msp->ms_allocator = allocator;
3295 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3297 ASSERT0(msp->ms_activation_weight);
3298 msp->ms_activation_weight = msp->ms_weight;
3299 metaslab_group_sort_impl(mg, msp,
3300 msp->ms_weight | activation_weight);
3301 mutex_exit(&mg->mg_lock);
3307 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3309 ASSERT(MUTEX_HELD(&msp->ms_lock));
3312 * The current metaslab is already activated for us so there
3313 * is nothing to do. Being activated, though, doesn't mean
3314 * that this metaslab is activated for our allocator or our
3315 * requested activation weight. The metaslab could have started
3316 * as an active one for our allocator but changed allocators
3317 * while we were waiting to grab its ms_lock or we stole it
3318 * [see find_valid_metaslab()]. This means that there is a
3319 * possibility of passivating a metaslab of another allocator
3320 * or from a different activation mask, from this thread.
3322 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3323 ASSERT(msp->ms_loaded);
3327 int error = metaslab_load(msp);
3329 metaslab_group_sort(msp->ms_group, msp, 0);
3334 * When entering metaslab_load() we may have dropped the
3335 * ms_lock because we were loading this metaslab, or we
3336 * were waiting for another thread to load it for us. In
3337 * that scenario, we recheck the weight of the metaslab
3338 * to see if it was activated by another thread.
3340 * If the metaslab was activated for another allocator or
3341 * it was activated with a different activation weight (e.g.
3342 * we wanted to make it a primary but it was activated as
3343 * secondary) we return error (EBUSY).
3345 * If the metaslab was activated for the same allocator
3346 * and requested activation mask, skip activating it.
3348 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3349 if (msp->ms_allocator != allocator)
3352 if ((msp->ms_weight & activation_weight) == 0)
3353 return (SET_ERROR(EBUSY));
3355 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3361 * If the metaslab has literally 0 space, it will have weight 0. In
3362 * that case, don't bother activating it. This can happen if the
3363 * metaslab had space during find_valid_metaslab, but another thread
3364 * loaded it and used all that space while we were waiting to grab the lock.
3367 if (msp->ms_weight == 0) {
3368 ASSERT0(range_tree_space(msp->ms_allocatable));
3369 return (SET_ERROR(ENOSPC));
3372 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3373 allocator, activation_weight)) != 0) {
3377 ASSERT(msp->ms_loaded);
3378 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3384 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3387 ASSERT(MUTEX_HELD(&msp->ms_lock));
3388 ASSERT(msp->ms_loaded);
3390 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3391 metaslab_group_sort(mg, msp, weight);
3395 mutex_enter(&mg->mg_lock);
3396 ASSERT3P(msp->ms_group, ==, mg);
3397 ASSERT3S(0, <=, msp->ms_allocator);
3398 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3400 metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3401 if (msp->ms_primary) {
3402 ASSERT3P(mga->mga_primary, ==, msp);
3403 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3404 mga->mga_primary = NULL;
3406 ASSERT3P(mga->mga_secondary, ==, msp);
3407 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3408 mga->mga_secondary = NULL;
3410 msp->ms_allocator = -1;
3411 metaslab_group_sort_impl(mg, msp, weight);
3412 mutex_exit(&mg->mg_lock);
3416 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3418 uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3421 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3422 * this metaslab again. In that case, it had better be empty,
3423 * or we would be leaving space on the table.
3425 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3426 size >= SPA_MINBLOCKSIZE ||
3427 range_tree_space(msp->ms_allocatable) == 0);
3428 ASSERT0(weight & METASLAB_ACTIVE_MASK);
3430 ASSERT(msp->ms_activation_weight != 0);
3431 msp->ms_activation_weight = 0;
3432 metaslab_passivate_allocator(msp->ms_group, msp, weight);
3433 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3437 * Segment-based metaslabs are activated once and remain active until
3438 * we either fail an allocation attempt (similar to space-based metaslabs)
3439 * or have exhausted the free space in zfs_metaslab_switch_threshold
3440 * buckets since the metaslab was activated. This function checks to see
3441 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3442 * metaslab and passivates it proactively. This will allow us to select a
3443 * metaslab with a larger contiguous region, if any, remaining within this
3444 * metaslab group. If we're in sync pass > 1, then we continue using this
3445 * metaslab so that we don't dirty more blocks and cause more sync passes.
3448 metaslab_segment_may_passivate(metaslab_t *msp)
3450 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3452 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3456 * Since we are in the middle of a sync pass, the most accurate
3457 * information that is accessible to us is the in-core range tree
3458 * histogram; calculate the new weight based on that information.
3460 uint64_t weight = metaslab_weight_from_range_tree(msp);
3461 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3462 int current_idx = WEIGHT_GET_INDEX(weight);
3464 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3465 metaslab_passivate(msp, weight);
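/*
 * Illustrative sketch (not part of the build): the proactive passivation
 * test above. Once allocations have consumed zfs_metaslab_switch_threshold
 * power-of-two buckets below the bucket the metaslab was activated at, it is
 * passivated so a better metaslab can be selected. The example_* name is
 * hypothetical.
 */
#if 0
#include <stdbool.h>

static bool
example_should_passivate(int activation_idx, int current_idx,
    int switch_threshold)
{
	return (current_idx <= activation_idx - switch_threshold);
}
#endif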
3469 metaslab_preload(void *arg)
3471 metaslab_t *msp = arg;
3472 metaslab_class_t *mc = msp->ms_group->mg_class;
3473 spa_t *spa = mc->mc_spa;
3474 fstrans_cookie_t cookie = spl_fstrans_mark();
3476 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3478 mutex_enter(&msp->ms_lock);
3479 (void) metaslab_load(msp);
3480 metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3481 mutex_exit(&msp->ms_lock);
3482 spl_fstrans_unmark(cookie);
3486 metaslab_group_preload(metaslab_group_t *mg)
3488 spa_t *spa = mg->mg_vd->vdev_spa;
3490 avl_tree_t *t = &mg->mg_metaslab_tree;
3493 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
3494 taskq_wait_outstanding(mg->mg_taskq, 0);
3498 mutex_enter(&mg->mg_lock);
3501 * Load the next potential metaslabs
3503 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3504 ASSERT3P(msp->ms_group, ==, mg);
3507 * We preload only the maximum number of metaslabs specified
3508 * by metaslab_preload_limit. If a metaslab is being forced
3509 * to condense then we preload it too. This will ensure
3510 * that force condensing happens in the next txg.
3512 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3516 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
3517 msp, TQ_SLEEP) != TASKQID_INVALID);
3519 mutex_exit(&mg->mg_lock);
3523 * Determine if the space map's on-disk footprint is past our tolerance for
3524 * inefficiency. We would like to use the following criteria to make our decision:
3527 * 1. Do not condense if the size of the space map object would dramatically
3528 * increase as a result of writing out the free space range tree.
3530 * 2. Condense if the on-disk space map representation is at least
3531 * zfs_condense_pct/100 times the size of the optimal representation
3532 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
3534 * 3. Do not condense if the on-disk size of the space map does not actually decrease.
3537 * Unfortunately, we cannot compute the on-disk size of the space map in this
3538 * context because we cannot accurately compute the effects of compression, etc.
3539 * Instead, we apply the heuristic described in the block comment for
3540 * zfs_metaslab_condense_block_threshold - we only condense if the space used
3541 * is greater than a threshold number of blocks.
3544 metaslab_should_condense(metaslab_t *msp)
3546 space_map_t *sm = msp->ms_sm;
3547 vdev_t *vd = msp->ms_group->mg_vd;
3548 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
3550 ASSERT(MUTEX_HELD(&msp->ms_lock));
3551 ASSERT(msp->ms_loaded);
3553 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3556 * We always condense metaslabs that are empty and metaslabs for
3557 * which a condense request has been made.
3559 if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3560 msp->ms_condense_wanted)
3563 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3564 uint64_t object_size = space_map_length(sm);
3565 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3566 msp->ms_allocatable, SM_NO_VDEVID);
3568 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3569 object_size > zfs_metaslab_condense_block_threshold * record_size);
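/*
 * Illustrative sketch (not part of the build): the condense criteria
 * evaluated above, for a metaslab that is neither empty nor explicitly
 * flagged for condensing. The example_* names are hypothetical; the real
 * optimal size comes from space_map_estimate_optimal_size().
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
example_should_condense(uint64_t object_size, uint64_t optimal_size,
    uint64_t condense_pct, uint64_t block_threshold,
    uint64_t sm_blksz, uint64_t vdev_blocksize)
{
	/* A space map record occupies at least one block on disk. */
	uint64_t record_size =
	    (sm_blksz > vdev_blocksize ? sm_blksz : vdev_blocksize);

	return (object_size >= optimal_size * condense_pct / 100 &&
	    object_size > block_threshold * record_size);
}
#endif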
3573 * Condense the on-disk space map representation to its minimized form.
3574 * The minimized form consists of a small number of allocations followed
3575 * by the entries of the free range tree (ms_allocatable). The condensed
3576 * spacemap contains all the entries of previous TXGs (including those in
3577 * the pool-wide log spacemaps; thus this is effectively a superset of
3578 * metaslab_flush()), but this TXG's entries still need to be written.
3581 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3583 range_tree_t *condense_tree;
3584 space_map_t *sm = msp->ms_sm;
3585 uint64_t txg = dmu_tx_get_txg(tx);
3586 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3588 ASSERT(MUTEX_HELD(&msp->ms_lock));
3589 ASSERT(msp->ms_loaded);
3590 ASSERT(msp->ms_sm != NULL);
3593 * In order to condense the space map, we need to change it so it
3594 * only describes which segments are currently allocated and free.
3596 * All the current free space resides in the ms_allocatable, all
3597 * the ms_defer trees, and all the ms_allocating trees. We ignore
3598 * ms_freed because it is empty, as we're in sync pass 1. We
3599 * ignore ms_freeing because these changes are not yet reflected
3600 * in the spacemap (they will be written later this txg).
3602 * So to truncate the space map to represent all the entries of
3603 * previous TXGs we do the following:
3605 * 1] We create a range tree (condense tree) that is 100% empty.
3606 * 2] We add to it all segments found in the ms_defer trees
3607 * as those segments are marked as free in the original space
3608 * map. We do the same with the ms_allocating trees for the same
3609 * reason. Adding these segments should be a relatively
3610 * inexpensive operation since we expect these trees to have a
3611 * small number of nodes.
3612 * 3] We vacate any unflushed allocs, since they are not frees we
3613 * need to add to the condense tree. Then we vacate any
3614 * unflushed frees as they should already be part of ms_allocatable.
3615 * 4] At this point, we would ideally like to add all segments
3616 * in the ms_allocatable tree to the condense tree. This way
3617 * we would write all the entries of the condense tree as the
3618 * condensed space map, which would only contain freed
3619 * segments with everything else assumed to be allocated.
3621 * Doing so can be prohibitively expensive as ms_allocatable can
3622 * be large, and therefore computationally expensive to add to
3623 * the condense_tree. Instead we first sync out an entry marking
3624 * everything as allocated, then the condense_tree and then the
3625 * ms_allocatable, in the condensed space map. While this is not
3626 * optimal, it is typically close to optimal and more importantly
3627 * much cheaper to compute.
3629 * 5] Finally, as both of the unflushed trees were written to our
3630 * new and condensed metaslab space map, we basically flushed
3631 * all the unflushed changes to disk, thus we call
3632 * metaslab_flush_update().
3634 ASSERT3U(spa_sync_pass(spa), ==, 1);
3635 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3637 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3638 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
3639 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
3640 spa->spa_name, space_map_length(msp->ms_sm),
3641 range_tree_numsegs(msp->ms_allocatable),
3642 msp->ms_condense_wanted ? "TRUE" : "FALSE");
3644 msp->ms_condense_wanted = B_FALSE;
3646 range_seg_type_t type;
3647 uint64_t shift, start;
3648 type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3651 condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3653 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3654 range_tree_walk(msp->ms_defer[t],
3655 range_tree_add, condense_tree);
3658 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3659 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3660 range_tree_add, condense_tree);
3663 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3664 metaslab_unflushed_changes_memused(msp));
3665 spa->spa_unflushed_stats.sus_memused -=
3666 metaslab_unflushed_changes_memused(msp);
3667 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3668 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3671 * We're about to drop the metaslab's lock, thus allowing other
3672 * consumers to change its content. Set the metaslab's ms_condensing
3673 * flag to ensure that allocations on this metaslab do not occur
3674 * while we're in the middle of committing it to disk. This is only
3675 * critical for ms_allocatable as all other range trees use per TXG
3676 * views of their content.
3678 msp->ms_condensing = B_TRUE;
3680 mutex_exit(&msp->ms_lock);
3681 uint64_t object = space_map_object(msp->ms_sm);
3682 space_map_truncate(sm,
3683 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3684 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3687 * space_map_truncate() may have reallocated the spacemap object.
3688 * If so, update the vdev_ms_array.
3690 if (space_map_object(msp->ms_sm) != object) {
3691 object = space_map_object(msp->ms_sm);
3692 dmu_write(spa->spa_meta_objset,
3693 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3694 msp->ms_id, sizeof (uint64_t), &object, tx);
3699 * When the log space map feature is enabled, each space map will
3700 * always have ALLOCS followed by FREES for each sync pass. This is
3701 * typically true even when the log space map feature is disabled,
3702 except in the case where a metaslab goes through metaslab_sync()
3703 * and gets condensed. In that case the metaslab's space map will have
3704 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3705 followed by FREES (due to space_map_write() in metaslab_sync()) for the same TXG.
3708 range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start, shift);
3710 range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3711 space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3712 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3713 space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3715 range_tree_vacate(condense_tree, NULL, NULL);
3716 range_tree_destroy(condense_tree);
3717 range_tree_vacate(tmp_tree, NULL, NULL);
3718 range_tree_destroy(tmp_tree);
3719 mutex_enter(&msp->ms_lock);
3721 msp->ms_condensing = B_FALSE;
3722 metaslab_flush_update(msp, tx);
3726 * Called when the metaslab has been flushed (its own spacemap now reflects
3727 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3728 * metadata and any pool-wide related log space map data (e.g. summary,
3729 * obsolete logs, etc.) to reflect that.
3732 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3734 metaslab_group_t *mg = msp->ms_group;
3735 spa_t *spa = mg->mg_vd->vdev_spa;
3737 ASSERT(MUTEX_HELD(&msp->ms_lock));
3739 ASSERT3U(spa_sync_pass(spa), ==, 1);
3740 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3741 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3744 * Just because a metaslab got flushed doesn't mean that
3745 * it will pass through metaslab_sync_done(). Thus, make sure to
3746 * update ms_synced_length here in case it doesn't.
3748 msp->ms_synced_length = space_map_length(msp->ms_sm);
3751 * We may end up here from metaslab_condense() without the
3752 * feature being active. In that case this is a no-op.
3754 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
3757 ASSERT(spa_syncing_log_sm(spa) != NULL);
3758 ASSERT(msp->ms_sm != NULL);
3759 ASSERT(metaslab_unflushed_txg(msp) != 0);
3760 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3762 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3764 /* update metaslab's position in our flushing tree */
3765 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3766 mutex_enter(&spa->spa_flushed_ms_lock);
3767 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3768 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3769 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3770 mutex_exit(&spa->spa_flushed_ms_lock);
3772 /* update metaslab counts of spa_log_sm_t nodes */
3773 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3774 spa_log_sm_increment_current_mscount(spa);
3776 /* cleanup obsolete logs if any */
3777 uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
3778 spa_cleanup_old_sm_logs(spa, tx);
3779 uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
3780 VERIFY3U(log_blocks_after, <=, log_blocks_before);
3782 /* update log space map summary */
3783 uint64_t blocks_gone = log_blocks_before - log_blocks_after;
3784 spa_log_summary_add_flushed_metaslab(spa);
3785 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
3786 spa_log_summary_decrement_blkcount(spa, blocks_gone);
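/*
 * Write the metaslab's accumulated unflushed allocs and frees out to its
 * own space map (or condense the metaslab instead, when that is possible)
 * and then update the pool-wide log space map accounting via
 * metaslab_flush_update(). Skipped while the metaslab is still loading.
 */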
3790 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3792 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3794 ASSERT(MUTEX_HELD(&msp->ms_lock));
3795 ASSERT3U(spa_sync_pass(spa), ==, 1);
3796 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3798 ASSERT(msp->ms_sm != NULL);
3799 ASSERT(metaslab_unflushed_txg(msp) != 0);
3800 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3803 * There is nothing wrong with flushing the same metaslab twice, as
3804 * this codepath should work in that case. However, the current
3805 * flushing scheme makes sure to avoid this situation as we would be
3806 * making all these calls without having anything meaningful to write
3807 * to disk. We assert this behavior here.
3809 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3812 * We can not flush while loading, because then we would
3813 * not load the ms_unflushed_{allocs,frees}.
3815 if (msp->ms_loading)
3818 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3819 metaslab_verify_weight_and_frag(msp);
3822 * Metaslab condensing is effectively flushing. Therefore if the
3823 * metaslab can be condensed we can just condense it instead of flushing it.
3826 * Note that metaslab_condense() does call metaslab_flush_update()
3827 * so we can just return immediately after condensing. We also
3828 * don't need to care about setting ms_flushing or broadcasting
3829 * ms_flush_cv, even if we temporarily drop the ms_lock in
3830 * metaslab_condense(), as the metaslab is already loaded.
3832 if (msp->ms_loaded && metaslab_should_condense(msp)) {
3833 metaslab_group_t *mg = msp->ms_group;
3836 * For all histogram operations below refer to the
3837 * comments of metaslab_sync() where we follow a
3838 * similar procedure.
3840 metaslab_group_histogram_verify(mg);
3841 metaslab_class_histogram_verify(mg->mg_class);
3842 metaslab_group_histogram_remove(mg, msp);
3844 metaslab_condense(msp, tx);
3846 space_map_histogram_clear(msp->ms_sm);
3847 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3848 ASSERT(range_tree_is_empty(msp->ms_freed));
3849 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3850 space_map_histogram_add(msp->ms_sm,
3851 msp->ms_defer[t], tx);
3853 metaslab_aux_histograms_update(msp);
3855 metaslab_group_histogram_add(mg, msp);
3856 metaslab_group_histogram_verify(mg);
3857 metaslab_class_histogram_verify(mg->mg_class);
3859 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3862 * Since we recreated the histogram (and potentially
3863 * the ms_sm too while condensing) ensure that the
3864 * weight is updated too because we are not guaranteed
3865 * that this metaslab is dirty and will go through
3866 * metaslab_sync_done().
3868 metaslab_recalculate_weight_and_sort(msp);
3872 msp->ms_flushing = B_TRUE;
3873 uint64_t sm_len_before = space_map_length(msp->ms_sm);
3875 mutex_exit(&msp->ms_lock);
3876 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, SM_NO_VDEVID, tx);
3878 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, SM_NO_VDEVID, tx);
3880 mutex_enter(&msp->ms_lock);
3882 uint64_t sm_len_after = space_map_length(msp->ms_sm);
3883 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3884 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3885 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3886 "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
3887 msp->ms_group->mg_vd->vdev_id, msp->ms_id,
3888 range_tree_space(msp->ms_unflushed_allocs),
3889 range_tree_space(msp->ms_unflushed_frees),
3890 (sm_len_after - sm_len_before));
3893 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3894 metaslab_unflushed_changes_memused(msp));
3895 spa->spa_unflushed_stats.sus_memused -=
3896 metaslab_unflushed_changes_memused(msp);
3897 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3898 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3900 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3901 metaslab_verify_weight_and_frag(msp);
3903 metaslab_flush_update(msp, tx);
3905 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3906 metaslab_verify_weight_and_frag(msp);
3908 msp->ms_flushing = B_FALSE;
3909 cv_broadcast(&msp->ms_flush_cv);
3914 * Write a metaslab to disk in the context of the specified transaction group.
3917 metaslab_sync(metaslab_t *msp, uint64_t txg)
3919 metaslab_group_t *mg = msp->ms_group;
3920 vdev_t *vd = mg->mg_vd;
3921 spa_t *spa = vd->vdev_spa;
3922 objset_t *mos = spa_meta_objset(spa);
3923 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3926 ASSERT(!vd->vdev_ishole);
3929 * This metaslab has just been added so there's no work to do now.
3932 ASSERT0(range_tree_space(alloctree));
3933 ASSERT0(range_tree_space(msp->ms_freeing));
3934 ASSERT0(range_tree_space(msp->ms_freed));
3935 ASSERT0(range_tree_space(msp->ms_checkpointing));
3936 ASSERT0(range_tree_space(msp->ms_trim));
3941 * Normally, we don't want to process a metaslab if there are no
3942 * allocations or frees to perform. However, if the metaslab is being
3943 * forced to condense, it's loaded and we're not beyond the final
3944 * dirty txg, we need to let it through. Not condensing beyond the
3945 * final dirty txg prevents an issue where metaslabs that need to be
3946 * condensed but were loaded for other reasons could cause a panic
3947 * here. By only checking the txg in that branch of the conditional,
3948 we preserve the utility of the VERIFY statements in all other cases.
3951 if (range_tree_is_empty(alloctree) &&
3952 range_tree_is_empty(msp->ms_freeing) &&
3953 range_tree_is_empty(msp->ms_checkpointing) &&
3954 !(msp->ms_loaded && msp->ms_condense_wanted &&
3955 txg <= spa_final_dirty_txg(spa)))
3959 VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
3962 * The only state that can actually be changing concurrently
3963 * with metaslab_sync() is the metaslab's ms_allocatable. No
3964 * other thread can be modifying this txg's alloc, freeing,
3965 * freed, or space_map_phys_t. We drop ms_lock whenever we
3966 * could call into the DMU, because the DMU can call down to
3967 * us (e.g. via zio_free()) at any time.
3969 * The spa_vdev_remove_thread() can be reading metaslab state
3970 * concurrently, and it is locked out by the ms_sync_lock.
3971 * Note that the ms_lock is insufficient for this, because it
3972 * is dropped by space_map_write().
3974 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3977 * Generate a log space map if one doesn't exist already.
3979 spa_generate_syncing_log_sm(spa, tx);
3981 if (msp->ms_sm == NULL) {
3982 uint64_t new_object = space_map_alloc(mos,
3983 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3984 zfs_metaslab_sm_blksz_with_log :
3985 zfs_metaslab_sm_blksz_no_log, tx);
3986 VERIFY3U(new_object, !=, 0);
3988 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
3989 msp->ms_id, sizeof (uint64_t), &new_object, tx);
3991 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
3992 msp->ms_start, msp->ms_size, vd->vdev_ashift));
3993 ASSERT(msp->ms_sm != NULL);
3995 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3996 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3997 ASSERT0(metaslab_allocated_space(msp));
4000 if (metaslab_unflushed_txg(msp) == 0 &&
4001 spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
4002 ASSERT(spa_syncing_log_sm(spa) != NULL);
4004 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
4005 spa_log_sm_increment_current_mscount(spa);
4006 spa_log_summary_add_flushed_metaslab(spa);
4008 ASSERT(msp->ms_sm != NULL);
4009 mutex_enter(&spa->spa_flushed_ms_lock);
4010 avl_add(&spa->spa_metaslabs_by_flushed, msp);
4011 mutex_exit(&spa->spa_flushed_ms_lock);
4013 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
4014 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
4017 if (!range_tree_is_empty(msp->ms_checkpointing) &&
4018 vd->vdev_checkpoint_sm == NULL) {
4019 ASSERT(spa_has_checkpoint(spa));
4021 uint64_t new_object = space_map_alloc(mos,
4022 zfs_vdev_standard_sm_blksz, tx);
4023 VERIFY3U(new_object, !=, 0);
4025 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4026 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4027 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4030 * We save the space map object as an entry in vdev_top_zap
4031 * so it can be retrieved when the pool is reopened after an
4032 * export or through zdb.
4034 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4035 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4036 sizeof (new_object), 1, &new_object, tx));
4039 mutex_enter(&msp->ms_sync_lock);
4040 mutex_enter(&msp->ms_lock);
4043 * Note: metaslab_condense() clears the space map's histogram.
4044 * Therefore we must verify and remove this histogram before condensing.
4047 metaslab_group_histogram_verify(mg);
4048 metaslab_class_histogram_verify(mg->mg_class);
4049 metaslab_group_histogram_remove(mg, msp);
4051 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4052 metaslab_should_condense(msp))
4053 metaslab_condense(msp, tx);
4056 * We'll be going to disk to sync our space accounting, thus we
4057 * drop the ms_lock during that time so allocations coming from
4058 * open-context (ZIL) for future TXGs do not block.
4060 mutex_exit(&msp->ms_lock);
4061 space_map_t *log_sm = spa_syncing_log_sm(spa);
4062 if (log_sm != NULL) {
4063 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4065 space_map_write(log_sm, alloctree, SM_ALLOC, vd->vdev_id, tx);
4067 space_map_write(log_sm, msp->ms_freeing, SM_FREE, vd->vdev_id, tx);
4069 mutex_enter(&msp->ms_lock);
4071 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4072 metaslab_unflushed_changes_memused(msp));
4073 spa->spa_unflushed_stats.sus_memused -=
4074 metaslab_unflushed_changes_memused(msp);
4075 range_tree_remove_xor_add(alloctree,
4076 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4077 range_tree_remove_xor_add(msp->ms_freeing,
4078 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4079 spa->spa_unflushed_stats.sus_memused +=
4080 metaslab_unflushed_changes_memused(msp);
4082 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4084 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, SM_NO_VDEVID, tx);
4086 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, SM_NO_VDEVID, tx);
4088 mutex_enter(&msp->ms_lock);
4091 msp->ms_allocated_space += range_tree_space(alloctree);
4092 ASSERT3U(msp->ms_allocated_space, >=,
4093 range_tree_space(msp->ms_freeing));
4094 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4096 if (!range_tree_is_empty(msp->ms_checkpointing)) {
4097 ASSERT(spa_has_checkpoint(spa));
4098 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4101 * Since we are doing writes to disk and the ms_checkpointing
4102 * tree won't be changing during that time, we drop the
4103 * ms_lock while writing to the checkpoint space map, for the
4104 * same reason mentioned above.
4106 mutex_exit(&msp->ms_lock);
4107 space_map_write(vd->vdev_checkpoint_sm,
4108 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4109 mutex_enter(&msp->ms_lock);
4111 spa->spa_checkpoint_info.sci_dspace +=
4112 range_tree_space(msp->ms_checkpointing);
4113 vd->vdev_stat.vs_checkpoint_space +=
4114 range_tree_space(msp->ms_checkpointing);
4115 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4116 -space_map_allocated(vd->vdev_checkpoint_sm));
4118 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4121 if (msp->ms_loaded) {
4123 * When the space map is loaded, we have an accurate
4124 * histogram in the range tree. This gives us an opportunity
4125 * to bring the space map's histogram up-to-date so we clear
4126 * it first before updating it.
4128 space_map_histogram_clear(msp->ms_sm);
4129 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4132 * Since we've cleared the histogram we need to add back
4133 * any free space that has already been processed, plus
4134 * any deferred space. This allows the on-disk histogram
4135 * to accurately reflect all free space even if some space
4136 * is not yet available for allocation (i.e. deferred).
4138 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4141 * Add back any deferred free space that has not been
4142 * added back into the in-core free tree yet. This will
4143 * ensure that we don't end up with a space map histogram
4144 * that is completely empty unless the metaslab is fully allocated.
4147 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4148 space_map_histogram_add(msp->ms_sm,
4149 msp->ms_defer[t], tx);
4154 * Always add the free space from this sync pass to the space
4155 * map histogram. We want to make sure that the on-disk histogram
4156 * accounts for all free space. If the space map is not loaded,
4157 * then we will lose some accuracy but will correct it the next
4158 * time we load the space map.
4160 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4161 metaslab_aux_histograms_update(msp);
4163 metaslab_group_histogram_add(mg, msp);
4164 metaslab_group_histogram_verify(mg);
4165 metaslab_class_histogram_verify(mg->mg_class);
4168 * For sync pass 1, we avoid traversing this txg's free range tree
4169 * and instead will just swap the pointers for freeing and freed.
4170 * We can safely do this since the freed_tree is guaranteed to be
4171 * empty on the initial pass.
4173 * Keep in mind that even if we are currently using a log spacemap
4174 * we want current frees to end up in the ms_allocatable (but not
4175 * get appended to the ms_sm) so their ranges can be reused as usual.
4177 if (spa_sync_pass(spa) == 1) {
4178 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4179 ASSERT0(msp->ms_allocated_this_txg);
4181 range_tree_vacate(msp->ms_freeing,
4182 range_tree_add, msp->ms_freed);
4184 msp->ms_allocated_this_txg += range_tree_space(alloctree);
4185 range_tree_vacate(alloctree, NULL, NULL);
4187 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4188 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) & TXG_MASK]));
4190 ASSERT0(range_tree_space(msp->ms_freeing));
4191 ASSERT0(range_tree_space(msp->ms_checkpointing));
4193 mutex_exit(&msp->ms_lock);
4196 * Verify that the space map object ID has been recorded in the vdev_ms_array.
4200 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4201 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4202 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4204 mutex_exit(&msp->ms_sync_lock);
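/*
 * Unload a metaslab that is no longer needed for allocations. This is a
 * no-op if the metaslab is not loaded or is currently disabled; otherwise
 * the metaslab is passivated (if it was still active) and unloaded, unless
 * metaslab_debug_unload is set.
 */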
4209 metaslab_evict(metaslab_t *msp, uint64_t txg)
4211 if (!msp->ms_loaded || msp->ms_disabled != 0)
4214 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4215 VERIFY0(range_tree_space(
4216 msp->ms_allocating[(txg + t) & TXG_MASK]));
4218 if (msp->ms_allocator != -1)
4219 metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4221 if (!metaslab_debug_unload)
4222 metaslab_unload(msp);
4226 * Called after a transaction group has completely synced to mark
4227 * all of the metaslab's free space as usable.
4230 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4232 metaslab_group_t *mg = msp->ms_group;
4233 vdev_t *vd = mg->mg_vd;
4234 spa_t *spa = vd->vdev_spa;
4235 range_tree_t **defer_tree;
4236 int64_t alloc_delta, defer_delta;
4237 boolean_t defer_allowed = B_TRUE;
4239 ASSERT(!vd->vdev_ishole);
4241 mutex_enter(&msp->ms_lock);
4244 /* this is a new metaslab, add its capacity to the vdev */
4245 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4247 /* there should be no allocations nor frees at this point */
4248 VERIFY0(msp->ms_allocated_this_txg);
4249 VERIFY0(range_tree_space(msp->ms_freed));
4252 ASSERT0(range_tree_space(msp->ms_freeing));
4253 ASSERT0(range_tree_space(msp->ms_checkpointing));
4255 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4257 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4258 metaslab_class_get_alloc(spa_normal_class(spa));
4259 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4260 defer_allowed = B_FALSE;
4264 alloc_delta = msp->ms_allocated_this_txg -
4265 range_tree_space(msp->ms_freed);
4267 if (defer_allowed) {
4268 defer_delta = range_tree_space(msp->ms_freed) -
4269 range_tree_space(*defer_tree);
4271 defer_delta -= range_tree_space(*defer_tree);
4273 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, defer_delta, 0);
4276 if (spa_syncing_log_sm(spa) == NULL) {
4278 * If there's a metaslab_load() in progress and we don't have
4279 * a log space map, it means that we probably wrote to the
4280 * metaslab's space map. If this is the case, we need to
4281 * make sure that we wait for the load to complete so that we
4282 * have a consistent view of the in-core state of the metaslab.
4284 metaslab_load_wait(msp);
4286 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4290 * When auto-trimming is enabled, free ranges which are added to
4291 * ms_allocatable are also added to ms_trim. The ms_trim tree is
4292 * periodically consumed by the vdev_autotrim_thread() which issues
4293 * trims for all ranges and then vacates the tree. The ms_trim tree
4294 * can be discarded at any time with the sole consequence of recent
4295 * frees not being trimmed.
4297 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4298 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4299 if (!defer_allowed) {
4300 range_tree_walk(msp->ms_freed, range_tree_add, msp->ms_trim);
4304 range_tree_vacate(msp->ms_trim, NULL, NULL);
4308 * Move the frees from the defer_tree back to the free
4309 * range tree (if it's loaded). Swap the freed_tree and
4310 * the defer_tree -- this is safe to do because we've
4311 * just emptied out the defer_tree.
4313 range_tree_vacate(*defer_tree,
4314 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4315 if (defer_allowed) {
4316 range_tree_swap(&msp->ms_freed, defer_tree);
4318 range_tree_vacate(msp->ms_freed,
4319 msp->ms_loaded ? range_tree_add : NULL,
4320 msp->ms_allocatable);
4323 msp->ms_synced_length = space_map_length(msp->ms_sm);
4325 msp->ms_deferspace += defer_delta;
4326 ASSERT3S(msp->ms_deferspace, >=, 0);
4327 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4328 if (msp->ms_deferspace != 0) {
4330 * Keep syncing this metaslab until all deferred frees
4331 * are back in circulation.
4333 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4335 metaslab_aux_histograms_update_done(msp, defer_allowed);
4338 msp->ms_new = B_FALSE;
4339 mutex_enter(&mg->mg_lock);
4341 mutex_exit(&mg->mg_lock);
4345 * Re-sort metaslab within its group now that we've adjusted
4346 * its allocatable space.
4348 metaslab_recalculate_weight_and_sort(msp);
4350 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4351 ASSERT0(range_tree_space(msp->ms_freeing));
4352 ASSERT0(range_tree_space(msp->ms_freed));
4353 ASSERT0(range_tree_space(msp->ms_checkpointing));
4354 msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4355 msp->ms_allocated_this_txg = 0;
4356 mutex_exit(&msp->ms_lock);
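/*
 * Re-evaluate a metaslab group after a sync: refresh its allocatability
 * and fragmentation statistics and, if the group is still active, preload
 * the next candidate metaslabs.
 */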
4360 metaslab_sync_reassess(metaslab_group_t *mg)
4362 spa_t *spa = mg->mg_class->mc_spa;
4364 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4365 metaslab_group_alloc_update(mg);
4366 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4369 * Preload the next potential metaslabs but only on active
4370 * metaslab groups. We can get into a state where the metaslab
4371 * is no longer active since we dirty metaslabs as we remove a
4372 * device, thus potentially making the metaslab group eligible for preloading.
4375 if (mg->mg_activation_count > 0) {
4376 metaslab_group_preload(mg);
4378 spa_config_exit(spa, SCL_ALLOC, FTAG);
4382 * When writing a ditto block (i.e. more than one DVA for a given BP) on
4383 * the same vdev as an existing DVA of this BP, try to allocate it
4384 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4387 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4391 if (DVA_GET_ASIZE(dva) == 0)
4394 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4397 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4399 return (msp->ms_id != dva_ms_id);
4403 * ==========================================================================
4404 * Metaslab allocation tracing facility
4405 * ==========================================================================
4409 * Add an allocation trace element to the allocation tracing list.
4412 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4413 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4416 metaslab_alloc_trace_t *mat;
4418 if (!metaslab_trace_enabled)
4422 * When the tracing list reaches its maximum we remove
4423 * the second element in the list before adding a new one.
4424 * By removing the second element we preserve the original
4425 * entry as a clue to what allocation steps have already been performed.
4428 if (zal->zal_size == metaslab_trace_max_entries) {
4429 metaslab_alloc_trace_t *mat_next;
4431 panic("too many entries in allocation list");
4433 METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4435 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4436 list_remove(&zal->zal_list, mat_next);
4437 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4440 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4441 list_link_init(&mat->mat_list_node);
4444 mat->mat_size = psize;
4445 mat->mat_dva_id = dva_id;
4446 mat->mat_offset = offset;
4447 mat->mat_weight = 0;
4448 mat->mat_allocator = allocator;
4451 mat->mat_weight = msp->ms_weight;
4454 * The list is part of the zio so locking is not required. Only
4455 * a single thread will perform allocations for a given zio.
4457 list_insert_tail(&zal->zal_list, mat);
4460 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4464 metaslab_trace_init(zio_alloc_list_t *zal)
4466 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4467 offsetof(metaslab_alloc_trace_t, mat_list_node));
4472 metaslab_trace_fini(zio_alloc_list_t *zal)
4474 metaslab_alloc_trace_t *mat;
4476 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4477 kmem_cache_free(metaslab_alloc_trace_cache, mat);
4478 list_destroy(&zal->zal_list);
4483 * ==========================================================================
4484 * Metaslab block operations
4485 * ==========================================================================
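/*
 * Account a pending async allocation against the group's per-allocator
 * queue depth. This is a no-op for non-throttled allocations or when the
 * class-level allocation throttle is disabled.
 */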
4489 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
4492 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4493 (flags & METASLAB_DONT_THROTTLE))
4496 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4497 if (!mg->mg_class->mc_alloc_throttle_enabled)
4500 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4501 (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
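/*
 * Grow the allocator's current maximum queue depth (and the class-wide
 * maximum slot count) by one, up to the group's configured maximum.
 */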
4505 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4507 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4508 metaslab_class_allocator_t *mca =
4509 &mg->mg_class->mc_allocator[allocator];
4510 uint64_t max = mg->mg_max_alloc_queue_depth;
4511 uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4513 if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4514 cur, cur + 1) == cur) {
4515 atomic_inc_64(&mca->mca_alloc_max_slots);
4518 cur = mga->mga_cur_max_alloc_queue_depth;
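/*
 * Drop the queue-depth reference taken in metaslab_group_alloc_increment()
 * and, once the I/O has completed, allow the allocator's queue depth to
 * grow via metaslab_group_increment_qdepth().
 */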
4523 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
4524 int allocator, boolean_t io_complete)
4526 if (!(flags & METASLAB_ASYNC_ALLOC) ||
4527 (flags & METASLAB_DONT_THROTTLE))
4530 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4531 if (!mg->mg_class->mc_alloc_throttle_enabled)
4534 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4535 (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4537 metaslab_group_increment_qdepth(mg, allocator);
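/*
 * Debug check: verify that the given tag no longer holds any queue-depth
 * references for any of the block pointer's DVAs.
 */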
4541 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
4545 const dva_t *dva = bp->blk_dva;
4546 int ndvas = BP_GET_NDVAS(bp);
4548 for (int d = 0; d < ndvas; d++) {
4549 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4550 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4551 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4552 VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
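/*
 * Allocate 'size' bytes from this metaslab using the class's allocator
 * ops. On success the range is removed from ms_allocatable, cleared from
 * ms_trim, and tracked in this txg's ms_allocating tree; -1ULL is
 * returned on failure.
 */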
4558 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4561 range_tree_t *rt = msp->ms_allocatable;
4562 metaslab_class_t *mc = msp->ms_group->mg_class;
4564 ASSERT(MUTEX_HELD(&msp->ms_lock));
4565 VERIFY(!msp->ms_condensing);
4566 VERIFY0(msp->ms_disabled);
4568 start = mc->mc_ops->msop_alloc(msp, size);
4569 if (start != -1ULL) {
4570 metaslab_group_t *mg = msp->ms_group;
4571 vdev_t *vd = mg->mg_vd;
4573 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4574 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4575 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4576 range_tree_remove(rt, start, size);
4577 range_tree_clear(msp->ms_trim, start, size);
4579 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4580 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4582 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4583 msp->ms_allocating_total += size;
4585 /* Track the last successful allocation */
4586 msp->ms_alloc_txg = txg;
4587 metaslab_verify_space(msp, txg);
4591 * Now that we've attempted the allocation we need to update the
4592 * metaslab's maximum block size since it may have changed.
4594 msp->ms_max_size = metaslab_largest_allocatable(msp);
4599 * Find the metaslab with the highest weight that is less than what we've
4600 * already tried. In the common case, this means that we will examine each
4601 * metaslab at most once. Note that concurrent callers could reorder metaslabs
4602 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4603 * activated by another thread, and we fail to allocate from the metaslab we
4604 * have selected, we may not try the newly-activated metaslab, and instead
4605 * activate another metaslab. This is not optimal, but generally does not cause
4606 * any problems (a possible exception being if every metaslab is completely full
4607 * except for the newly-activated metaslab which we fail to examine).
4610 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4611 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4612 boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4613 boolean_t *was_active)
4616 avl_tree_t *t = &mg->mg_metaslab_tree;
4617 metaslab_t *msp = avl_find(t, search, &idx);
4619 msp = avl_nearest(t, idx, AVL_AFTER);
4622 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4625 if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4626 METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4631 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4632 metaslab_trace_add(zal, mg, msp, asize, d,
4633 TRACE_TOO_SMALL, allocator);
4638 * If the selected metaslab is condensing or disabled, skip it.
4641 if (msp->ms_condensing || msp->ms_disabled > 0)
4644 *was_active = msp->ms_allocator != -1;
4646 * If we're activating as primary, this is our first allocation
4647 * from this disk, so we don't need to check how close we are.
4648 * If the metaslab under consideration was already active,
4649 * we're getting desperate enough to steal another allocator's
4650 * metaslab, so we still don't care about distances.
4652 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4655 for (i = 0; i < d; i++) {
4657 if (want_unique && !metaslab_is_unique(msp, &dva[i]))
4658 break; /* try another metaslab */
4665 search->ms_weight = msp->ms_weight;
4666 search->ms_start = msp->ms_start + 1;
4667 search->ms_allocator = msp->ms_allocator;
4668 search->ms_primary = msp->ms_primary;
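/*
 * Debug check: verify that the metaslab's activation weight bits are
 * self-consistent, i.e. that at most one of PRIMARY, SECONDARY, and CLAIM
 * is set and that it agrees with ms_allocator and ms_primary.
 */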
4674 metaslab_active_mask_verify(metaslab_t *msp)
4676 ASSERT(MUTEX_HELD(&msp->ms_lock));
4678 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4681 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4684 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4685 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4686 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4687 VERIFY3S(msp->ms_allocator, !=, -1);
4688 VERIFY(msp->ms_primary);
4692 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4693 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4694 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4695 VERIFY3S(msp->ms_allocator, !=, -1);
4696 VERIFY(!msp->ms_primary);
4700 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4701 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4702 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4703 VERIFY3S(msp->ms_allocator, ==, -1);
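/*
 * Select a metaslab in this group and attempt to allocate 'asize' bytes
 * from it, activating (and later passivating) metaslabs as needed.
 * Returns the allocated offset, or -1ULL if nothing in the group could
 * satisfy the request.
 */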
4710 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4711 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4712 int allocator, boolean_t try_hard)
4714 metaslab_t *msp = NULL;
4715 uint64_t offset = -1ULL;
4717 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4718 for (int i = 0; i < d; i++) {
4719 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4720 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4721 activation_weight = METASLAB_WEIGHT_SECONDARY;
4722 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4723 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4724 activation_weight = METASLAB_WEIGHT_CLAIM;
4730 * If we don't have enough metaslabs active to fill the entire array, we
4731 * just use the 0th slot.
4733 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4735 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4737 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4739 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4740 search->ms_weight = UINT64_MAX;
4741 search->ms_start = 0;
4743 * At the end of the metaslab tree are the already-active metaslabs,
4744 * first the primaries, then the secondaries. When we resume searching
4745 * through the tree, we need to consider ms_allocator and ms_primary so
4746 * we start in the location right after where we left off, and don't
4747 * accidentally loop forever considering the same metaslabs.
4749 search->ms_allocator = -1;
4750 search->ms_primary = B_TRUE;
4752 boolean_t was_active = B_FALSE;
4754 mutex_enter(&mg->mg_lock);
4756 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4757 mga->mga_primary != NULL) {
4758 msp = mga->mga_primary;
4761 * Even though we don't hold the ms_lock for the
4762 * primary metaslab, those fields should not
4763 * change while we hold the mg_lock. Thus it is
4764 * safe to make assertions on them.
4766 ASSERT(msp->ms_primary);
4767 ASSERT3S(msp->ms_allocator, ==, allocator);
4768 ASSERT(msp->ms_loaded);
4770 was_active = B_TRUE;
4771 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4772 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4773 mga->mga_secondary != NULL) {
4774 msp = mga->mga_secondary;
4777 * See comment above about the similar assertions
4778 * for the primary metaslab.
4780 ASSERT(!msp->ms_primary);
4781 ASSERT3S(msp->ms_allocator, ==, allocator);
4782 ASSERT(msp->ms_loaded);
4784 was_active = B_TRUE;
4785 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4787 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4788 want_unique, asize, allocator, try_hard, zal,
4789 search, &was_active);
4792 mutex_exit(&mg->mg_lock);
4794 kmem_free(search, sizeof (*search));
4797 mutex_enter(&msp->ms_lock);
4799 metaslab_active_mask_verify(msp);
4802 * This code is disabled because of issues with
4803 * tracepoints in non-gpl kernel modules.
4806 DTRACE_PROBE3(ms__activation__attempt,
4807 metaslab_t *, msp, uint64_t, activation_weight,
4808 boolean_t, was_active);
4812 * Ensure that the metaslab we have selected is still
4813 * capable of handling our request. It's possible that
4814 * another thread may have changed the weight while we
4815 * were blocked on the metaslab lock. We check the
4816 * active status first to see if we need to select a new metaslab.
4819 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4820 ASSERT3S(msp->ms_allocator, ==, -1);
4821 mutex_exit(&msp->ms_lock);
4826 * If the metaslab was activated for another allocator
4827 * while we were waiting in the ms_lock above, or it's
4828 * a primary and we're seeking a secondary (or vice versa),
4829 * we go back and select a new metaslab.
4831 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4832 (msp->ms_allocator != -1) &&
4833 (msp->ms_allocator != allocator || ((activation_weight ==
4834 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4835 ASSERT(msp->ms_loaded);
4836 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4837 msp->ms_allocator != -1);
4838 mutex_exit(&msp->ms_lock);
4843 * This metaslab was used for claiming regions allocated
4844 * by the ZIL during pool import. Once these regions are
4845 * claimed we don't need to keep the CLAIM bit set
4846 * anymore. Passivate this metaslab to zero its activation
4849 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4850 activation_weight != METASLAB_WEIGHT_CLAIM) {
4851 ASSERT(msp->ms_loaded);
4852 ASSERT3S(msp->ms_allocator, ==, -1);
4853 metaslab_passivate(msp, msp->ms_weight &
4854 ~METASLAB_WEIGHT_CLAIM);
4855 mutex_exit(&msp->ms_lock);
4859 metaslab_set_selected_txg(msp, txg);
4861 int activation_error =
4862 metaslab_activate(msp, allocator, activation_weight);
4863 metaslab_active_mask_verify(msp);
4866 * If the metaslab was activated by another thread for
4867 * another allocator or activation_weight (EBUSY), or it
4868 * failed because another metaslab was assigned as primary
4869 * for this allocator (EEXIST) we continue using this
4870 * metaslab for our allocation, rather than going on to a
4871 * worse metaslab (we waited for that metaslab to be loaded after all).
4874 * If the activation failed due to an I/O error or ENOSPC we
4875 * skip to the next metaslab.
4877 boolean_t activated;
4878 if (activation_error == 0) {
4880 } else if (activation_error == EBUSY ||
4881 activation_error == EEXIST) {
4882 activated = B_FALSE;
4884 mutex_exit(&msp->ms_lock);
4887 ASSERT(msp->ms_loaded);
4890 * Now that we have the lock, recheck to see if we should
4891 * continue to use this metaslab for this allocation. The
4892 * metaslab is now loaded so metaslab_should_allocate()
4893 * can accurately determine if the allocation attempt should proceed.
4896 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4897 /* Passivate this metaslab and select a new one. */
4898 metaslab_trace_add(zal, mg, msp, asize, d,
4899 TRACE_TOO_SMALL, allocator);
4904 * If this metaslab is currently condensing then pick again
4905 * as we can't manipulate this metaslab until it's committed
4906 * to disk. If this metaslab is being initialized, we shouldn't
4907 * allocate from it since the allocated region might be
4908 * overwritten after allocation.
4910 if (msp->ms_condensing) {
4911 metaslab_trace_add(zal, mg, msp, asize, d,
4912 TRACE_CONDENSING, allocator);
4914 metaslab_passivate(msp, msp->ms_weight &
4915 ~METASLAB_ACTIVE_MASK);
4917 mutex_exit(&msp->ms_lock);
4919 } else if (msp->ms_disabled > 0) {
4920 metaslab_trace_add(zal, mg, msp, asize, d,
4921 TRACE_DISABLED, allocator);
4923 metaslab_passivate(msp, msp->ms_weight &
4924 ~METASLAB_ACTIVE_MASK);
4926 mutex_exit(&msp->ms_lock);
4930 offset = metaslab_block_alloc(msp, asize, txg);
4931 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4933 if (offset != -1ULL) {
4934 /* Proactively passivate the metaslab, if needed */
4936 metaslab_segment_may_passivate(msp);
4940 ASSERT(msp->ms_loaded);
4943 * This code is disabled because of issues with
4944 * tracepoints in non-gpl kernel modules.
4947 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4952 * We were unable to allocate from this metaslab so determine
4953 * a new weight for this metaslab. Now that we have loaded
4954 * the metaslab we can provide a better hint to the metaslab
4957 * For space-based metaslabs, we use the maximum block size.
4958 * This information is only available when the metaslab
4959 * is loaded and is more accurate than the generic free
4960 * space weight that was calculated by metaslab_weight().
4961 * This information allows us to quickly compare the maximum
4962 * available allocation in the metaslab to the allocation
4963 * size being requested.
4965 * For segment-based metaslabs, determine the new weight
4966 * based on the highest bucket in the range tree. We
4967 * explicitly use the loaded segment weight (i.e. the range
4968 * tree histogram) since it contains the space that is
4969 * currently available for allocation and is accurate
4970 * even within a sync pass.
4973 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
4974 weight = metaslab_largest_allocatable(msp);
4975 WEIGHT_SET_SPACEBASED(weight);
4977 weight = metaslab_weight_from_range_tree(msp);
4981 metaslab_passivate(msp, weight);
4984 * For the case where we use the metaslab that is
4985 * active for another allocator we want to make
4986 * sure that we retain the activation mask.
4988 * Note that we could attempt to use something like
4989 * metaslab_recalculate_weight_and_sort() that
4990 * retains the activation mask here. That function
4991 * uses metaslab_weight() to set the weight though
4992 * which is not as accurate as the calculations done here.
4995 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
4996 metaslab_group_sort(mg, msp, weight);
4998 metaslab_active_mask_verify(msp);
5001 * We have just failed an allocation attempt, check
5002 * that metaslab_should_allocate() agrees. Otherwise,
5003 * we may end up in an infinite loop retrying the same metaslab.
5006 ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5008 mutex_exit(&msp->ms_lock);
5010 mutex_exit(&msp->ms_lock);
5011 kmem_free(search, sizeof (*search));
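/*
 * Wrapper around metaslab_group_alloc_normal() that maintains the group's
 * allocation statistics and marks the group as out of space when even a
 * minimum-sized gang block cannot be allocated.
 */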
5016 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5017 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5018 int allocator, boolean_t try_hard)
5021 ASSERT(mg->mg_initialized);
5023 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5024 dva, d, allocator, try_hard);
5026 mutex_enter(&mg->mg_lock);
5027 if (offset == -1ULL) {
5028 mg->mg_failed_allocations++;
5029 metaslab_trace_add(zal, mg, NULL, asize, d,
5030 TRACE_GROUP_FAILURE, allocator);
5031 if (asize == SPA_GANGBLOCKSIZE) {
5033 * This metaslab group was unable to allocate
5034 * the minimum gang block size so it must be out of
5035 * space. We must notify the allocation throttle
5036 * to start skipping allocation attempts to this
5037 * metaslab group until more space becomes available.
5038 * Note: this failure cannot be caused by the
5039 * allocation throttle since the allocation throttle
5040 * is only responsible for skipping devices and
5041 * not failing block allocations.
5043 mg->mg_no_free_space = B_TRUE;
5046 mg->mg_allocations++;
5047 mutex_exit(&mg->mg_lock);
5052 * Allocate a block for the specified i/o.
5055 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5056 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5057 zio_alloc_list_t *zal, int allocator)
5059 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5060 metaslab_group_t *mg, *fast_mg, *rotor;
5062 boolean_t try_hard = B_FALSE;
5064 ASSERT(!DVA_IS_VALID(&dva[d]));
5067 * For testing, make some blocks above a certain size be gang blocks.
5068 * This will result in more split blocks when using device removal,
5069 * and a large number of split blocks coupled with ztest-induced
5070 * damage can result in extremely long reconstruction times. This
5071 * will also test spilling from special to normal.
5073 if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
5074 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5076 return (SET_ERROR(ENOSPC));
5080 * Start at the rotor and loop through all mgs until we find something.
5081 * Note that there's no locking on mca_rotor or mca_aliquot because
5082 * nothing actually breaks if we miss a few updates -- we just won't
5083 * allocate quite as evenly. It all balances out over time.
5085 * If we are doing ditto or log blocks, try to spread them across
5086 * consecutive vdevs. If we're forced to reuse a vdev before we've
5087 * allocated all of our ditto blocks, then try and spread them out on
5088 * that vdev as much as possible. If it turns out to not be possible,
5089 * gradually lower our standards until anything becomes acceptable.
5090 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5091 * gives us hope of containing our fault domains to something we're
5092 * able to reason about. Otherwise, any two top-level vdev failures
5093 * will guarantee the loss of data. With consecutive allocation,
5094 * only two adjacent top-level vdev failures will result in data loss.
5096 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5097 * ourselves on the same vdev as our gang block header. That
5098 * way, we can hope for locality in vdev_cache, plus it makes our
5099 * fault domains something tractable.
5102 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5105 * It's possible the vdev we're using as the hint no
5106 * longer exists or its mg has been closed (e.g. by
5107 * device removal). Consult the rotor when this happens.
5110 if (vd != NULL && vd->vdev_mg != NULL) {
5111 mg = vdev_get_mg(vd, mc);
5113 if (flags & METASLAB_HINTBP_AVOID &&
5114 mg->mg_next != NULL)
5117 mg = mca->mca_rotor;
5119 } else if (d != 0) {
5120 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5121 mg = vd->vdev_mg->mg_next;
5122 } else if (flags & METASLAB_FASTWRITE) {
5123 mg = fast_mg = mca->mca_rotor;
5126 if (fast_mg->mg_vd->vdev_pending_fastwrite <
5127 mg->mg_vd->vdev_pending_fastwrite)
5129 } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
5132 ASSERT(mca->mca_rotor != NULL);
5133 mg = mca->mca_rotor;
5137 * If the hint put us into the wrong metaslab class, or into a
5138 * metaslab group that has been passivated, just follow the rotor.
5140 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
5141 mg = mca->mca_rotor;
5146 boolean_t allocatable;
5148 ASSERT(mg->mg_activation_count == 1);
5152 * Don't allocate from faulted devices.
5155 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5156 allocatable = vdev_allocatable(vd);
5157 spa_config_exit(spa, SCL_ZIO, FTAG);
5159 allocatable = vdev_allocatable(vd);
5163 * Determine if the selected metaslab group is eligible
5164 * for allocations. If we're ganging then don't allow
5165 * this metaslab group to skip allocations since that would
5166 * inadvertently return ENOSPC and suspend the pool
5167 * even though space is still available.
5169 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5170 allocatable = metaslab_group_allocatable(mg, rotor,
5171 psize, allocator, d);
5175 metaslab_trace_add(zal, mg, NULL, psize, d,
5176 TRACE_NOT_ALLOCATABLE, allocator);
5180 ASSERT(mg->mg_initialized);
5183 * Avoid writing single-copy data to a failing,
5184 * non-redundant vdev, unless we've already tried all other vdevs.
5187 if ((vd->vdev_stat.vs_write_errors > 0 ||
5188 vd->vdev_state < VDEV_STATE_HEALTHY) &&
5189 d == 0 && !try_hard && vd->vdev_children == 0) {
5190 metaslab_trace_add(zal, mg, NULL, psize, d,
5191 TRACE_VDEV_ERROR, allocator);
5195 ASSERT(mg->mg_class == mc);
5197 uint64_t asize = vdev_psize_to_asize(vd, psize);
5198 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5201 * If we don't need to try hard, then require that the
5202 * block be on a different metaslab from any other DVAs
5203 * in this BP (unique=true). If we are trying hard, then
5204 * allow any metaslab to be used (unique=false).
5206 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5207 !try_hard, dva, d, allocator, try_hard);
5209 if (offset != -1ULL) {
5211 * If we've just selected this metaslab group,
5212 * figure out whether the corresponding vdev is
5213 * over- or under-used relative to the pool,
5214 * and set an allocation bias to even it out.
5216 * Bias is also used to compensate for unequally
5217 * sized vdevs so that space is allocated fairly.
5219 if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5220 vdev_stat_t *vs = &vd->vdev_stat;
5221 int64_t vs_free = vs->vs_space - vs->vs_alloc;
5222 int64_t mc_free = mc->mc_space - mc->mc_alloc;
5226 * Calculate how much more or less we should
5227 * try to allocate from this device during
5228 * this iteration around the rotor.
5230 * This basically introduces a zero-centered
5231 * bias towards the devices with the most
5232 * free space, while compensating for vdev size differences.
5236 * vdev V1 = 16M/128M
5237 * vdev V2 = 16M/128M
5238 * ratio(V1) = 100% ratio(V2) = 100%
5240 * vdev V1 = 16M/128M
5241 * vdev V2 = 64M/128M
5242 * ratio(V1) = 127% ratio(V2) = 72%
5244 * vdev V1 = 16M/128M
5245 * vdev V2 = 64M/512M
5246 * ratio(V1) = 40% ratio(V2) = 160%
5248 ratio = (vs_free * mc->mc_alloc_groups * 100) / (mc_free + 1);
5250 mg->mg_bias = ((ratio - 100) *
5251 (int64_t)mg->mg_aliquot) / 100;
5252 } else if (!metaslab_bias_enabled) {
5256 if ((flags & METASLAB_FASTWRITE) ||
5257 atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5258 mg->mg_aliquot + mg->mg_bias) {
5259 mca->mca_rotor = mg->mg_next;
5260 mca->mca_aliquot = 0;
5263 DVA_SET_VDEV(&dva[d], vd->vdev_id);
5264 DVA_SET_OFFSET(&dva[d], offset);
5265 DVA_SET_GANG(&dva[d],
5266 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5267 DVA_SET_ASIZE(&dva[d], asize);
5269 if (flags & METASLAB_FASTWRITE) {
5270 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
5277 mca->mca_rotor = mg->mg_next;
5278 mca->mca_aliquot = 0;
5279 } while ((mg = mg->mg_next) != rotor);
5282 * If we haven't tried hard, perhaps do so now.
5284 if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5285 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5286 psize <= 1 << spa->spa_min_ashift)) {
5287 METASLABSTAT_BUMP(metaslabstat_try_hard);
5292 bzero(&dva[d], sizeof (dva_t));
5294 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5295 return (SET_ERROR(ENOSPC));
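/*
 * Record a free of the given range on a concrete vdev, adding it to the
 * metaslab's ms_checkpointing tree when the free is part of the pool
 * checkpoint and to ms_freeing otherwise, dirtying the metaslab if needed.
 */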
5299 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5300 boolean_t checkpoint)
5303 spa_t *spa = vd->vdev_spa;
5305 ASSERT(vdev_is_concrete(vd));
5306 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5307 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5309 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5311 VERIFY(!msp->ms_condensing);
5312 VERIFY3U(offset, >=, msp->ms_start);
5313 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5314 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5315 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5317 metaslab_check_free_impl(vd, offset, asize);
5319 mutex_enter(&msp->ms_lock);
5320 if (range_tree_is_empty(msp->ms_freeing) &&
5321 range_tree_is_empty(msp->ms_checkpointing)) {
5322 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5326 ASSERT(spa_has_checkpoint(spa));
5327 range_tree_add(msp->ms_checkpointing, offset, asize);
5329 range_tree_add(msp->ms_freeing, offset, asize);
5331 mutex_exit(&msp->ms_lock);
5336 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5337 uint64_t size, void *arg)
5339 boolean_t *checkpoint = arg;
5341 ASSERT3P(checkpoint, !=, NULL);
5343 if (vd->vdev_ops->vdev_op_remap != NULL)
5344 vdev_indirect_mark_obsolete(vd, offset, size);
5346 metaslab_free_impl(vd, offset, size, *checkpoint);
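/*
 * Free the given range on a vdev, handing it to the device-removal code
 * if the vdev is currently being removed, remapping it through indirect
 * vdevs when necessary, or freeing it directly on a concrete vdev.
 */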
5350 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5351 boolean_t checkpoint)
5353 spa_t *spa = vd->vdev_spa;
5355 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5357 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5360 if (spa->spa_vdev_removal != NULL &&
5361 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5362 vdev_is_concrete(vd)) {
5364 * Note: we check if the vdev is concrete because when
5365 * we complete the removal, we first change the vdev to be
5366 * an indirect vdev (in open context), and then (in syncing
5367 * context) clear spa_vdev_removal.
5369 free_from_removing_vdev(vd, offset, size);
5370 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
5371 vdev_indirect_mark_obsolete(vd, offset, size);
5372 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5373 metaslab_free_impl_cb, &checkpoint);
5375 metaslab_free_concrete(vd, offset, size, checkpoint);
5379 typedef struct remap_blkptr_cb_arg {
5381 spa_remap_cb_t rbca_cb;
5382 vdev_t *rbca_remap_vd;
5383 uint64_t rbca_remap_offset;
5385 } remap_blkptr_cb_arg_t;
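/*
 * Callback invoked for each level of indirection while remapping a block
 * pointer: it invokes the caller's callback on the previous (indirect)
 * mapping and rewrites dva[0] of the BP to point at the current vdev and
 * offset, adjusting the physical birth txg accordingly.
 */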
5388 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5389 uint64_t size, void *arg)
5391 remap_blkptr_cb_arg_t *rbca = arg;
5392 blkptr_t *bp = rbca->rbca_bp;
5394 /* We can not remap split blocks. */
5395 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5397 ASSERT0(inner_offset);
5399 if (rbca->rbca_cb != NULL) {
5401 * At this point we know that we are not handling split
5402 * blocks and we invoke the callback on the previous
5403 * vdev which must be indirect.
5405 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5407 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5408 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5410 /* set up remap_blkptr_cb_arg for the next call */
5411 rbca->rbca_remap_vd = vd;
5412 rbca->rbca_remap_offset = offset;
5416 * The phys birth time is that of dva[0]. This ensures that we know
5417 * when each dva was written, so that resilver can determine which
5418 * blocks need to be scrubbed (i.e. those written during the time
5419 * the vdev was offline). It also ensures that the key used in
5420 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
5421 * we didn't change the phys_birth, a lookup in the ARC for a
5422 * remapped BP could find the data that was previously stored at
5423 * this vdev + offset.
5425 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5426 DVA_GET_VDEV(&bp->blk_dva[0]));
5427 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5428 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5429 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5431 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5432 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5436 * If the block pointer contains any indirect DVAs, modify them to refer to
5437 * concrete DVAs. Note that this will sometimes not be possible, leaving
5438 * the indirect DVA in place. This happens if the indirect DVA spans multiple
5439 * segments in the mapping (i.e. it is a "split block").
5441 * If the BP was remapped, calls the callback on the original dva (note the
5442 * callback can be called multiple times if the original indirect DVA refers
5443 * to another indirect DVA, etc).
5445 * Returns TRUE if the BP was remapped.
5448 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5450 remap_blkptr_cb_arg_t rbca;
5452 if (!zfs_remap_blkptr_enable)
5455 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5459 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5460 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5462 if (BP_GET_DEDUP(bp))
5466 * Gang blocks can not be remapped, because
5467 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5468 * the BP used to read the gang block header (GBH) being the same
5469 * as the DVA[0] that we allocated for the GBH.
5475 * Embedded BP's have no DVA to remap.
5477 if (BP_GET_NDVAS(bp) < 1)
5481 * Note: we only remap dva[0]. If we remapped other dvas, we
5482 * would no longer know what their phys birth txg is.
5484 dva_t *dva = &bp->blk_dva[0];
5486 uint64_t offset = DVA_GET_OFFSET(dva);
5487 uint64_t size = DVA_GET_ASIZE(dva);
5488 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5490 if (vd->vdev_ops->vdev_op_remap == NULL)
5494 rbca.rbca_cb = callback;
5495 rbca.rbca_remap_vd = vd;
5496 rbca.rbca_remap_offset = offset;
5497 rbca.rbca_cb_arg = arg;
5500 * remap_blkptr_cb() will be called in order for each level of
5501 * indirection, until a concrete vdev is reached or a split block is
5502 * encountered. old_vd and old_offset are updated within the callback
5503 * as we go from the one indirect vdev to the next one (either concrete
5504 * or indirect again) in that order.
5506 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5508 /* Check if the DVA wasn't remapped because it is a split block */
5509 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5516 * Undo the allocation of a DVA which happened in the given transaction group.
5519 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5523 uint64_t vdev = DVA_GET_VDEV(dva);
5524 uint64_t offset = DVA_GET_OFFSET(dva);
5525 uint64_t size = DVA_GET_ASIZE(dva);
5527 ASSERT(DVA_IS_VALID(dva));
5528 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5530 if (txg > spa_freeze_txg(spa))
5533 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5534 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5535 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5536 (u_longlong_t)vdev, (u_longlong_t)offset,
5537 (u_longlong_t)size);
5541 ASSERT(!vd->vdev_removing);
5542 ASSERT(vdev_is_concrete(vd));
5543 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5544 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5546 if (DVA_GET_GANG(dva))
5547 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5549 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5551 mutex_enter(&msp->ms_lock);
5552 range_tree_remove(msp->ms_allocating[txg & TXG_MASK], offset, size);
5554 msp->ms_allocating_total -= size;
5556 VERIFY(!msp->ms_condensing);
5557 VERIFY3U(offset, >=, msp->ms_start);
5558 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5559 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5561 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5562 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5563 range_tree_add(msp->ms_allocatable, offset, size);
5564 mutex_exit(&msp->ms_lock);
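/*
 * Worked example of the ms_allocating[] indexing used above: there are
 * TXG_SIZE (4) in-flight allocation trees per metaslab, selected by
 * txg & TXG_MASK. For instance, txg 100 maps to ms_allocating[0],
 * txg 101 to [1], txg 102 to [2], txg 103 to [3], and txg 104 wraps back
 * to [0], by which time txg 100 has long since synced out.
 */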
/*
 * Free the block represented by the given DVA.
 */
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd = vdev_lookup_top(spa, vdev);

	ASSERT(DVA_IS_VALID(dva));
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	if (DVA_GET_GANG(dva)) {
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
	}

	metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
 * Reserve some allocation slots. The reservation system must be called
 * before we call into the allocator. If there aren't any available slots
 * then the I/O will be throttled until an I/O completes and its slots are
 * freed up. The function returns true if it was successful in placing
 * the reservation.
 */
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
    zio_t *zio, int flags)
{
	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
	uint64_t available_slots = 0;
	boolean_t slot_reserved = B_FALSE;
	uint64_t max = mca->mca_alloc_max_slots;

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);

	uint64_t reserved_slots = zfs_refcount_count(&mca->mca_alloc_slots);
	if (reserved_slots < max)
		available_slots = max - reserved_slots;

	if (slots <= available_slots || GANG_ALLOCATION(flags) ||
	    flags & METASLAB_MUST_RESERVE) {
		/*
		 * We reserve the slots individually so that we can unreserve
		 * them individually when an I/O completes.
		 */
		for (int d = 0; d < slots; d++)
			zfs_refcount_add(&mca->mca_alloc_slots, zio);
		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
		slot_reserved = B_TRUE;
	}

	mutex_exit(&mc->mc_lock);
	return (slot_reserved);
}

void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
    int allocator, zio_t *zio)
{
	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];

	ASSERT(mc->mc_alloc_throttle_enabled);
	mutex_enter(&mc->mc_lock);
	for (int d = 0; d < slots; d++)
		zfs_refcount_remove(&mca->mca_alloc_slots, zio);
	mutex_exit(&mc->mc_lock);
}
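/*
 * Illustrative sketch (hypothetical caller, not code from this file): the
 * two functions above are meant to bracket an allocating ZIO. A caller
 * reserves one slot per copy before dispatching the allocation and releases
 * the same number of slots when the I/O completes:
 *
 *	if (metaslab_class_throttle_reserve(mc, copies, zio->io_allocator,
 *	    zio, 0)) {
 *		// proceed with metaslab_alloc(); when the zio completes:
 *		// metaslab_class_throttle_unreserve(mc, copies,
 *		//     zio->io_allocator, zio);
 *	} else {
 *		// no slots available: requeue/throttle the zio
 *	}
 */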
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
    uint64_t txg)
{
	metaslab_t *msp;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	ASSERT3P(vd->vdev_ms, !=, NULL);
	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
		if (error == EBUSY) {
			ASSERT(msp->ms_loaded);
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
			error = 0;
		}
	}

	if (error == 0 &&
	    !range_tree_contains(msp->ms_allocatable, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	VERIFY(!msp->ms_condensing);
	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
	    msp->ms_size);
	range_tree_remove(msp->ms_allocatable, offset, size);
	range_tree_clear(msp->ms_trim, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(8) */
		metaslab_class_t *mc = msp->ms_group->mg_class;
		multilist_sublist_t *mls =
		    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
		if (!multilist_link_active(&msp->ms_class_txg_node)) {
			msp->ms_selected_txg = txg;
			multilist_sublist_insert_head(mls, msp);
		}
		multilist_sublist_unlock(mls);

		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
		    offset, size);
		msp->ms_allocating_total += size;
	}

	mutex_exit(&msp->ms_lock);

	return (error);
}
typedef struct metaslab_claim_cb_arg_t {
	uint64_t	mcca_txg;
	int		mcca_error;
} metaslab_claim_cb_arg_t;

static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	metaslab_claim_cb_arg_t *mcca_arg = arg;

	if (mcca_arg->mcca_error == 0) {
		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
		    size, mcca_arg->mcca_txg);
	}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	if (vd->vdev_ops->vdev_op_remap != NULL) {
		metaslab_claim_cb_arg_t arg;

		/*
		 * Only zdb(8) can claim on indirect vdevs. This is used
		 * to detect leaks of mapped space (that are not accounted
		 * for in the obsolete counts, spacemap, or bpobj).
		 */
		ASSERT(!spa_writeable(vd->vdev_spa));
		arg.mcca_error = 0;
		arg.mcca_txg = txg;

		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_claim_impl_cb, &arg);

		if (arg.mcca_error == 0) {
			arg.mcca_error = metaslab_claim_concrete(vd,
			    offset, size, txg);
		}
		return (arg.mcca_error);
	} else {
		return (metaslab_claim_concrete(vd, offset, size, txg));
	}
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		return (SET_ERROR(ENXIO));
	}

	ASSERT(DVA_IS_VALID(dva));

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	return (metaslab_claim_impl(vd, offset, size, txg));
}
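/*
 * Illustrative sketch (assumption about the caller, not code from this
 * file): during pool import the ZIL replay path claims each intent-log
 * block so its space is not handed out again, conceptually:
 *
 *	error = metaslab_claim(spa, bp, spa_first_txg(spa));
 *
 * which walks every DVA in the BP through metaslab_claim_dva() above.
 */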
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
    zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_allocator[allocator].mca_rotor == NULL) {
		/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
	ASSERT3P(zal, !=, NULL);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags, zal, allocator);
		if (error != 0) {
			for (d--; d >= 0; d--) {
				metaslab_unalloc_dva(spa, &dva[d], txg);
				metaslab_group_alloc_decrement(spa,
				    DVA_GET_VDEV(&dva[d]), zio, flags,
				    allocator, B_FALSE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		} else {
			/*
			 * Update the metaslab group's queue depth
			 * based on the newly allocated dva.
			 */
			metaslab_group_alloc_increment(spa,
			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, 0);

	return (0);
}
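/*
 * Illustrative sketch (hypothetical, simplified from how a ZIO-layer caller
 * might consume this interface): allocate ndvas DVAs for a write and fall
 * back when the class runs out of space. Here zal is assumed to be a
 * zio_alloc_list_t previously initialized with metaslab_trace_init(&zal).
 *
 *	int err = metaslab_alloc(spa, mc, psize, bp, ndvas, txg,
 *	    NULL, 0, &zal, zio, allocator);
 *	if (err == ENOSPC) {
 *		// e.g. retry as a gang block or in another metaslab class
 *	}
 *
 * Note that on failure metaslab_alloc() has already unwound any partially
 * allocated DVAs, so the BP is left untouched.
 */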
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	/*
	 * If we have a checkpoint for the pool we need to make sure that
	 * the blocks that we free that are part of the checkpoint won't be
	 * reused until the checkpoint is discarded or we revert to it.
	 *
	 * The checkpoint flag is passed down the metaslab_free code path
	 * and is set whenever we want to add a block to the checkpoint's
	 * accounting. That is, we "checkpoint" blocks that existed at the
	 * time the checkpoint was created and are therefore referenced by
	 * the checkpointed uberblock.
	 *
	 * Note that we don't checkpoint any blocks if the current
	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
	 * normally as they will be referenced by the checkpointed uberblock.
	 */
	boolean_t checkpoint = B_FALSE;
	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
		/*
		 * At this point, if the block is part of the checkpoint
		 * there is no way it was created in the current txg.
		 */
		ASSERT(!now);
		ASSERT3U(spa_syncing_txg(spa), ==, txg);
		checkpoint = B_TRUE;
	}

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		if (now) {
			metaslab_unalloc_dva(spa, &dva[d], txg);
		} else {
			ASSERT3U(txg, ==, spa_syncing_txg(spa));
			metaslab_free_dva(spa, &dva[d], checkpoint);
		}
	}

	spa_config_exit(spa, SCL_FREE, FTAG);
}
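/*
 * Worked example for the checkpoint logic above: suppose the pool was
 * checkpointed at txg 100. A block born at txg 90 and freed while syncing
 * txg 120 is still referenced by the checkpointed uberblock, so it is freed
 * with checkpoint == B_TRUE and its space is tracked in ms_checkpointing
 * until the checkpoint is discarded. A block born at txg 110 was never part
 * of the checkpoint and is freed normally (checkpoint == B_FALSE).
 */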
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_claim_dva(spa, &dva[d], txg);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}
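/*
 * Note on the dry-run convention used above: passing txg == 0 asks
 * metaslab_claim_concrete() to only verify that the extent is still free,
 * without modifying any range trees. A sketch of the two-phase pattern:
 *
 *	if (metaslab_claim(spa, bp, 0) == 0)		// dry run: claimable?
 *		VERIFY0(metaslab_claim(spa, bp, txg));	// really claim it
 *
 * (metaslab_claim() already performs this dance internally when txg != 0.)
 */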
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}

void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	uint64_t psize = BP_GET_PSIZE(bp);
	int d;
	vdev_t *vd;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!BP_IS_EMBEDDED(bp));
	ASSERT(psize > 0);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (d = 0; d < ndvas; d++) {
		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
			continue;
		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
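/*
 * Illustrative sketch (assumption about the caller, not code from this
 * file): the ZIL marks a log block's physical size as pending fastwrite on
 * its top-level vdev when the lwb is created and unmarks it once the block
 * is written out or freed, keeping the two calls strictly paired:
 *
 *	metaslab_fastwrite_mark(spa, &lwb->lwb_blk);
 *	// ... issue and complete the lwb write ...
 *	metaslab_fastwrite_unmark(spa, &lwb->lwb_blk);
 */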
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	if (vd->vdev_ops == &vdev_indirect_ops)
		return;
	metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
	metaslab_t *msp;
	spa_t *spa __maybe_unused = vd->vdev_spa;

	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	if (vd->vdev_ops->vdev_op_remap != NULL) {
		vd->vdev_ops->vdev_op_remap(vd, offset, size,
		    metaslab_check_free_impl_cb, NULL);
		return;
	}

	ASSERT(vdev_is_concrete(vd));
	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	mutex_enter(&msp->ms_lock);
	if (msp->ms_loaded) {
		range_tree_verify_not_present(msp->ms_allocatable,
		    offset, size);
	}

	/*
	 * Check all segments that currently exist in the freeing pipeline.
	 *
	 * It would intuitively make sense to also check the current allocating
	 * tree since metaslab_unalloc_dva() exists for extents that are
	 * allocated and freed in the same sync pass within the same txg.
	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
	 * segment but then we free part of it within the same txg
	 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
	 * current allocating tree.
	 */
	range_tree_verify_not_present(msp->ms_freeing, offset, size);
	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
	range_tree_verify_not_present(msp->ms_freed, offset, size);
	for (int j = 0; j < TXG_DEFER_SIZE; j++)
		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
	range_tree_verify_not_present(msp->ms_trim, offset, size);
	mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);

		if (DVA_GET_GANG(&bp->blk_dva[i]))
			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

		ASSERT3P(vd, !=, NULL);

		metaslab_check_free_impl(vd, offset, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	while (mg->mg_disabled_updating) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
}

static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
	ASSERT(mg->mg_disabled_updating);

	while (mg->mg_ms_disabled >= max_disabled_ms) {
		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
	}
	mg->mg_ms_disabled++;
	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
 * We must also track how many metaslabs are currently disabled within a
 * metaslab group and limit them to prevent allocation failures from
 * occurring because all metaslabs are disabled.
 */
void
metaslab_disable(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_disabled_lock);

	/*
	 * To keep an accurate count of how many threads have disabled
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_disabled will be accurate when we decide to mark a metaslab
	 * group as disabled. To do this we force all other threads
	 * to wait until the metaslab's mg_disabled_updating flag is no
	 * longer set.
	 */
	metaslab_group_disable_wait(mg);
	mg->mg_disabled_updating = B_TRUE;
	if (msp->ms_disabled == 0) {
		metaslab_group_disabled_increment(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_disabled++;
	mutex_exit(&msp->ms_lock);

	mg->mg_disabled_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_disabled_cv);
	mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
	metaslab_group_t *mg = msp->ms_group;
	spa_t *spa = mg->mg_vd->vdev_spa;

	/*
	 * Wait for the outstanding IO to be synced to prevent newly
	 * allocated blocks from being overwritten. This is used by
	 * initialize and TRIM which are modifying unallocated space.
	 */
	if (sync)
		txg_wait_synced(spa_get_dsl(spa), 0);

	mutex_enter(&mg->mg_ms_disabled_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_disabled == 0) {
		mg->mg_ms_disabled--;
		cv_broadcast(&mg->mg_ms_disabled_cv);
		if (unload)
			metaslab_unload(msp);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_disabled_lock);
}
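/*
 * Illustrative sketch (hypothetical caller, not code from this file): TRIM
 * and initialize wrap their work on a metaslab's free space in a
 * disable/enable pair so the allocator stays away from the ranges being
 * rewritten:
 *
 *	metaslab_disable(msp);
 *	// ... issue TRIM/initialize I/Os against unallocated space ...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);	// sync first, keep loaded
 *
 * The sync argument forces a txg_wait_synced() before re-enabling; unload
 * additionally drops the in-core range trees once the disable count for the
 * metaslab reaches zero.
 */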
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
	vdev_t *vd = ms->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);

	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	metaslab_unflushed_phys_t entry = {
		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
	};
	uint64_t entry_size = sizeof (entry);
	uint64_t entry_offset = ms->ms_id * entry_size;

	uint64_t object = 0;
	int err = zap_lookup(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
	    &object);
	if (err == ENOENT) {
		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
		VERIFY0(zap_add(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
		    &object, tx));
	} else {
		VERIFY0(err);
	}

	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
	    &entry, tx);
}
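/*
 * Worked example for the on-disk layout used above: each metaslab owns a
 * fixed-size metaslab_unflushed_phys_t slot in the per-vdev object, at byte
 * offset ms_id * sizeof (metaslab_unflushed_phys_t). With an 8-byte entry,
 * metaslab 0 lives at offset 0, metaslab 3 at offset 24, and so on, so each
 * record can be rewritten in place without touching its neighbors.
 */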
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
	spa_t *spa = ms->ms_group->mg_vd->vdev_spa;

	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
		return;

	ms->ms_unflushed_txg = txg;
	metaslab_update_ondisk_flush_data(ms, tx);
}

uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
	return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
	"Allocation granularity (a.k.a. stripe size)");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
	"Load all metaslabs when pool is first opened");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
	"Prevent metaslabs from being unloaded");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
	"Preload potential metaslabs during reassessment");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
	"Delay in txgs after metaslab was last used before unloading");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
	"Delay in milliseconds after metaslab was last used before unloading");

ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
	"Percentage of metaslab group size that should be free to make it "
	"eligible for allocation");

ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
	"Percentage of metaslab group size that should be considered eligible "
	"for allocations unless all metaslab groups within the metaslab class "
	"have also crossed this threshold");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
	ZMOD_RW, "Fragmentation for metaslab to allow allocation");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
	ZMOD_RW, "Use the fragmentation metric to prefer less fragmented metaslabs");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
	"Prefer metaslabs with lower LBAs");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
	"Enable metaslab group biasing");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
	ZMOD_RW, "Enable segment-based metaslab selection");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
	"Segment-based metaslab selection maximum buckets before switching");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
	"Blocks larger than this size are forced to be gang blocks");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
	"Max distance (bytes) to search forward before using size tree");

ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
	"When looking in size tree, use largest segment instead of exact fit");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
	"Percentage of memory that can be used to store metaslab range trees");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
	ZMOD_RW, "Try hard to allocate before ganging");

ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
	"Normally only consider this many of the best metaslabs in each vdev");