4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 * Copyright (c) 2017, Intel Corporation.
29 #include <sys/zfs_context.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
36 #include <sys/spa_impl.h>
37 #include <sys/zfeature.h>
38 #include <sys/vdev_indirect_mapping.h>
41 SYSCTL_DECL(_vfs_zfs);
42 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
45 #define GANG_ALLOCATION(flags) \
46 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
48 uint64_t metaslab_aliquot = 512ULL << 10;
49 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
50 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, force_ganging, CTLFLAG_RWTUN,
51 &metaslab_force_ganging, 0,
52 "Force gang block allocation for blocks larger than or equal to this value");
55 * Since we can touch multiple metaslabs (and their respective space maps)
56 * with each transaction group, we benefit from having a smaller space map
57 * block size since it allows us to issue more I/O operations scattered around the disk.
60 int zfs_metaslab_sm_blksz = (1 << 12);
61 SYSCTL_INT(_vfs_zfs, OID_AUTO, metaslab_sm_blksz, CTLFLAG_RDTUN,
62 &zfs_metaslab_sm_blksz, 0,
63 "Block size for metaslab DTL space map. Power of 2 and greater than 4096.");
66 * The in-core space map representation is more compact than its on-disk form.
67 * The zfs_condense_pct determines how much more compact the in-core
68 * space map representation must be before we compact it on-disk.
69 * Values should be greater than or equal to 100.
71 int zfs_condense_pct = 200;
72 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
74 "Condense on-disk spacemap when it is more than this many percents"
75 " of in-memory counterpart");
78 * Condensing a metaslab is not guaranteed to actually reduce the amount of
79 * space used on disk. In particular, a space map uses data in increments of
80 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
81 * same number of blocks after condensing. Since the goal of condensing is to
82 * reduce the number of IOPs required to read the space map, we only want to
83 * condense when we can be sure we will reduce the number of blocks used by the
84 * space map. Unfortunately, we cannot precisely compute whether or not this is
85 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
86 * we apply the following heuristic: do not condense a spacemap unless the
87 * uncondensed size consumes more than zfs_metaslab_condense_block_threshold blocks.
90 int zfs_metaslab_condense_block_threshold = 4;
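
/*
 * For example, with the default threshold of 4, a space map is not
 * condensed until its uncondensed on-disk representation occupies more
 * than four space map blocks.
 */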
93 * The zfs_mg_noalloc_threshold defines which metaslab groups should
94 * be eligible for allocation. The value is defined as a percentage of
95 * free space. Metaslab groups that have more free space than
96 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
97 * a metaslab group's free space is less than or equal to the
98 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
99 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
100 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
101 * groups are allowed to accept allocations. Gang blocks are always
102 * eligible to allocate on any metaslab group. The default value of 0 means
103 * no metaslab group will be excluded based on this criterion.
105 int zfs_mg_noalloc_threshold = 0;
106 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
107 &zfs_mg_noalloc_threshold, 0,
108 "Percentage of metaslab group size that should be free"
109 " to make it eligible for allocation");
112 * Metaslab groups are considered eligible for allocations if their
113 * fragmentation metric (measured as a percentage) is less than or equal to
114 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
115 * then it will be skipped unless all metaslab groups within the metaslab
116 * class have also crossed this threshold.
118 int zfs_mg_fragmentation_threshold = 85;
119 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
120 &zfs_mg_fragmentation_threshold, 0,
121 "Percentage of metaslab group size that should be considered "
122 "eligible for allocations unless all metaslab groups within the metaslab class "
123 "have also crossed this threshold");
126 * Allow metaslabs to keep their active state as long as their fragmentation
127 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
128 * active metaslab that exceeds this threshold will no longer keep its active
129 * status allowing better metaslabs to be selected.
131 int zfs_metaslab_fragmentation_threshold = 70;
132 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
133 &zfs_metaslab_fragmentation_threshold, 0,
134 "Maximum percentage of metaslab fragmentation level to keep their active state");
137 * When set, load all metaslabs when the pool is first opened.
139 int metaslab_debug_load = 0;
140 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
141 &metaslab_debug_load, 0,
142 "Load all metaslabs when pool is first opened");
145 * When set, prevent metaslabs from being unloaded.
147 int metaslab_debug_unload = 0;
148 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
149 &metaslab_debug_unload, 0,
150 "Prevent metaslabs from being unloaded");
153 * Minimum size which forces the dynamic allocator to change
154 * its allocation strategy. Once the space map cannot satisfy
155 * an allocation of this size then it switches to using a more
156 * aggressive strategy (i.e. search by size rather than offset).
158 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
159 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
160 &metaslab_df_alloc_threshold, 0,
161 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
164 * The minimum free space, in percent, which must be available
165 * in a space map to continue allocations in a first-fit fashion.
166 * Once the space map's free space drops below this level we dynamically
167 * switch to using best-fit allocations.
169 int metaslab_df_free_pct = 4;
170 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
171 &metaslab_df_free_pct, 0,
172 "The minimum free space, in percent, which must be available in a "
173 "space map to continue allocations in a first-fit fashion");
176 * A metaslab is considered "free" if it contains a contiguous
177 * segment which is greater than metaslab_min_alloc_size.
179 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
180 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
181 &metaslab_min_alloc_size, 0,
182 "A metaslab is considered \"free\" if it contains a contiguous "
183 "segment which is greater than vfs.zfs.metaslab.min_alloc_size");
186 * Percentage of all cpus that can be used by the metaslab taskq.
188 int metaslab_load_pct = 50;
189 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
190 &metaslab_load_pct, 0,
191 "Percentage of cpus that can be used by the metaslab taskq");
194 * Determines how many txgs a metaslab may remain loaded without having any
195 * allocations from it. As long as a metaslab continues to be used we will keep it loaded.
198 int metaslab_unload_delay = TXG_SIZE * 2;
199 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
200 &metaslab_unload_delay, 0,
201 "Number of TXGs that an unused metaslab can be kept in memory");
204 * Max number of metaslabs per group to preload.
206 int metaslab_preload_limit = SPA_DVAS_PER_BP;
207 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
208 &metaslab_preload_limit, 0,
209 "Max number of metaslabs per group to preload");
212 * Enable/disable preloading of metaslabs.
214 boolean_t metaslab_preload_enabled = B_TRUE;
215 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
216 &metaslab_preload_enabled, 0,
217 "Max number of metaslabs per group to preload");
220 * Enable/disable fragmentation weighting on metaslabs.
222 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
223 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
224 &metaslab_fragmentation_factor_enabled, 0,
225 "Enable fragmentation weighting on metaslabs");
228 * Enable/disable lba weighting (i.e. outer tracks are given preference).
230 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
231 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
232 &metaslab_lba_weighting_enabled, 0,
233 "Enable LBA weighting (i.e. outer tracks are given preference)");
236 * Enable/disable metaslab group biasing.
238 boolean_t metaslab_bias_enabled = B_TRUE;
239 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
240 &metaslab_bias_enabled, 0,
241 "Enable metaslab group biasing");
244 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
246 boolean_t zfs_remap_blkptr_enable = B_TRUE;
249 * Enable/disable segment-based metaslab selection.
251 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
254 * When using segment-based metaslab selection, we will continue
255 * allocating from the active metaslab until we have exhausted
256 * zfs_metaslab_switch_threshold of its buckets.
258 int zfs_metaslab_switch_threshold = 2;
261 * Internal switch to enable/disable the metaslab allocation tracing facility.
264 #ifdef _METASLAB_TRACING
265 boolean_t metaslab_trace_enabled = B_TRUE;
269 * Maximum entries that the metaslab allocation tracing facility will keep
270 * in a given list when running in non-debug mode. We limit the number
271 * of entries in non-debug mode to prevent us from using up too much memory.
272 * The limit should be sufficiently large that we don't expect any allocation
273 * to ever exceed this value. In debug mode, the system will panic if this
274 * limit is ever reached, allowing for further investigation.
276 #ifdef _METASLAB_TRACING
277 uint64_t metaslab_trace_max_entries = 5000;
280 static uint64_t metaslab_weight(metaslab_t *);
281 static void metaslab_set_fragmentation(metaslab_t *);
282 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
283 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
284 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
285 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
286 #ifdef _METASLAB_TRACING
287 kmem_cache_t *metaslab_alloc_trace_cache;
291 * ==========================================================================
293 * ==========================================================================
296 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
298 metaslab_class_t *mc;
300 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
305 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
306 mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
307 sizeof (zfs_refcount_t), KM_SLEEP);
308 mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
309 sizeof (uint64_t), KM_SLEEP);
310 for (int i = 0; i < spa->spa_alloc_count; i++)
311 zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);
317 metaslab_class_destroy(metaslab_class_t *mc)
319 ASSERT(mc->mc_rotor == NULL);
320 ASSERT(mc->mc_alloc == 0);
321 ASSERT(mc->mc_deferred == 0);
322 ASSERT(mc->mc_space == 0);
323 ASSERT(mc->mc_dspace == 0);
325 for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
326 zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
327 kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
328 sizeof (zfs_refcount_t));
329 kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
331 mutex_destroy(&mc->mc_lock);
332 kmem_free(mc, sizeof (metaslab_class_t));
336 metaslab_class_validate(metaslab_class_t *mc)
338 metaslab_group_t *mg;
342 * Must hold one of the spa_config locks.
344 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
345 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
347 if ((mg = mc->mc_rotor) == NULL)
352 ASSERT(vd->vdev_mg != NULL);
353 ASSERT3P(vd->vdev_top, ==, vd);
354 ASSERT3P(mg->mg_class, ==, mc);
355 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
356 } while ((mg = mg->mg_next) != mc->mc_rotor);
362 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
363 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
365 atomic_add_64(&mc->mc_alloc, alloc_delta);
366 atomic_add_64(&mc->mc_deferred, defer_delta);
367 atomic_add_64(&mc->mc_space, space_delta);
368 atomic_add_64(&mc->mc_dspace, dspace_delta);
372 metaslab_class_minblocksize_update(metaslab_class_t *mc)
374 metaslab_group_t *mg;
376 uint64_t minashift = UINT64_MAX;
378 if ((mg = mc->mc_rotor) == NULL) {
379 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
385 if (vd->vdev_ashift < minashift)
386 minashift = vd->vdev_ashift;
387 } while ((mg = mg->mg_next) != mc->mc_rotor);
389 mc->mc_minblocksize = 1ULL << minashift;
393 metaslab_class_get_alloc(metaslab_class_t *mc)
395 return (mc->mc_alloc);
399 metaslab_class_get_deferred(metaslab_class_t *mc)
401 return (mc->mc_deferred);
405 metaslab_class_get_space(metaslab_class_t *mc)
407 return (mc->mc_space);
411 metaslab_class_get_dspace(metaslab_class_t *mc)
413 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
417 metaslab_class_get_minblocksize(metaslab_class_t *mc)
419 return (mc->mc_minblocksize);
423 metaslab_class_histogram_verify(metaslab_class_t *mc)
425 spa_t *spa = mc->mc_spa;
426 vdev_t *rvd = spa->spa_root_vdev;
430 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
433 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
436 for (int c = 0; c < rvd->vdev_children; c++) {
437 vdev_t *tvd = rvd->vdev_child[c];
438 metaslab_group_t *mg = tvd->vdev_mg;
441 * Skip any holes, uninitialized top-levels, or
442 * vdevs that are not in this metaslab class.
444 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
445 mg->mg_class != mc) {
449 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
450 mc_hist[i] += mg->mg_histogram[i];
453 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
454 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
456 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
460 * Calculate the metaslab class's fragmentation metric. The metric
461 * is weighted based on the space contribution of each metaslab group.
462 * The return value will be a number between 0 and 100 (inclusive), or
463 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
464 * zfs_frag_table for more information about the metric.
467 metaslab_class_fragmentation(metaslab_class_t *mc)
469 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
470 uint64_t fragmentation = 0;
472 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
474 for (int c = 0; c < rvd->vdev_children; c++) {
475 vdev_t *tvd = rvd->vdev_child[c];
476 metaslab_group_t *mg = tvd->vdev_mg;
479 * Skip any holes, uninitialized top-levels,
480 * or vdevs that are not in this metaslab class.
482 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
483 mg->mg_class != mc) {
488 * If a metaslab group does not contain a fragmentation
489 * metric then just bail out.
491 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
492 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
493 return (ZFS_FRAG_INVALID);
497 * Determine how much this metaslab_group is contributing
498 * to the overall pool fragmentation metric.
500 fragmentation += mg->mg_fragmentation *
501 metaslab_group_get_space(mg);
503 fragmentation /= metaslab_class_get_space(mc);
505 ASSERT3U(fragmentation, <=, 100);
506 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
507 return (fragmentation);
511 * Calculate the amount of expandable space that is available in
512 * this metaslab class. If a device is expanded then its expandable
513 * space will be the amount of allocatable space that is currently not
514 * part of this metaslab class.
517 metaslab_class_expandable_space(metaslab_class_t *mc)
519 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
522 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
523 for (int c = 0; c < rvd->vdev_children; c++) {
525 vdev_t *tvd = rvd->vdev_child[c];
526 metaslab_group_t *mg = tvd->vdev_mg;
528 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
529 mg->mg_class != mc) {
534 * Calculate if we have enough space to add additional
535 * metaslabs. We report the expandable space in terms
536 * of the metaslab size since that's the unit of expansion.
537 * Adjust by the EFI system partition size.
539 tspace = tvd->vdev_max_asize - tvd->vdev_asize;
540 if (tspace > mc->mc_spa->spa_bootsize) {
541 tspace -= mc->mc_spa->spa_bootsize;
543 space += P2ALIGN(tspace, 1ULL << tvd->vdev_ms_shift);
545 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
550 metaslab_compare(const void *x1, const void *x2)
552 const metaslab_t *m1 = (const metaslab_t *)x1;
553 const metaslab_t *m2 = (const metaslab_t *)x2;
557 if (m1->ms_allocator != -1 && m1->ms_primary)
559 else if (m1->ms_allocator != -1 && !m1->ms_primary)
561 if (m2->ms_allocator != -1 && m2->ms_primary)
563 else if (m2->ms_allocator != -1 && !m2->ms_primary)
567 * Sort inactive metaslabs first, then primaries, then secondaries. When
568 * selecting a metaslab to allocate from, an allocator first tries its
569 * primary, then secondary active metaslab. If it doesn't have active
570 * metaslabs, or can't allocate from them, it searches for an inactive
571 * metaslab to activate. If it can't find a suitable one, it will steal
572 * a primary or secondary metaslab from another allocator.
579 int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
583 IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
585 return (AVL_CMP(m1->ms_start, m2->ms_start));
589 metaslab_allocated_space(metaslab_t *msp)
591 return (msp->ms_allocated_space);
595 * Verify that the space accounting on disk matches the in-core range_trees.
598 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
600 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
601 uint64_t allocating = 0;
602 uint64_t sm_free_space, msp_free_space;
604 ASSERT(MUTEX_HELD(&msp->ms_lock));
605 ASSERT(!msp->ms_condensing);
607 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
611 * We can only verify the metaslab space when we're called
612 * from syncing context with a loaded metaslab that has an
613 * allocated space map. Calling this in non-syncing context
614 * does not provide a consistent view of the metaslab since
615 * we're performing allocations in the future.
617 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
622 * Even though the smp_alloc field can get negative (e.g.
623 * see vdev_checkpoint_sm), that should never be the case
624 * when it comes to a metaslab's space map.
626 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
628 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
631 * Account for future allocations since we would have
632 * already deducted that space from the ms_allocatable.
634 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
636 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
639 ASSERT3U(msp->ms_deferspace, ==,
640 range_tree_space(msp->ms_defer[0]) +
641 range_tree_space(msp->ms_defer[1]));
643 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
644 msp->ms_deferspace + range_tree_space(msp->ms_freed);
646 VERIFY3U(sm_free_space, ==, msp_free_space);
650 * ==========================================================================
652 * ==========================================================================
655 * Update the allocatable flag and the metaslab group's capacity.
656 * The allocatable flag is set to true if the free capacity is above
657 * the zfs_mg_noalloc_threshold and the fragmentation value is less than
658 * or equal to zfs_mg_fragmentation_threshold. If a metaslab group
659 * transitions from allocatable to non-allocatable or vice versa then the
660 * metaslab group's class is updated to reflect the transition.
663 metaslab_group_alloc_update(metaslab_group_t *mg)
665 vdev_t *vd = mg->mg_vd;
666 metaslab_class_t *mc = mg->mg_class;
667 vdev_stat_t *vs = &vd->vdev_stat;
668 boolean_t was_allocatable;
669 boolean_t was_initialized;
671 ASSERT(vd == vd->vdev_top);
672 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
675 mutex_enter(&mg->mg_lock);
676 was_allocatable = mg->mg_allocatable;
677 was_initialized = mg->mg_initialized;
679 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
682 mutex_enter(&mc->mc_lock);
685 * If the metaslab group was just added then it won't
686 * have any space until we finish syncing out this txg.
687 * At that point we will consider it initialized and available
688 * for allocations. We also don't consider non-activated
689 * metaslab groups (e.g. vdevs that are in the middle of being removed)
690 * to be initialized, because they can't be used for allocation.
692 mg->mg_initialized = metaslab_group_initialized(mg);
693 if (!was_initialized && mg->mg_initialized) {
695 } else if (was_initialized && !mg->mg_initialized) {
696 ASSERT3U(mc->mc_groups, >, 0);
699 if (mg->mg_initialized)
700 mg->mg_no_free_space = B_FALSE;
703 * A metaslab group is considered allocatable if it has plenty
704 * of free space or is not heavily fragmented. We only take
705 * fragmentation into account if the metaslab group has a valid
706 * fragmentation metric (i.e. a value between 0 and 100).
708 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
709 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
710 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
711 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
714 * The mc_alloc_groups maintains a count of the number of
715 * groups in this metaslab class that are still above the
716 * zfs_mg_noalloc_threshold. This is used by the allocating
717 * threads to determine if they should avoid allocations to
718 * a given group. The allocator will avoid allocations to a group
719 * if that group has reached or is below the zfs_mg_noalloc_threshold
720 * and there are still other groups that are above the threshold.
721 * When a group transitions from allocatable to non-allocatable or
722 * vice versa we update the metaslab class to reflect that change.
723 * When the mc_alloc_groups value drops to 0 that means that all
724 * groups have reached the zfs_mg_noalloc_threshold making all groups
725 * eligible for allocations. This effectively means that all devices
726 * are balanced again.
728 if (was_allocatable && !mg->mg_allocatable)
729 mc->mc_alloc_groups--;
730 else if (!was_allocatable && mg->mg_allocatable)
731 mc->mc_alloc_groups++;
732 mutex_exit(&mc->mc_lock);
734 mutex_exit(&mg->mg_lock);
738 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
740 metaslab_group_t *mg;
742 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
743 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
744 mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
745 cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
746 mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
748 mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
750 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
751 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
754 mg->mg_activation_count = 0;
755 mg->mg_initialized = B_FALSE;
756 mg->mg_no_free_space = B_TRUE;
757 mg->mg_allocators = allocators;
759 mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
760 sizeof (zfs_refcount_t), KM_SLEEP);
761 mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
762 sizeof (uint64_t), KM_SLEEP);
763 for (int i = 0; i < allocators; i++) {
764 zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
765 mg->mg_cur_max_alloc_queue_depth[i] = 0;
768 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
769 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
775 metaslab_group_destroy(metaslab_group_t *mg)
777 ASSERT(mg->mg_prev == NULL);
778 ASSERT(mg->mg_next == NULL);
780 * We may have gone below zero with the activation count
781 * either because we never activated in the first place or
782 * because we're done, and possibly removing the vdev.
784 ASSERT(mg->mg_activation_count <= 0);
786 taskq_destroy(mg->mg_taskq);
787 avl_destroy(&mg->mg_metaslab_tree);
788 kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
789 kmem_free(mg->mg_secondaries, mg->mg_allocators *
790 sizeof (metaslab_t *));
791 mutex_destroy(&mg->mg_lock);
792 mutex_destroy(&mg->mg_ms_initialize_lock);
793 cv_destroy(&mg->mg_ms_initialize_cv);
795 for (int i = 0; i < mg->mg_allocators; i++) {
796 zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
797 mg->mg_cur_max_alloc_queue_depth[i] = 0;
799 kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
800 sizeof (zfs_refcount_t));
801 kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
804 kmem_free(mg, sizeof (metaslab_group_t));
808 metaslab_group_activate(metaslab_group_t *mg)
810 metaslab_class_t *mc = mg->mg_class;
811 metaslab_group_t *mgprev, *mgnext;
813 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
815 ASSERT(mc->mc_rotor != mg);
816 ASSERT(mg->mg_prev == NULL);
817 ASSERT(mg->mg_next == NULL);
818 ASSERT(mg->mg_activation_count <= 0);
820 if (++mg->mg_activation_count <= 0)
823 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
824 metaslab_group_alloc_update(mg);
826 if ((mgprev = mc->mc_rotor) == NULL) {
830 mgnext = mgprev->mg_next;
831 mg->mg_prev = mgprev;
832 mg->mg_next = mgnext;
833 mgprev->mg_next = mg;
834 mgnext->mg_prev = mg;
837 metaslab_class_minblocksize_update(mc);
841 * Passivate a metaslab group and remove it from the allocation rotor.
842 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
843 * a metaslab group. This function will momentarily drop spa_config_locks
844 * that are lower than the SCL_ALLOC lock (see comment below).
847 metaslab_group_passivate(metaslab_group_t *mg)
849 metaslab_class_t *mc = mg->mg_class;
850 spa_t *spa = mc->mc_spa;
851 metaslab_group_t *mgprev, *mgnext;
852 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
854 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
855 (SCL_ALLOC | SCL_ZIO));
857 if (--mg->mg_activation_count != 0) {
858 ASSERT(mc->mc_rotor != mg);
859 ASSERT(mg->mg_prev == NULL);
860 ASSERT(mg->mg_next == NULL);
861 ASSERT(mg->mg_activation_count < 0);
866 * The spa_config_lock is an array of rwlocks, ordered as
867 * follows (from highest to lowest):
868 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
869 * SCL_ZIO > SCL_FREE > SCL_VDEV
870 * (For more information about the spa_config_lock see spa_misc.c)
871 * The higher the lock, the broader its coverage. When we passivate
872 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
873 * config locks. However, the metaslab group's taskq might be trying
874 * to preload metaslabs so we must drop the SCL_ZIO lock and any
875 * lower locks to allow the I/O to complete. At a minimum,
876 * we continue to hold the SCL_ALLOC lock, which prevents any future
877 * allocations from taking place and any changes to the vdev tree.
879 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
880 taskq_wait(mg->mg_taskq);
881 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
882 metaslab_group_alloc_update(mg);
883 for (int i = 0; i < mg->mg_allocators; i++) {
884 metaslab_t *msp = mg->mg_primaries[i];
886 mutex_enter(&msp->ms_lock);
887 metaslab_passivate(msp,
888 metaslab_weight_from_range_tree(msp));
889 mutex_exit(&msp->ms_lock);
891 msp = mg->mg_secondaries[i];
893 mutex_enter(&msp->ms_lock);
894 metaslab_passivate(msp,
895 metaslab_weight_from_range_tree(msp));
896 mutex_exit(&msp->ms_lock);
900 mgprev = mg->mg_prev;
901 mgnext = mg->mg_next;
906 mc->mc_rotor = mgnext;
907 mgprev->mg_next = mgnext;
908 mgnext->mg_prev = mgprev;
913 metaslab_class_minblocksize_update(mc);
917 metaslab_group_initialized(metaslab_group_t *mg)
919 vdev_t *vd = mg->mg_vd;
920 vdev_stat_t *vs = &vd->vdev_stat;
922 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
926 metaslab_group_get_space(metaslab_group_t *mg)
928 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
932 metaslab_group_histogram_verify(metaslab_group_t *mg)
935 vdev_t *vd = mg->mg_vd;
936 uint64_t ashift = vd->vdev_ashift;
939 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
942 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
945 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
946 SPACE_MAP_HISTOGRAM_SIZE + ashift);
948 for (int m = 0; m < vd->vdev_ms_count; m++) {
949 metaslab_t *msp = vd->vdev_ms[m];
952 /* skip if not active or not a member */
953 if (msp->ms_sm == NULL || msp->ms_group != mg)
956 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
957 mg_hist[i + ashift] +=
958 msp->ms_sm->sm_phys->smp_histogram[i];
961 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
962 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
964 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
968 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
970 metaslab_class_t *mc = mg->mg_class;
971 uint64_t ashift = mg->mg_vd->vdev_ashift;
973 ASSERT(MUTEX_HELD(&msp->ms_lock));
974 if (msp->ms_sm == NULL)
977 mutex_enter(&mg->mg_lock);
978 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
979 mg->mg_histogram[i + ashift] +=
980 msp->ms_sm->sm_phys->smp_histogram[i];
981 mc->mc_histogram[i + ashift] +=
982 msp->ms_sm->sm_phys->smp_histogram[i];
984 mutex_exit(&mg->mg_lock);
988 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
990 metaslab_class_t *mc = mg->mg_class;
991 uint64_t ashift = mg->mg_vd->vdev_ashift;
993 ASSERT(MUTEX_HELD(&msp->ms_lock));
994 if (msp->ms_sm == NULL)
997 mutex_enter(&mg->mg_lock);
998 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
999 ASSERT3U(mg->mg_histogram[i + ashift], >=,
1000 msp->ms_sm->sm_phys->smp_histogram[i]);
1001 ASSERT3U(mc->mc_histogram[i + ashift], >=,
1002 msp->ms_sm->sm_phys->smp_histogram[i]);
1004 mg->mg_histogram[i + ashift] -=
1005 msp->ms_sm->sm_phys->smp_histogram[i];
1006 mc->mc_histogram[i + ashift] -=
1007 msp->ms_sm->sm_phys->smp_histogram[i];
1009 mutex_exit(&mg->mg_lock);
1013 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1015 ASSERT(msp->ms_group == NULL);
1016 mutex_enter(&mg->mg_lock);
1019 avl_add(&mg->mg_metaslab_tree, msp);
1020 mutex_exit(&mg->mg_lock);
1022 mutex_enter(&msp->ms_lock);
1023 metaslab_group_histogram_add(mg, msp);
1024 mutex_exit(&msp->ms_lock);
1028 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1030 mutex_enter(&msp->ms_lock);
1031 metaslab_group_histogram_remove(mg, msp);
1032 mutex_exit(&msp->ms_lock);
1034 mutex_enter(&mg->mg_lock);
1035 ASSERT(msp->ms_group == mg);
1036 avl_remove(&mg->mg_metaslab_tree, msp);
1037 msp->ms_group = NULL;
1038 mutex_exit(&mg->mg_lock);
1042 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1044 ASSERT(MUTEX_HELD(&mg->mg_lock));
1045 ASSERT(msp->ms_group == mg);
1046 avl_remove(&mg->mg_metaslab_tree, msp);
1047 msp->ms_weight = weight;
1048 avl_add(&mg->mg_metaslab_tree, msp);
1053 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1056 * Although in principle the weight can be any value, in
1057 * practice we do not use values in the range [1, 511].
1059 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1060 ASSERT(MUTEX_HELD(&msp->ms_lock));
1062 mutex_enter(&mg->mg_lock);
1063 metaslab_group_sort_impl(mg, msp, weight);
1064 mutex_exit(&mg->mg_lock);
1068 * Calculate the fragmentation for a given metaslab group. We can use
1069 * a simple average here since all metaslabs within the group must have
1070 * the same size. The return value will be a value between 0 and 100
1071 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
1072 * group have a fragmentation metric.
1075 metaslab_group_fragmentation(metaslab_group_t *mg)
1077 vdev_t *vd = mg->mg_vd;
1078 uint64_t fragmentation = 0;
1079 uint64_t valid_ms = 0;
1081 for (int m = 0; m < vd->vdev_ms_count; m++) {
1082 metaslab_t *msp = vd->vdev_ms[m];
1084 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1086 if (msp->ms_group != mg)
1090 fragmentation += msp->ms_fragmentation;
1093 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1094 return (ZFS_FRAG_INVALID);
1096 fragmentation /= valid_ms;
1097 ASSERT3U(fragmentation, <=, 100);
1098 return (fragmentation);
1102 * Determine if a given metaslab group should skip allocations. A metaslab
1103 * group should avoid allocations if its free capacity is less than the
1104 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1105 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1106 * that can still handle allocations. If the allocation throttle is enabled
1107 * then we skip allocations to devices that have reached their maximum
1108 * allocation queue depth unless the selected metaslab group is the only
1109 * eligible group remaining.
1112 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1113 uint64_t psize, int allocator, int d)
1115 spa_t *spa = mg->mg_vd->vdev_spa;
1116 metaslab_class_t *mc = mg->mg_class;
1119 * We can only consider skipping this metaslab group if it's
1120 * in the normal metaslab class and there are other metaslab
1121 * groups to select from. Otherwise, we always consider it eligible for allocations.
1124 if ((mc != spa_normal_class(spa) &&
1125 mc != spa_special_class(spa) &&
1126 mc != spa_dedup_class(spa)) ||
1131 * If the metaslab group's mg_allocatable flag is set (see comments
1132 * in metaslab_group_alloc_update() for more information) and
1133 * the allocation throttle is disabled then allow allocations to this
1134 * device. However, if the allocation throttle is enabled then
1135 * check if we have reached our allocation limit (mg_alloc_queue_depth)
1136 * to determine if we should allow allocations to this metaslab group.
1137 * If all metaslab groups are no longer considered allocatable
1138 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1139 * gang block size then we allow allocations on this metaslab group
1140 * regardless of the mg_allocatable or throttle settings.
1142 if (mg->mg_allocatable) {
1143 metaslab_group_t *mgp;
1145 uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];
1147 if (!mc->mc_alloc_throttle_enabled)
1151 * If this metaslab group does not have any free space, then
1152 * there is no point in looking further.
1154 if (mg->mg_no_free_space)
1158 * Relax allocation throttling for ditto blocks. Due to
1159 * random imbalances in allocation, copies tend to be pushed
1160 * to the one vdev that looks a bit better at the moment.
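 * For example, for the third DVA of a ditto block (d == 2) the
 * per-allocator queue limit below becomes qmax * 6 / 4, i.e. 50% higher.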
1162 qmax = qmax * (4 + d) / 4;
1164 qdepth = zfs_refcount_count(
1165 &mg->mg_alloc_queue_depth[allocator]);
1168 * If this metaslab group is below its qmax or it's
1169 * the only allocatable metaslab group, then attempt
1170 * to allocate from it.
1172 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1174 ASSERT3U(mc->mc_alloc_groups, >, 1);
1177 * Since this metaslab group is at or over its qmax, we
1178 * need to determine if there are metaslab groups after this
1179 * one that might be able to handle this allocation. This is
1180 * racy since we can't hold the locks for all metaslab
1181 * groups at the same time when we make this check.
1183 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1184 qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
1185 qmax = qmax * (4 + d) / 4;
1186 qdepth = zfs_refcount_count(
1187 &mgp->mg_alloc_queue_depth[allocator]);
1190 * If there is another metaslab group that
1191 * might be able to handle the allocation, then
1192 * we return false so that we skip this group.
1194 if (qdepth < qmax && !mgp->mg_no_free_space)
1199 * We didn't find another group to handle the allocation
1200 * so we can't skip this metaslab group even though
1201 * we are at or over our qmax.
1205 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1212 * ==========================================================================
1213 * Range tree callbacks
1214 * ==========================================================================
1218 * Comparison function for the private size-ordered tree. Tree is sorted
1219 * by size, larger sizes at the end of the tree.
1222 metaslab_rangesize_compare(const void *x1, const void *x2)
1224 const range_seg_t *r1 = x1;
1225 const range_seg_t *r2 = x2;
1226 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1227 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1229 int cmp = AVL_CMP(rs_size1, rs_size2);
1233 return (AVL_CMP(r1->rs_start, r2->rs_start));
1237 * ==========================================================================
1238 * Common allocator routines
1239 * ==========================================================================
1243 * Return the maximum contiguous segment within the metaslab.
1246 metaslab_block_maxsize(metaslab_t *msp)
1248 avl_tree_t *t = &msp->ms_allocatable_by_size;
1251 if (t == NULL || (rs = avl_last(t)) == NULL)
1254 return (rs->rs_end - rs->rs_start);
1257 static range_seg_t *
1258 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1260 range_seg_t *rs, rsearch;
1263 rsearch.rs_start = start;
1264 rsearch.rs_end = start + size;
1266 rs = avl_find(t, &rsearch, &where);
1268 rs = avl_nearest(t, where, AVL_AFTER);
1275 * This is a helper function that can be used by the allocator to find
1276 * a suitable block to allocate. This will search the specified AVL
1277 * tree looking for a block that matches the specified criteria.
1280 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1283 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1285 while (rs != NULL) {
1286 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1288 if (offset + size <= rs->rs_end) {
1289 *cursor = offset + size;
1292 rs = AVL_NEXT(t, rs);
1296 * If we know we've searched the whole map (*cursor == 0), give up.
1297 * Otherwise, reset the cursor to the beginning and try again.
1303 return (metaslab_block_picker(t, cursor, size, align));
1307 * ==========================================================================
1308 * The first-fit block allocator
1309 * ==========================================================================
1312 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1315 * Find the largest power of 2 block size that evenly divides the
1316 * requested size. This is used to try to allocate blocks with similar
1317 * alignment from the same area of the metaslab (i.e. same cursor
1318 * bucket), but it does not guarantee that allocations of other sizes
1319 * will not be placed in the same region.
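 * For example, a 24K request yields an alignment of 8K (the largest
 * power of two that divides 24K), so it shares a cursor bucket with
 * other allocations whose sizes are odd multiples of 8K.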
1321 uint64_t align = size & -size;
1322 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1323 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1325 return (metaslab_block_picker(t, cursor, size, align));
1328 static metaslab_ops_t metaslab_ff_ops = {
1333 * ==========================================================================
1334 * Dynamic block allocator -
1335 * Uses the first fit allocation scheme until space gets low and then
1336 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1337 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1338 * ==========================================================================
1341 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1344 * Find the largest power of 2 block size that evenly divides the
1345 * requested size. This is used to try to allocate blocks with similar
1346 * alignment from the same area of the metaslab (i.e. same cursor
1347 * bucket), but it does not guarantee that allocations of other sizes
1348 * will not be placed in the same region.
1350 uint64_t align = size & -size;
1351 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1352 range_tree_t *rt = msp->ms_allocatable;
1353 avl_tree_t *t = &rt->rt_root;
1354 uint64_t max_size = metaslab_block_maxsize(msp);
1355 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1357 ASSERT(MUTEX_HELD(&msp->ms_lock));
1358 ASSERT3U(avl_numnodes(t), ==,
1359 avl_numnodes(&msp->ms_allocatable_by_size));
1361 if (max_size < size)
1365 * If we're running low on space switch to using the size
1366 * sorted AVL tree (best-fit).
1368 if (max_size < metaslab_df_alloc_threshold ||
1369 free_pct < metaslab_df_free_pct) {
1370 t = &msp->ms_allocatable_by_size;
1374 return (metaslab_block_picker(t, cursor, size, 1ULL));
1377 static metaslab_ops_t metaslab_df_ops = {
1382 * ==========================================================================
1383 * Cursor fit block allocator -
1384 * Select the largest region in the metaslab, set the cursor to the beginning
1385 * of the range and the cursor_end to the end of the range. As allocations
1386 * are made, advance the cursor. Continue allocating from the cursor until
1387 * the range is exhausted and then find a new range.
1388 * ==========================================================================
1391 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1393 range_tree_t *rt = msp->ms_allocatable;
1394 avl_tree_t *t = &msp->ms_allocatable_by_size;
1395 uint64_t *cursor = &msp->ms_lbas[0];
1396 uint64_t *cursor_end = &msp->ms_lbas[1];
1397 uint64_t offset = 0;
1399 ASSERT(MUTEX_HELD(&msp->ms_lock));
1400 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1402 ASSERT3U(*cursor_end, >=, *cursor);
1404 if ((*cursor + size) > *cursor_end) {
1407 rs = avl_last(&msp->ms_allocatable_by_size);
1408 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1411 *cursor = rs->rs_start;
1412 *cursor_end = rs->rs_end;
1421 static metaslab_ops_t metaslab_cf_ops = {
1426 * ==========================================================================
1427 * New dynamic fit allocator -
1428 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1429 * contiguous blocks. If no region is found then just use the largest segment that remains.
1431 * ==========================================================================
1435 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1436 * to request from the allocator.
1438 uint64_t metaslab_ndf_clump_shift = 4;
1441 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1443 avl_tree_t *t = &msp->ms_allocatable->rt_root;
1445 range_seg_t *rs, rsearch;
1446 uint64_t hbit = highbit64(size);
1447 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1448 uint64_t max_size = metaslab_block_maxsize(msp);
1450 ASSERT(MUTEX_HELD(&msp->ms_lock));
1451 ASSERT3U(avl_numnodes(t), ==,
1452 avl_numnodes(&msp->ms_allocatable_by_size));
1454 if (max_size < size)
1457 rsearch.rs_start = *cursor;
1458 rsearch.rs_end = *cursor + size;
1460 rs = avl_find(t, &rsearch, &where);
1461 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1462 t = &msp->ms_allocatable_by_size;
1464 rsearch.rs_start = 0;
1465 rsearch.rs_end = MIN(max_size,
1466 1ULL << (hbit + metaslab_ndf_clump_shift));
1467 rs = avl_find(t, &rsearch, &where);
1469 rs = avl_nearest(t, where, AVL_AFTER);
1473 if ((rs->rs_end - rs->rs_start) >= size) {
1474 *cursor = rs->rs_start + size;
1475 return (rs->rs_start);
1480 static metaslab_ops_t metaslab_ndf_ops = {
1484 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1487 * ==========================================================================
1489 * ==========================================================================
1493 metaslab_aux_histograms_clear(metaslab_t *msp)
1496 * Auxiliary histograms are only cleared when resetting them,
1497 * which can only happen while the metaslab is loaded.
1499 ASSERT(msp->ms_loaded);
1501 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1502 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1503 bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
1507 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1511 * This is modeled after space_map_histogram_add(), so refer to that
1512 * function for implementation details. We want this to work like
1513 * the space map histogram, and not the range tree histogram, as we
1514 * are essentially constructing a delta that will be later subtracted
1515 * from the space map histogram.
1518 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1519 ASSERT3U(i, >=, idx + shift);
1520 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1522 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1523 ASSERT3U(idx + shift, ==, i);
1525 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
1531 * Called at every sync pass that the metaslab gets synced.
1533 * The reason is that we want our auxiliary histograms to be updated
1534 * wherever the metaslab's space map histogram is updated. This way
1535 * we stay consistent on which parts of the metaslab space map's
1536 * histogram are currently not available for allocations (e.g. because
1537 * they are in the defer, freed, and freeing trees).
1540 metaslab_aux_histograms_update(metaslab_t *msp)
1542 space_map_t *sm = msp->ms_sm;
1546 * This is similar to the metaslab's space map histogram updates
1547 * that take place in metaslab_sync(). The only difference is that
1548 * we only care about segments that haven't made it into the
1549 * ms_allocatable tree yet.
1551 if (msp->ms_loaded) {
1552 metaslab_aux_histograms_clear(msp);
1554 metaslab_aux_histogram_add(msp->ms_synchist,
1555 sm->sm_shift, msp->ms_freed);
1557 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1558 metaslab_aux_histogram_add(msp->ms_deferhist[t],
1559 sm->sm_shift, msp->ms_defer[t]);
1563 metaslab_aux_histogram_add(msp->ms_synchist,
1564 sm->sm_shift, msp->ms_freeing);
1568 * Called every time we are done syncing (writing to) the metaslab,
1569 * i.e. at the end of each sync pass.
1570 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
1573 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
1575 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1576 space_map_t *sm = msp->ms_sm;
1580 * We came here from metaslab_init() when creating/opening a
1581 * pool, looking at a metaslab that hasn't had any allocations yet.
1588 * This is similar to the actions that we take for the ms_freed
1589 * and ms_defer trees in metaslab_sync_done().
1591 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
1592 if (defer_allowed) {
1593 bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
1594 sizeof (msp->ms_synchist));
1596 bzero(msp->ms_deferhist[hist_index],
1597 sizeof (msp->ms_deferhist[hist_index]));
1599 bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1603 * Ensure that the metaslab's weight and fragmentation are consistent
1604 * with the contents of the histogram (either the range tree's histogram
1605 * or the space map's depending on whether the metaslab is loaded).
1608 metaslab_verify_weight_and_frag(metaslab_t *msp)
1610 ASSERT(MUTEX_HELD(&msp->ms_lock));
1612 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1615 /* see comment in metaslab_verify_unflushed_changes() */
1616 if (msp->ms_group == NULL)
1620 * Devices being removed always return a weight of 0 and leave
1621 * fragmentation and ms_max_size as is - there is nothing for
1622 * us to verify here.
1624 vdev_t *vd = msp->ms_group->mg_vd;
1625 if (vd->vdev_removing)
1629 * If the metaslab is dirty it probably means that we've done
1630 * some allocations or frees that have changed our histograms
1631 * and thus the weight.
1633 for (int t = 0; t < TXG_SIZE; t++) {
1634 if (txg_list_member(&vd->vdev_ms_list, msp, t))
1639 * This verification checks that our in-memory state is consistent
1640 * with what's on disk. If the pool is read-only then there aren't
1641 * any changes and we just have the initially-loaded state.
1643 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
1646 /* some extra verification for in-core tree if you can */
1647 if (msp->ms_loaded) {
1648 range_tree_stat_verify(msp->ms_allocatable);
1649 VERIFY(space_map_histogram_verify(msp->ms_sm,
1650 msp->ms_allocatable));
1653 uint64_t weight = msp->ms_weight;
1654 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1655 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
1656 uint64_t frag = msp->ms_fragmentation;
1657 uint64_t max_segsize = msp->ms_max_size;
1660 msp->ms_fragmentation = 0;
1661 msp->ms_max_size = 0;
1664 * This function is used for verification purposes. Regardless of
1665 * whether metaslab_weight() thinks this metaslab should be active or
1666 * not, we want to ensure that the actual weight (and therefore the
1667 * value of ms_weight) would be the same if it was to be recalculated at this point.
1670 msp->ms_weight = metaslab_weight(msp) | was_active;
1672 VERIFY3U(max_segsize, ==, msp->ms_max_size);
1675 * If the weight type changed then there is no point in doing
1676 * verification. Revert fields to their original values.
1678 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
1679 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
1680 msp->ms_fragmentation = frag;
1681 msp->ms_weight = weight;
1685 VERIFY3U(msp->ms_fragmentation, ==, frag);
1686 VERIFY3U(msp->ms_weight, ==, weight);
1690 * Wait for any in-progress metaslab loads to complete.
1693 metaslab_load_wait(metaslab_t *msp)
1695 ASSERT(MUTEX_HELD(&msp->ms_lock));
1697 while (msp->ms_loading) {
1698 ASSERT(!msp->ms_loaded);
1699 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1704 metaslab_load_impl(metaslab_t *msp)
1708 ASSERT(MUTEX_HELD(&msp->ms_lock));
1709 ASSERT(msp->ms_loading);
1710 ASSERT(!msp->ms_condensing);
1713 * We temporarily drop the lock to unblock other operations while we
1714 * are reading the space map. Therefore, metaslab_sync() and
1715 * metaslab_sync_done() can run at the same time as we do.
1717 * metaslab_sync() can append to the space map while we are loading.
1718 * Therefore we load only entries that existed when we started the
1719 * load. Additionally, metaslab_sync_done() has to wait for the load
1720 * to complete because there are potential races like metaslab_load()
1721 * loading parts of the space map that are currently being appended
1722 * by metaslab_sync(). If we didn't, the ms_allocatable would have
1723 * entries that metaslab_sync_done() would try to re-add later.
1725 * That's why before dropping the lock we remember the synced length
1726 * of the metaslab and read up to that point of the space map,
1727 * ignoring entries appended by metaslab_sync() that happen after we
1730 uint64_t length = msp->ms_synced_length;
1731 mutex_exit(&msp->ms_lock);
1733 if (msp->ms_sm != NULL) {
1734 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
1738 * The space map has not been allocated yet, so treat
1739 * all the space in the metaslab as free and add it to the
1740 * ms_allocatable tree.
1742 range_tree_add(msp->ms_allocatable,
1743 msp->ms_start, msp->ms_size);
1747 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
1748 * changing the ms_sm and the metaslab's range trees while we are
1749 * about to use them and populate the ms_allocatable. The ms_lock
1750 * is insufficient for this because metaslab_sync() doesn't hold
1751 * the ms_lock while writing the ms_checkpointing tree to disk.
1753 mutex_enter(&msp->ms_sync_lock);
1754 mutex_enter(&msp->ms_lock);
1755 ASSERT(!msp->ms_condensing);
1758 mutex_exit(&msp->ms_sync_lock);
1762 ASSERT3P(msp->ms_group, !=, NULL);
1763 msp->ms_loaded = B_TRUE;
1766 * The ms_allocatable contains the segments that exist in the
1767 * ms_defer trees [see ms_synced_length]. Thus we need to remove
1768 * them from ms_allocatable as they will be added again in
1769 * metaslab_sync_done().
1771 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1772 range_tree_walk(msp->ms_defer[t],
1773 range_tree_remove, msp->ms_allocatable);
1777 * Call metaslab_recalculate_weight_and_sort() now that the
1778 * metaslab is loaded so we get the metaslab's real weight.
1780 * Unless this metaslab was created with older software and
1781 * has not yet been converted to use segment-based weight, we
1782 * expect the new weight to be better or equal to the weight
1783 * that the metaslab had while it was not loaded. This is
1784 * because the old weight does not take into account the
1785 * consolidation of adjacent segments between TXGs. [see
1786 * comment for ms_synchist and ms_deferhist[] for more info]
1788 uint64_t weight = msp->ms_weight;
1789 metaslab_recalculate_weight_and_sort(msp);
1790 if (!WEIGHT_IS_SPACEBASED(weight))
1791 ASSERT3U(weight, <=, msp->ms_weight);
1792 msp->ms_max_size = metaslab_block_maxsize(msp);
1794 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1795 metaslab_verify_space(msp, spa_syncing_txg(spa));
1796 mutex_exit(&msp->ms_sync_lock);
1802 metaslab_load(metaslab_t *msp)
1804 ASSERT(MUTEX_HELD(&msp->ms_lock));
1807 * There may be another thread loading the same metaslab, if that's
1808 * the case just wait until the other thread is done and return.
1810 metaslab_load_wait(msp);
1813 VERIFY(!msp->ms_loading);
1814 ASSERT(!msp->ms_condensing);
1816 msp->ms_loading = B_TRUE;
1817 int error = metaslab_load_impl(msp);
1818 msp->ms_loading = B_FALSE;
1819 cv_broadcast(&msp->ms_load_cv);
1825 metaslab_unload(metaslab_t *msp)
1827 ASSERT(MUTEX_HELD(&msp->ms_lock));
1829 metaslab_verify_weight_and_frag(msp);
1831 range_tree_vacate(msp->ms_allocatable, NULL, NULL);
1832 msp->ms_loaded = B_FALSE;
1834 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1835 msp->ms_max_size = 0;
1838 * We explicitly recalculate the metaslab's weight based on its space
1839 * map (as it is now not loaded). We want unloaded metaslabs to always
1840 * have their weights calculated from the space map histograms, while
1841 * loaded ones have it calculated from their in-core range tree
1842 * [see metaslab_load()]. This way, the weight reflects the information
1843 * available in-core, whether it is loaded or not.
1845 * If ms_group == NULL, it means that we came here from metaslab_fini(),
1846 * at which point it doesn't make sense for us to do the recalculation
1849 if (msp->ms_group != NULL)
1850 metaslab_recalculate_weight_and_sort(msp);
1854 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
1855 int64_t defer_delta, int64_t space_delta)
1857 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
1859 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
1860 ASSERT(vd->vdev_ms_count != 0);
1862 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
1863 vdev_deflated_space(vd, space_delta));
1867 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1870 vdev_t *vd = mg->mg_vd;
1871 spa_t *spa = vd->vdev_spa;
1872 objset_t *mos = spa->spa_meta_objset;
1876 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1877 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1878 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
1879 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1882 ms->ms_start = id << vd->vdev_ms_shift;
1883 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1884 ms->ms_allocator = -1;
1885 ms->ms_new = B_TRUE;
1888 * We only open space map objects that already exist. All others
1889 * will be opened when we finally allocate an object for them.
1892 * When called from vdev_expand(), we can't call into the DMU as
1893 * we are holding the spa_config_lock as a writer and we would
1894 * deadlock [see relevant comment in vdev_metaslab_init()]. In
1895 * that case, the object parameter is zero though, so we won't
1896 * call into the DMU.
1899 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1900 ms->ms_size, vd->vdev_ashift);
1903 kmem_free(ms, sizeof (metaslab_t));
1907 ASSERT(ms->ms_sm != NULL);
1908 ASSERT3S(space_map_allocated(ms->ms_sm), >=, 0);
1909 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
1913 * We create the ms_allocatable here, but we don't create the
1914 * other range trees until metaslab_sync_done(). This serves
1915 * two purposes: it allows metaslab_sync_done() to detect the
1916 * addition of new space; and for debugging, it ensures that
1917 * we'd data fault on any attempt to use this metaslab before
1920 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
1921 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
1922 metaslab_group_add(mg, ms);
1924 metaslab_set_fragmentation(ms);
1927 * If we're opening an existing pool (txg == 0) or creating
1928 * a new one (txg == TXG_INITIAL), all space is available now.
1929 * If we're adding space to an existing pool, the new space
1930 * does not become available until after this txg has synced.
1931 * The metaslab's weight will also be initialized when we sync
1932 * out this txg. This ensures that we don't attempt to allocate
1933 * from it before we have initialized it completely.
1935 if (txg <= TXG_INITIAL) {
1936 metaslab_sync_done(ms, 0);
1937 metaslab_space_update(vd, mg->mg_class,
1938 metaslab_allocated_space(ms), 0, 0);
1942 * If metaslab_debug_load is set and we're initializing a metaslab
1943 * that has an allocated space map object then load the space map
1944 * so that we can verify frees.
1946 if (metaslab_debug_load && ms->ms_sm != NULL) {
1947 mutex_enter(&ms->ms_lock);
1948 VERIFY0(metaslab_load(ms));
1949 mutex_exit(&ms->ms_lock);
1953 vdev_dirty(vd, 0, NULL, txg);
1954 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1963 metaslab_fini(metaslab_t *msp)
1965 metaslab_group_t *mg = msp->ms_group;
1966 vdev_t *vd = mg->mg_vd;
1968 metaslab_group_remove(mg, msp);
1970 mutex_enter(&msp->ms_lock);
1971 VERIFY(msp->ms_group == NULL);
1972 metaslab_space_update(vd, mg->mg_class,
1973 -metaslab_allocated_space(msp), 0, -msp->ms_size);
1975 space_map_close(msp->ms_sm);
1977 metaslab_unload(msp);
1979 range_tree_destroy(msp->ms_allocatable);
1980 range_tree_destroy(msp->ms_freeing);
1981 range_tree_destroy(msp->ms_freed);
1983 for (int t = 0; t < TXG_SIZE; t++) {
1984 range_tree_destroy(msp->ms_allocating[t]);
1987 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1988 range_tree_destroy(msp->ms_defer[t]);
1990 ASSERT0(msp->ms_deferspace);
1992 range_tree_destroy(msp->ms_checkpointing);
1994 for (int t = 0; t < TXG_SIZE; t++)
1995 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
1997 mutex_exit(&msp->ms_lock);
1998 cv_destroy(&msp->ms_load_cv);
1999 mutex_destroy(&msp->ms_lock);
2000 mutex_destroy(&msp->ms_sync_lock);
2001 ASSERT3U(msp->ms_allocator, ==, -1);
2003 kmem_free(msp, sizeof (metaslab_t));
2006 #define FRAGMENTATION_TABLE_SIZE 17
2009 * This table defines a segment size based fragmentation metric that will
2010 * allow each metaslab to derive its own fragmentation value. This is done
2011 * by calculating the space in each bucket of the spacemap histogram and
2012 * multiplying that by the fragmentation metric in this table. Doing
2013 * this for all buckets and dividing it by the total amount of free
2014 * space in this metaslab (i.e. the total free space in all buckets) gives
2015 * us the fragmentation metric. This means that a high fragmentation metric
2016 * equates to most of the free space being comprised of small segments.
2017 * Conversely, if the metric is low, then most of the free space is in
2018 * large segments. A 10% change in fragmentation equates to approximately
2019 * double the number of segments.
2021 * This table defines 0% fragmented space using 16MB segments. Testing has
2022 * shown that segments that are greater than or equal to 16MB do not suffer
2023 * from drastic performance problems. Using this value, we derive the rest
2024 * of the table. Since the fragmentation value is never stored on disk, it
2025 * is possible to change these calculations in the future.
2027 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2047 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2048 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2049 * been upgraded and does not support this metric. Otherwise, the
2050 * value should be in the range [0, 100].
2053 metaslab_set_fragmentation(metaslab_t *msp)
2055 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2056 uint64_t fragmentation = 0;
2058 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2059 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2061 if (!feature_enabled) {
2062 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2067 * A null space map means that the entire metaslab is free
2068 * and thus is not fragmented.
2070 if (msp->ms_sm == NULL) {
2071 msp->ms_fragmentation = 0;
2076 * If this metaslab's space map has not been upgraded, flag it
2077 * so that we upgrade next time we encounter it.
2079 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2080 uint64_t txg = spa_syncing_txg(spa);
2081 vdev_t *vd = msp->ms_group->mg_vd;
2084 * If we've reached the final dirty txg, then we must
2085 * be shutting down the pool. We don't want to dirty
2086 * any data past this point so skip setting the condense
2087 * flag. We can retry this action the next time the pool
2090 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2091 msp->ms_condense_wanted = B_TRUE;
2092 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2093 zfs_dbgmsg("txg %llu, requesting force condense: "
2094 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
2097 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2101 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2103 uint8_t shift = msp->ms_sm->sm_shift;
2105 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2106 FRAGMENTATION_TABLE_SIZE - 1);
2108 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2111 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2114 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2115 fragmentation += space * zfs_frag_table[idx];
2119 fragmentation /= total;
2120 ASSERT3U(fragmentation, <=, 100);
2122 msp->ms_fragmentation = fragmentation;
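/*
 * Illustrative sketch, not part of this file: a userland rendering of the
 * weighted average computed by the loop above. The function, table values,
 * and names below are made up for the example; only the arithmetic mirrors
 * the code (bucket space times a per-bucket fragmentation factor, divided
 * by the total free space).
 */
#include <stdint.h>

#define	EX_FRAG_TABLE_SIZE	17
#define	EX_MINBLOCKSHIFT	9	/* 512-byte minimum block */

static uint64_t
example_fragmentation(const uint64_t *histogram, int hist_size, int sm_shift,
    const int *frag_table)
{
	uint64_t total = 0, frag = 0;

	for (int i = 0; i < hist_size; i++) {
		if (histogram[i] == 0)
			continue;

		/* Free space held in bucket i: count << (segment size shift). */
		uint64_t space = histogram[i] << (i + sm_shift);
		int idx = i + sm_shift - EX_MINBLOCKSHIFT;

		if (idx > EX_FRAG_TABLE_SIZE - 1)
			idx = EX_FRAG_TABLE_SIZE - 1;

		frag += space * frag_table[idx];
		total += space;
	}
	return (total == 0 ? 0 : frag / total);
}

/*
 * With a hypothetical table that weighs the 512-byte bucket at 100 and the
 * 16MB bucket at 0, free space held entirely in tiny segments scores 100
 * (fully fragmented), while the same amount held in 16MB segments scores 0.
 */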
2126 * Compute a weight -- a selection preference value -- for the given metaslab.
2127 * This is based on the amount of free space, the level of fragmentation,
2128 * the LBA range, and whether the metaslab is loaded.
2131 metaslab_space_weight(metaslab_t *msp)
2133 metaslab_group_t *mg = msp->ms_group;
2134 vdev_t *vd = mg->mg_vd;
2135 uint64_t weight, space;
2137 ASSERT(MUTEX_HELD(&msp->ms_lock));
2138 ASSERT(!vd->vdev_removing);
2141 * The baseline weight is the metaslab's free space.
2143 space = msp->ms_size - metaslab_allocated_space(msp);
2145 if (metaslab_fragmentation_factor_enabled &&
2146 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2148 * Use the fragmentation information to inversely scale
2149 * down the baseline weight. We need to ensure that we
2150 * don't exclude this metaslab completely when it's 100%
2151 * fragmented. To avoid this we reduce the fragmentation value by 1.
2154 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2157 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2158 * this metaslab again. The fragmentation metric may have
2159 * decreased the space to something smaller than
2160 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2161 * so that we can consume any remaining space.
2163 if (space > 0 && space < SPA_MINBLOCKSIZE)
2164 space = SPA_MINBLOCKSIZE;
2169 * Modern disks have uniform bit density and constant angular velocity.
2170 * Therefore, the outer recording zones are faster (higher bandwidth)
2171 * than the inner zones by the ratio of outer to inner track diameter,
2172 * which is typically around 2:1. We account for this by assigning
2173 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2174 * In effect, this means that we'll select the metaslab with the most
2175 * free bandwidth rather than simply the one with the most free space.
2177 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
2178 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
2179 ASSERT(weight >= space && weight <= 2 * space);
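/*
 * Illustrative sketch, not part of this file: the LBA multiplier applied
 * above runs from 2x for the outermost metaslab (ms_id == 0) down to just
 * over 1x for the innermost one. The helper name and values below are
 * hypothetical; only the arithmetic mirrors the expression above.
 */
#include <stdint.h>

static uint64_t
example_lba_weight(uint64_t space, uint64_t ms_id, uint64_t ms_count)
{
	/* e.g. space = 1GB, ms_count = 100: ms_id 0 -> 2GB, ms_id 99 -> ~1.01GB */
	return (2 * space - (ms_id * space) / ms_count);
}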
2183 * If this metaslab is one we're actively using, adjust its
2184 * weight to make it preferable to any inactive metaslab so
2185 * we'll polish it off. If the fragmentation on this metaslab
2186 * has exceeded our threshold, then don't mark it active.
2188 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
2189 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
2190 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
2193 WEIGHT_SET_SPACEBASED(weight);
2198 * Return the weight of the specified metaslab, according to the segment-based
2199 * weighting algorithm. The metaslab must be loaded. This function can
2200 * be called within a sync pass since it relies only on the metaslab's
2201 * range tree which is always accurate when the metaslab is loaded.
2204 metaslab_weight_from_range_tree(metaslab_t *msp)
2206 uint64_t weight = 0;
2207 uint32_t segments = 0;
2209 ASSERT(msp->ms_loaded);
2211 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
2213 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
2214 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2217 segments += msp->ms_allocatable->rt_histogram[i];
2220 * The range tree provides more precision than the space map
2221 * and must be downgraded so that all values fit within the
2222 * space map's histogram. This allows us to compare loaded
2223 * vs. unloaded metaslabs to determine which metaslab is
2224 * considered "best".
2229 if (segments != 0) {
2230 WEIGHT_SET_COUNT(weight, segments);
2231 WEIGHT_SET_INDEX(weight, i);
2232 WEIGHT_SET_ACTIVE(weight, 0);
2240 * Calculate the weight based on the on-disk histogram. This should only
2241 * be called after a sync pass has completely finished since the on-disk
2242 * information is updated in metaslab_sync().
2245 metaslab_weight_from_spacemap(metaslab_t *msp)
2247 space_map_t *sm = msp->ms_sm;
2248 ASSERT(!msp->ms_loaded);
2250 ASSERT3U(space_map_object(sm), !=, 0);
2251 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2254 * Create a joint histogram from all the segments that have made
2255 * it to the metaslab's space map histogram but are not yet
2256 * available for allocation because they are still in the freeing
2257 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
2258 * these segments from the space map's histogram to get a more
2261 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
2262 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
2263 deferspace_histogram[i] += msp->ms_synchist[i];
2264 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2265 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2266 deferspace_histogram[i] += msp->ms_deferhist[t][i];
2270 uint64_t weight = 0;
2271 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
2272 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
2273 deferspace_histogram[i]);
2275 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
2277 WEIGHT_SET_COUNT(weight, count);
2278 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
2279 WEIGHT_SET_ACTIVE(weight, 0);
2287 * Compute a segment-based weight for the specified metaslab. The weight
2288 * is determined by the highest bucket in the histogram. The information
2289 * for the highest bucket is encoded into the weight value.
2292 metaslab_segment_weight(metaslab_t *msp)
2294 metaslab_group_t *mg = msp->ms_group;
2295 uint64_t weight = 0;
2296 uint8_t shift = mg->mg_vd->vdev_ashift;
2298 ASSERT(MUTEX_HELD(&msp->ms_lock));
2301 * The metaslab is completely free.
2303 if (metaslab_allocated_space(msp) == 0) {
2304 int idx = highbit64(msp->ms_size) - 1;
2305 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2307 if (idx < max_idx) {
2308 WEIGHT_SET_COUNT(weight, 1ULL);
2309 WEIGHT_SET_INDEX(weight, idx);
2311 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
2312 WEIGHT_SET_INDEX(weight, max_idx);
2314 WEIGHT_SET_ACTIVE(weight, 0);
2315 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
2320 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
2323 * If the metaslab is fully allocated then just make the weight 0.
2325 if (metaslab_allocated_space(msp) == msp->ms_size)
2328 * If the metaslab is already loaded, then use the range tree to
2329 * determine the weight. Otherwise, we rely on the space map information
2330 * to generate the weight.
2332 if (msp->ms_loaded) {
2333 weight = metaslab_weight_from_range_tree(msp);
2335 weight = metaslab_weight_from_spacemap(msp);
2339 * If the metaslab was active the last time we calculated its weight
2340 * then keep it active. We want to consume the entire region that
2341 * is associated with this weight.
2343 if (msp->ms_activation_weight != 0 && weight != 0)
2344 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
2349 * Determine if we should attempt to allocate from this metaslab. If the
2350 * metaslab has a maximum size then we can quickly determine if the desired
2351 * allocation size can be satisfied. Otherwise, if we're using segment-based
2352 * weighting then we can determine the maximum allocation that this metaslab
2353 * can accommodate based on the index encoded in the weight. If we're using
2354 * space-based weights then rely on the entire weight (excluding the weight
2358 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
2360 boolean_t should_allocate;
2362 if (msp->ms_max_size != 0)
2363 return (msp->ms_max_size >= asize);
2365 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2367 * The metaslab segment weight indicates segments in the
2368 * range [2^i, 2^(i+1)), where i is the index in the weight.
2369 * Since the asize might be in the middle of the range, we
2370 * should attempt the allocation if asize < 2^(i+1).
2372 should_allocate = (asize <
2373 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
2375 should_allocate = (asize <=
2376 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
2378 return (should_allocate);
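/*
 * Illustrative sketch, not part of this file: for a segment-based weight
 * whose encoded index is i, the largest tracked free segments lie in
 * [2^i, 2^(i+1)), so any request smaller than 2^(i+1) is worth attempting.
 * The helper below is hypothetical and only restates that check.
 */
#include <stdint.h>

static int
example_segment_fits(uint64_t asize, int weight_index)
{
	/* e.g. weight_index = 20 (1MB bucket): accept any asize < 2MB */
	return (asize < (1ULL << (weight_index + 1)));
}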
2382 metaslab_weight(metaslab_t *msp)
2384 vdev_t *vd = msp->ms_group->mg_vd;
2385 spa_t *spa = vd->vdev_spa;
2388 ASSERT(MUTEX_HELD(&msp->ms_lock));
2391 * If this vdev is in the process of being removed, there is nothing
2392 * for us to do here.
2394 if (vd->vdev_removing)
2397 metaslab_set_fragmentation(msp);
2400 * Update the maximum size if the metaslab is loaded. This will
2401 * ensure that we get an accurate maximum size if newly freed space
2402 * has been added back into the free tree.
2405 msp->ms_max_size = metaslab_block_maxsize(msp);
2407 ASSERT0(msp->ms_max_size);
2410 * Segment-based weighting requires space map histogram support.
2412 if (zfs_metaslab_segment_weight_enabled &&
2413 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
2414 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
2415 sizeof (space_map_phys_t))) {
2416 weight = metaslab_segment_weight(msp);
2418 weight = metaslab_space_weight(msp);
2424 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
2426 /* note: we preserve the mask (e.g. indication of primary, etc.) */
2427 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2428 metaslab_group_sort(msp->ms_group, msp,
2429 metaslab_weight(msp) | was_active);
2433 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2434 int allocator, uint64_t activation_weight)
2437 * If we're activating for the claim code, we don't want to actually
2438 * set the metaslab up for a specific allocator.
2440 if (activation_weight == METASLAB_WEIGHT_CLAIM)
2442 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
2443 mg->mg_primaries : mg->mg_secondaries);
2445 ASSERT(MUTEX_HELD(&msp->ms_lock));
2446 mutex_enter(&mg->mg_lock);
2447 if (arr[allocator] != NULL) {
2448 mutex_exit(&mg->mg_lock);
2452 arr[allocator] = msp;
2453 ASSERT3S(msp->ms_allocator, ==, -1);
2454 msp->ms_allocator = allocator;
2455 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
2456 mutex_exit(&mg->mg_lock);
2462 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
2464 ASSERT(MUTEX_HELD(&msp->ms_lock));
2466 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
2467 int error = metaslab_load(msp);
2469 metaslab_group_sort(msp->ms_group, msp, 0);
2472 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
2474 * The metaslab was activated for another allocator
2475 * while we were waiting, so we should reselect.
2479 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
2480 allocator, activation_weight)) != 0) {
2484 msp->ms_activation_weight = msp->ms_weight;
2485 metaslab_group_sort(msp->ms_group, msp,
2486 msp->ms_weight | activation_weight);
2488 ASSERT(msp->ms_loaded);
2489 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
2495 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
2498 ASSERT(MUTEX_HELD(&msp->ms_lock));
2499 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
2500 metaslab_group_sort(mg, msp, weight);
2504 mutex_enter(&mg->mg_lock);
2505 ASSERT3P(msp->ms_group, ==, mg);
2506 if (msp->ms_primary) {
2507 ASSERT3U(0, <=, msp->ms_allocator);
2508 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
2509 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp);
2510 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
2511 mg->mg_primaries[msp->ms_allocator] = NULL;
2513 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
2514 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp);
2515 mg->mg_secondaries[msp->ms_allocator] = NULL;
2517 msp->ms_allocator = -1;
2518 metaslab_group_sort_impl(mg, msp, weight);
2519 mutex_exit(&mg->mg_lock);
2523 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2525 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
2528 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2529 * this metaslab again. In that case, it had better be empty,
2530 * or we would be leaving space on the table.
2532 ASSERT(size >= SPA_MINBLOCKSIZE ||
2533 range_tree_is_empty(msp->ms_allocatable));
2534 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2536 msp->ms_activation_weight = 0;
2537 metaslab_passivate_allocator(msp->ms_group, msp, weight);
2538 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2542 * Segment-based metaslabs are activated once and remain active until
2543 * we either fail an allocation attempt (similar to space-based metaslabs)
2544 * or have exhausted the free space in zfs_metaslab_switch_threshold
2545 * buckets since the metaslab was activated. This function checks to see
2546 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2547 * metaslab and passivates it proactively. This will allow us to select a
2548 * metaslab with a larger contiguous region, if any remain, within this
2549 * metaslab group. If we're in sync pass > 1, then we continue using this
2550 * metaslab so that we don't dirty more blocks and cause more sync passes.
2553 metaslab_segment_may_passivate(metaslab_t *msp)
2555 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2557 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2561 * Since we are in the middle of a sync pass, the most accurate
2562 * information that is accessible to us is the in-core range tree
2563 * histogram; calculate the new weight based on that information.
2565 uint64_t weight = metaslab_weight_from_range_tree(msp);
2566 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2567 int current_idx = WEIGHT_GET_INDEX(weight);
2569 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2570 metaslab_passivate(msp, weight);
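/*
 * Illustrative sketch, not part of this file: with a hypothetical
 * zfs_metaslab_switch_threshold of 2, a metaslab activated while its
 * largest free segments sat in the 2^23 (8MB) bucket would be passivated
 * once the recalculated index falls to 2^21 (2MB) or below. The helper
 * below is hypothetical and only restates the comparison above.
 */
static int
example_should_switch(int activation_idx, int current_idx, int threshold)
{
	return (current_idx <= activation_idx - threshold);
}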
2574 metaslab_preload(void *arg)
2576 metaslab_t *msp = arg;
2577 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2579 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2581 mutex_enter(&msp->ms_lock);
2582 (void) metaslab_load(msp);
2583 msp->ms_selected_txg = spa_syncing_txg(spa);
2584 mutex_exit(&msp->ms_lock);
2588 metaslab_group_preload(metaslab_group_t *mg)
2590 spa_t *spa = mg->mg_vd->vdev_spa;
2592 avl_tree_t *t = &mg->mg_metaslab_tree;
2595 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2596 taskq_wait(mg->mg_taskq);
2600 mutex_enter(&mg->mg_lock);
2603 * Load the next potential metaslabs
2605 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2606 ASSERT3P(msp->ms_group, ==, mg);
2609 * We preload only the maximum number of metaslabs specified
2610 * by metaslab_preload_limit. If a metaslab is being forced
2611 * to condense then we preload it too. This will ensure
2612 * that force condensing happens in the next txg.
2614 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2618 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2619 msp, TQ_SLEEP) != 0);
2621 mutex_exit(&mg->mg_lock);
2625 * Determine if the space map's on-disk footprint is past our tolerance
2626 * for inefficiency. We would like to use the following criteria to make
2629 * 1. The size of the space map object should not dramatically increase as a
2630 * result of writing out the free space range tree.
2632 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2633 * times the size of the free space range tree representation
2634 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2636 * 3. The on-disk size of the space map should actually decrease.
2638 * Unfortunately, we cannot compute the on-disk size of the space map in this
2639 * context because we cannot accurately compute the effects of compression, etc.
2640 * Instead, we apply the heuristic described in the block comment for
2641 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2642 * is greater than a threshold number of blocks.
2645 metaslab_should_condense(metaslab_t *msp)
2647 space_map_t *sm = msp->ms_sm;
2648 vdev_t *vd = msp->ms_group->mg_vd;
2649 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2650 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
2652 ASSERT(MUTEX_HELD(&msp->ms_lock));
2653 ASSERT(msp->ms_loaded);
2656 * Allocations and frees in early passes are generally more space
2657 * efficient (in terms of blocks described in space map entries)
2658 * than the ones in later passes (e.g. we don't compress after
2659 * sync pass 5) and condensing a metaslab multiple times in a txg
2660 * could degrade performance.
2662 * Thus we prefer condensing each metaslab at most once every txg at
2663 * the earliest sync pass possible. If a metaslab is eligible for
2664 * condensing again after being considered for condensing within the
2665 * same txg, it will hopefully be dirty in the next txg where it will
2666 * be condensed at an earlier pass.
2668 if (msp->ms_condense_checked_txg == current_txg)
2670 msp->ms_condense_checked_txg = current_txg;
2673 * We always condense metaslabs that are empty and metaslabs for
2674 * which a condense request has been made.
2676 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2677 msp->ms_condense_wanted)
2680 uint64_t object_size = space_map_length(msp->ms_sm);
2681 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2682 msp->ms_allocatable, SM_NO_VDEVID);
2684 dmu_object_info_t doi;
2685 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2686 uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2688 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2689 object_size > zfs_metaslab_condense_block_threshold * record_size);
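/*
 * Illustrative sketch, not part of this file: the condense predicate above
 * with hypothetical numbers. With zfs_condense_pct = 200 and a block
 * threshold of 4, a space map whose on-disk length is 600KB would be
 * condensed when its estimated optimal size is 40KB and its record size is
 * 128KB, since 600KB >= 2 * 40KB and 600KB > 4 * 128KB.
 */
#include <stdint.h>

static int
example_should_condense(uint64_t object_size, uint64_t optimal_size,
    uint64_t record_size, uint64_t condense_pct, uint64_t block_threshold)
{
	return (object_size >= optimal_size * condense_pct / 100 &&
	    object_size > block_threshold * record_size);
}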
2693 * Condense the on-disk space map representation to its minimized form.
2694 * The minimized form consists of a small number of allocations followed by
2695 * the entries of the free range tree.
2698 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2700 range_tree_t *condense_tree;
2701 space_map_t *sm = msp->ms_sm;
2703 ASSERT(MUTEX_HELD(&msp->ms_lock));
2704 ASSERT(msp->ms_loaded);
2706 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2707 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2708 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2709 msp->ms_group->mg_vd->vdev_spa->spa_name,
2710 space_map_length(msp->ms_sm),
2711 avl_numnodes(&msp->ms_allocatable->rt_root),
2712 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2714 msp->ms_condense_wanted = B_FALSE;
2717 * Create a range tree that is 100% allocated. We remove segments
2718 * that have been freed in this txg, any deferred frees that exist,
2719 * and any allocations in the future. Removing segments should be
2720 * a relatively inexpensive operation since we expect these trees to
2721 * have a small number of nodes.
2723 condense_tree = range_tree_create(NULL, NULL);
2724 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2726 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2727 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
2729 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2730 range_tree_walk(msp->ms_defer[t],
2731 range_tree_remove, condense_tree);
2734 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2735 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2736 range_tree_remove, condense_tree);
2740 * We're about to drop the metaslab's lock thus allowing
2741 * other consumers to change its content. Set the
2742 * metaslab's ms_condensing flag to ensure that
2743 * allocations on this metaslab do not occur while we're
2744 * in the middle of committing it to disk. This is only critical
2745 * for ms_allocatable as all other range trees use per txg
2746 * views of their content.
2748 msp->ms_condensing = B_TRUE;
2750 mutex_exit(&msp->ms_lock);
2751 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
2754 * While we would ideally like to create a space map representation
2755 * that consists only of allocation records, doing so can be
2756 * prohibitively expensive because the in-core free tree can be
2757 * large, and therefore computationally expensive to subtract
2758 * from the condense_tree. Instead we sync out two trees, a cheap
2759 * allocation only tree followed by the in-core free tree. While not
2760 * optimal, this is typically close to optimal, and much cheaper to
2763 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
2764 range_tree_vacate(condense_tree, NULL, NULL);
2765 range_tree_destroy(condense_tree);
2767 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
2768 mutex_enter(&msp->ms_lock);
2769 msp->ms_condensing = B_FALSE;
2773 * Write a metaslab to disk in the context of the specified transaction group.
2776 metaslab_sync(metaslab_t *msp, uint64_t txg)
2778 metaslab_group_t *mg = msp->ms_group;
2779 vdev_t *vd = mg->mg_vd;
2780 spa_t *spa = vd->vdev_spa;
2781 objset_t *mos = spa_meta_objset(spa);
2782 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
2784 uint64_t object = space_map_object(msp->ms_sm);
2786 ASSERT(!vd->vdev_ishole);
2789 * This metaslab has just been added so there's no work to do now.
2791 if (msp->ms_freeing == NULL) {
2792 ASSERT3P(alloctree, ==, NULL);
2796 ASSERT3P(alloctree, !=, NULL);
2797 ASSERT3P(msp->ms_freeing, !=, NULL);
2798 ASSERT3P(msp->ms_freed, !=, NULL);
2799 ASSERT3P(msp->ms_checkpointing, !=, NULL);
2802 * Normally, we don't want to process a metaslab if there are no
2803 * allocations or frees to perform. However, if the metaslab is being
2804 * forced to condense and it's loaded, we need to let it through.
2806 if (range_tree_is_empty(alloctree) &&
2807 range_tree_is_empty(msp->ms_freeing) &&
2808 range_tree_is_empty(msp->ms_checkpointing) &&
2809 !(msp->ms_loaded && msp->ms_condense_wanted))
2813 VERIFY(txg <= spa_final_dirty_txg(spa));
2816 * The only state that can actually be changing concurrently
2817 * with metaslab_sync() is the metaslab's ms_allocatable. No
2818 * other thread can be modifying this txg's alloc, freeing,
2819 * freed, or space_map_phys_t. We drop ms_lock whenever we
2820 * could call into the DMU, because the DMU can call down to
2821 * us (e.g. via zio_free()) at any time.
2823 * The spa_vdev_remove_thread() can be reading metaslab state
2824 * concurrently, and it is locked out by the ms_sync_lock.
2825 * Note that the ms_lock is insufficient for this, because it
2826 * is dropped by space_map_write().
2828 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2830 if (msp->ms_sm == NULL) {
2831 uint64_t new_object;
2833 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx);
2834 VERIFY3U(new_object, !=, 0);
2836 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2837 msp->ms_start, msp->ms_size, vd->vdev_ashift));
2839 ASSERT(msp->ms_sm != NULL);
2840 ASSERT0(metaslab_allocated_space(msp));
2843 if (!range_tree_is_empty(msp->ms_checkpointing) &&
2844 vd->vdev_checkpoint_sm == NULL) {
2845 ASSERT(spa_has_checkpoint(spa));
2847 uint64_t new_object = space_map_alloc(mos,
2848 vdev_standard_sm_blksz, tx);
2849 VERIFY3U(new_object, !=, 0);
2851 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
2852 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
2853 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2856 * We save the space map object as an entry in vdev_top_zap
2857 * so it can be retrieved when the pool is reopened after an
2858 * export or through zdb.
2860 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
2861 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
2862 sizeof (new_object), 1, &new_object, tx));
2865 mutex_enter(&msp->ms_sync_lock);
2866 mutex_enter(&msp->ms_lock);
2869 * Note: metaslab_condense() clears the space map's histogram.
2870 * Therefore we must verify and remove this histogram before
2873 metaslab_group_histogram_verify(mg);
2874 metaslab_class_histogram_verify(mg->mg_class);
2875 metaslab_group_histogram_remove(mg, msp);
2877 if (msp->ms_loaded && metaslab_should_condense(msp)) {
2878 metaslab_condense(msp, txg, tx);
2880 mutex_exit(&msp->ms_lock);
2881 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
2883 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
2885 mutex_enter(&msp->ms_lock);
2888 msp->ms_allocated_space += range_tree_space(alloctree);
2889 ASSERT3U(msp->ms_allocated_space, >=,
2890 range_tree_space(msp->ms_freeing));
2891 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
2893 if (!range_tree_is_empty(msp->ms_checkpointing)) {
2894 ASSERT(spa_has_checkpoint(spa));
2895 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
2898 * Since we are doing writes to disk and the ms_checkpointing
2899 * tree won't be changing during that time, we drop the
2900 * ms_lock while writing to the checkpoint space map.
2902 mutex_exit(&msp->ms_lock);
2903 space_map_write(vd->vdev_checkpoint_sm,
2904 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
2905 mutex_enter(&msp->ms_lock);
2907 spa->spa_checkpoint_info.sci_dspace +=
2908 range_tree_space(msp->ms_checkpointing);
2909 vd->vdev_stat.vs_checkpoint_space +=
2910 range_tree_space(msp->ms_checkpointing);
2911 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
2912 -space_map_allocated(vd->vdev_checkpoint_sm));
2914 range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
2917 if (msp->ms_loaded) {
2919 * When the space map is loaded, we have an accurate
2920 * histogram in the range tree. This gives us an opportunity
2921 * to bring the space map's histogram up-to-date so we clear
2922 * it first before updating it.
2924 space_map_histogram_clear(msp->ms_sm);
2925 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
2928 * Since we've cleared the histogram we need to add back
2929 * any free space that has already been processed, plus
2930 * any deferred space. This allows the on-disk histogram
2931 * to accurately reflect all free space even if some space
2932 * is not yet available for allocation (i.e. deferred).
2934 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
2937 * Add back any deferred free space that has not been
2938 * added back into the in-core free tree yet. This will
2939 * ensure that we don't end up with a space map histogram
2940 * that is completely empty unless the metaslab is fully
2943 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2944 space_map_histogram_add(msp->ms_sm,
2945 msp->ms_defer[t], tx);
2950 * Always add the free space from this sync pass to the space
2951 * map histogram. We want to make sure that the on-disk histogram
2952 * accounts for all free space. If the space map is not loaded,
2953 * then we will lose some accuracy but will correct it the next
2954 * time we load the space map.
2956 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
2957 metaslab_aux_histograms_update(msp);
2959 metaslab_group_histogram_add(mg, msp);
2960 metaslab_group_histogram_verify(mg);
2961 metaslab_class_histogram_verify(mg->mg_class);
2964 * For sync pass 1, we avoid traversing this txg's free range tree
2965 * and instead will just swap the pointers for freeing and freed.
2966 * We can safely do this since the freed_tree is guaranteed to be
2967 * empty on the initial pass.
2969 if (spa_sync_pass(spa) == 1) {
2970 range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
2971 ASSERT0(msp->ms_allocated_this_txg);
2973 range_tree_vacate(msp->ms_freeing,
2974 range_tree_add, msp->ms_freed);
2976 msp->ms_allocated_this_txg += range_tree_space(alloctree);
2977 range_tree_vacate(alloctree, NULL, NULL);
2979 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
2980 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
2982 ASSERT0(range_tree_space(msp->ms_freeing));
2983 ASSERT0(range_tree_space(msp->ms_checkpointing));
2985 mutex_exit(&msp->ms_lock);
2987 if (object != space_map_object(msp->ms_sm)) {
2988 object = space_map_object(msp->ms_sm);
2989 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2990 msp->ms_id, sizeof (uint64_t), &object, tx);
2992 mutex_exit(&msp->ms_sync_lock);
2997 * Called after a transaction group has completely synced to mark
2998 * all of the metaslab's free space as usable.
3001 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
3003 metaslab_group_t *mg = msp->ms_group;
3004 vdev_t *vd = mg->mg_vd;
3005 spa_t *spa = vd->vdev_spa;
3006 range_tree_t **defer_tree;
3007 int64_t alloc_delta, defer_delta;
3008 boolean_t defer_allowed = B_TRUE;
3010 ASSERT(!vd->vdev_ishole);
3012 mutex_enter(&msp->ms_lock);
3015 * If this metaslab is just becoming available, initialize its
3016 * range trees and add its capacity to the vdev.
3018 if (msp->ms_freed == NULL) {
3019 for (int t = 0; t < TXG_SIZE; t++) {
3020 ASSERT(msp->ms_allocating[t] == NULL);
3022 msp->ms_allocating[t] = range_tree_create(NULL, NULL);
3025 ASSERT3P(msp->ms_freeing, ==, NULL);
3026 msp->ms_freeing = range_tree_create(NULL, NULL);
3028 ASSERT3P(msp->ms_freed, ==, NULL);
3029 msp->ms_freed = range_tree_create(NULL, NULL);
3031 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3032 ASSERT(msp->ms_defer[t] == NULL);
3034 msp->ms_defer[t] = range_tree_create(NULL, NULL);
3037 ASSERT3P(msp->ms_checkpointing, ==, NULL);
3038 msp->ms_checkpointing = range_tree_create(NULL, NULL);
3040 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
3042 ASSERT0(range_tree_space(msp->ms_freeing));
3043 ASSERT0(range_tree_space(msp->ms_checkpointing));
3045 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
3047 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
3048 metaslab_class_get_alloc(spa_normal_class(spa));
3049 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
3050 defer_allowed = B_FALSE;
3054 alloc_delta = msp->ms_allocated_this_txg -
3055 range_tree_space(msp->ms_freed);
3056 if (defer_allowed) {
3057 defer_delta = range_tree_space(msp->ms_freed) -
3058 range_tree_space(*defer_tree);
3060 defer_delta -= range_tree_space(*defer_tree);
3063 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
3067 * If there's a metaslab_load() in progress, wait for it to complete
3068 * so that we have a consistent view of the in-core space map.
3070 metaslab_load_wait(msp);
3073 * Move the frees from the defer_tree back to the free
3074 * range tree (if it's loaded). Swap the freed_tree and
3075 * the defer_tree -- this is safe to do because we've
3076 * just emptied out the defer_tree.
3078 range_tree_vacate(*defer_tree,
3079 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
3080 if (defer_allowed) {
3081 range_tree_swap(&msp->ms_freed, defer_tree);
3083 range_tree_vacate(msp->ms_freed,
3084 msp->ms_loaded ? range_tree_add : NULL,
3085 msp->ms_allocatable);
3088 msp->ms_synced_length = space_map_length(msp->ms_sm);
3090 msp->ms_deferspace += defer_delta;
3091 ASSERT3S(msp->ms_deferspace, >=, 0);
3092 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
3093 if (msp->ms_deferspace != 0) {
3095 * Keep syncing this metaslab until all deferred frees
3096 * are back in circulation.
3098 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
3100 metaslab_aux_histograms_update_done(msp, defer_allowed);
3103 msp->ms_new = B_FALSE;
3104 mutex_enter(&mg->mg_lock);
3106 mutex_exit(&mg->mg_lock);
3110 * Re-sort metaslab within its group now that we've adjusted
3111 * its allocatable space.
3113 metaslab_recalculate_weight_and_sort(msp);
3116 * If the metaslab is loaded and we've not tried to load or allocate
3117 * from it in 'metaslab_unload_delay' txgs, then unload it.
3119 if (msp->ms_loaded &&
3120 msp->ms_initializing == 0 &&
3121 msp->ms_selected_txg + metaslab_unload_delay < txg) {
3122 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
3123 VERIFY0(range_tree_space(
3124 msp->ms_allocating[(txg + t) & TXG_MASK]));
3126 if (msp->ms_allocator != -1) {
3127 metaslab_passivate(msp, msp->ms_weight &
3128 ~METASLAB_ACTIVE_MASK);
3131 if (!metaslab_debug_unload)
3132 metaslab_unload(msp);
3135 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3136 ASSERT0(range_tree_space(msp->ms_freeing));
3137 ASSERT0(range_tree_space(msp->ms_freed));
3138 ASSERT0(range_tree_space(msp->ms_checkpointing));
3140 msp->ms_allocated_this_txg = 0;
3141 mutex_exit(&msp->ms_lock);
3145 metaslab_sync_reassess(metaslab_group_t *mg)
3147 spa_t *spa = mg->mg_class->mc_spa;
3149 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3150 metaslab_group_alloc_update(mg);
3151 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
3154 * Preload the next potential metaslabs but only on active
3155 * metaslab groups. We can get into a state where the metaslab
3156 * is no longer active since we dirty metaslabs as we remove a
3157 * device, thus potentially making the metaslab group eligible
3160 if (mg->mg_activation_count > 0) {
3161 metaslab_group_preload(mg);
3163 spa_config_exit(spa, SCL_ALLOC, FTAG);
3167 * When writing a ditto block (i.e. more than one DVA for a given BP) on
3168 * the same vdev as an existing DVA of this BP, try to allocate it
3169 * on a different metaslab than the existing DVAs (i.e. a unique metaslab).
3172 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
3176 if (DVA_GET_ASIZE(dva) == 0)
3179 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
3182 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
3184 return (msp->ms_id != dva_ms_id);
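/*
 * Illustrative sketch, not part of this file: a DVA maps to a metaslab by
 * shifting its vdev offset down by the vdev's metaslab shift, so two DVAs
 * on the same vdev share a metaslab exactly when those shifted values
 * match. The helper below is hypothetical.
 */
#include <stdint.h>

static int
example_same_metaslab(uint64_t offset_a, uint64_t offset_b, int ms_shift)
{
	return ((offset_a >> ms_shift) == (offset_b >> ms_shift));
}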
3188 * ==========================================================================
3189 * Metaslab allocation tracing facility
3190 * ==========================================================================
3192 #ifdef _METASLAB_TRACING
3193 kstat_t *metaslab_trace_ksp;
3194 kstat_named_t metaslab_trace_over_limit;
3197 metaslab_alloc_trace_init(void)
3199 ASSERT(metaslab_alloc_trace_cache == NULL);
3200 metaslab_alloc_trace_cache = kmem_cache_create(
3201 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
3202 0, NULL, NULL, NULL, NULL, NULL, 0);
3203 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
3204 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
3205 if (metaslab_trace_ksp != NULL) {
3206 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
3207 kstat_named_init(&metaslab_trace_over_limit,
3208 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
3209 kstat_install(metaslab_trace_ksp);
3214 metaslab_alloc_trace_fini(void)
3216 if (metaslab_trace_ksp != NULL) {
3217 kstat_delete(metaslab_trace_ksp);
3218 metaslab_trace_ksp = NULL;
3220 kmem_cache_destroy(metaslab_alloc_trace_cache);
3221 metaslab_alloc_trace_cache = NULL;
3225 * Add an allocation trace element to the allocation tracing list.
3228 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
3229 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
3232 if (!metaslab_trace_enabled)
3236 * When the tracing list reaches its maximum we remove
3237 * the second element in the list before adding a new one.
3238 * By removing the second element we preserve the original
3239 * entry as a clue to what allocation steps have already been
3242 if (zal->zal_size == metaslab_trace_max_entries) {
3243 metaslab_alloc_trace_t *mat_next;
3245 panic("too many entries in allocation list");
3247 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
3249 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
3250 list_remove(&zal->zal_list, mat_next);
3251 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
3254 metaslab_alloc_trace_t *mat =
3255 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
3256 list_link_init(&mat->mat_list_node);
3259 mat->mat_size = psize;
3260 mat->mat_dva_id = dva_id;
3261 mat->mat_offset = offset;
3262 mat->mat_weight = 0;
3263 mat->mat_allocator = allocator;
3266 mat->mat_weight = msp->ms_weight;
3269 * The list is part of the zio so locking is not required. Only
3270 * a single thread will perform allocations for a given zio.
3272 list_insert_tail(&zal->zal_list, mat);
3275 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
3279 metaslab_trace_init(zio_alloc_list_t *zal)
3281 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
3282 offsetof(metaslab_alloc_trace_t, mat_list_node));
3287 metaslab_trace_fini(zio_alloc_list_t *zal)
3289 metaslab_alloc_trace_t *mat;
3291 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
3292 kmem_cache_free(metaslab_alloc_trace_cache, mat);
3293 list_destroy(&zal->zal_list);
3299 #define metaslab_trace_add(zal, mg, msp, psize, id, off, alloc)
3302 metaslab_alloc_trace_init(void)
3307 metaslab_alloc_trace_fini(void)
3312 metaslab_trace_init(zio_alloc_list_t *zal)
3317 metaslab_trace_fini(zio_alloc_list_t *zal)
3321 #endif /* _METASLAB_TRACING */
3324 * ==========================================================================
3325 * Metaslab block operations
3326 * ==========================================================================
3330 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
3333 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3334 (flags & METASLAB_DONT_THROTTLE))
3337 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3338 if (!mg->mg_class->mc_alloc_throttle_enabled)
3341 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
3345 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
3347 uint64_t max = mg->mg_max_alloc_queue_depth;
3348 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3350 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
3351 cur, cur + 1) == cur) {
3353 &mg->mg_class->mc_alloc_max_slots[allocator]);
3356 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3361 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
3362 int allocator, boolean_t io_complete)
3364 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3365 (flags & METASLAB_DONT_THROTTLE))
3368 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3369 if (!mg->mg_class->mc_alloc_throttle_enabled)
3372 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
3374 metaslab_group_increment_qdepth(mg, allocator);
3378 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
3382 const dva_t *dva = bp->blk_dva;
3383 int ndvas = BP_GET_NDVAS(bp);
3385 for (int d = 0; d < ndvas; d++) {
3386 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
3387 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3388 VERIFY(zfs_refcount_not_held(
3389 &mg->mg_alloc_queue_depth[allocator], tag));
3395 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
3398 range_tree_t *rt = msp->ms_allocatable;
3399 metaslab_class_t *mc = msp->ms_group->mg_class;
3401 VERIFY(!msp->ms_condensing);
3402 VERIFY0(msp->ms_initializing);
3404 start = mc->mc_ops->msop_alloc(msp, size);
3405 if (start != -1ULL) {
3406 metaslab_group_t *mg = msp->ms_group;
3407 vdev_t *vd = mg->mg_vd;
3409 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
3410 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3411 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
3412 range_tree_remove(rt, start, size);
3414 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3415 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
3417 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3419 /* Track the last successful allocation */
3420 msp->ms_alloc_txg = txg;
3421 metaslab_verify_space(msp, txg);
3425 * Now that we've attempted the allocation we need to update the
3426 * metaslab's maximum block size since it may have changed.
3428 msp->ms_max_size = metaslab_block_maxsize(msp);
3433 * Find the metaslab with the highest weight that is less than what we've
3434 * already tried. In the common case, this means that we will examine each
3435 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3436 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3437 * activated by another thread, and we fail to allocate from the metaslab we
3438 * have selected, we may not try the newly-activated metaslab, and instead
3439 * activate another metaslab. This is not optimal, but generally does not cause
3440 * any problems (a possible exception being if every metaslab is completely full
3441 * except for the newly-activated metaslab, which we fail to examine).
3444 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
3445 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
3446 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active)
3449 avl_tree_t *t = &mg->mg_metaslab_tree;
3450 metaslab_t *msp = avl_find(t, search, &idx);
3452 msp = avl_nearest(t, idx, AVL_AFTER);
3454 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
3456 if (!metaslab_should_allocate(msp, asize)) {
3457 metaslab_trace_add(zal, mg, msp, asize, d,
3458 TRACE_TOO_SMALL, allocator);
3463 * If the selected metaslab is condensing or being
3464 * initialized, skip it.
3466 if (msp->ms_condensing || msp->ms_initializing > 0)
3469 *was_active = msp->ms_allocator != -1;
3471 * If we're activating as primary, this is our first allocation
3472 * from this disk, so we don't need to check how close we are.
3473 * If the metaslab under consideration was already active,
3474 * we're getting desperate enough to steal another allocator's
3475 * metaslab, so we still don't care about distances.
3477 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
3480 for (i = 0; i < d; i++) {
3482 !metaslab_is_unique(msp, &dva[i]))
3483 break; /* try another metaslab */
3490 search->ms_weight = msp->ms_weight;
3491 search->ms_start = msp->ms_start + 1;
3492 search->ms_allocator = msp->ms_allocator;
3493 search->ms_primary = msp->ms_primary;
3500 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
3501 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
3502 int d, int allocator)
3504 metaslab_t *msp = NULL;
3505 uint64_t offset = -1ULL;
3506 uint64_t activation_weight;
3508 activation_weight = METASLAB_WEIGHT_PRIMARY;
3509 for (int i = 0; i < d; i++) {
3510 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3511 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3512 activation_weight = METASLAB_WEIGHT_SECONDARY;
3513 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3514 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
3515 activation_weight = METASLAB_WEIGHT_CLAIM;
3521 * If we don't have enough metaslabs active to fill the entire array, we
3522 * just use the 0th slot.
3524 if (mg->mg_ms_ready < mg->mg_allocators * 3)
3527 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
3529 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
3530 search->ms_weight = UINT64_MAX;
3531 search->ms_start = 0;
3533 * At the end of the metaslab tree are the already-active metaslabs,
3534 * first the primaries, then the secondaries. When we resume searching
3535 * through the tree, we need to consider ms_allocator and ms_primary so
3536 * we start in the location right after where we left off, and don't
3537 * accidentally loop forever considering the same metaslabs.
3539 search->ms_allocator = -1;
3540 search->ms_primary = B_TRUE;
3542 boolean_t was_active = B_FALSE;
3544 mutex_enter(&mg->mg_lock);
3546 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
3547 mg->mg_primaries[allocator] != NULL) {
3548 msp = mg->mg_primaries[allocator];
3549 was_active = B_TRUE;
3550 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
3551 mg->mg_secondaries[allocator] != NULL) {
3552 msp = mg->mg_secondaries[allocator];
3553 was_active = B_TRUE;
3555 msp = find_valid_metaslab(mg, activation_weight, dva, d,
3556 want_unique, asize, allocator, zal, search,
3560 mutex_exit(&mg->mg_lock);
3562 kmem_free(search, sizeof (*search));
3566 mutex_enter(&msp->ms_lock);
3568 * Ensure that the metaslab we have selected is still
3569 * capable of handling our request. It's possible that
3570 * another thread may have changed the weight while we
3571 * were blocked on the metaslab lock. We check the
3572 * active status first to see if we need to reselect
3575 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
3576 mutex_exit(&msp->ms_lock);
3581 * If the metaslab is freshly activated for an allocator that
3582 * isn't the one we're allocating from, or if it's a primary and
3583 * we're seeking a secondary (or vice versa), we go back and
3584 * select a new metaslab.
3586 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
3587 (msp->ms_allocator != -1) &&
3588 (msp->ms_allocator != allocator || ((activation_weight ==
3589 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
3590 mutex_exit(&msp->ms_lock);
3594 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
3595 activation_weight != METASLAB_WEIGHT_CLAIM) {
3596 metaslab_passivate(msp, msp->ms_weight &
3597 ~METASLAB_WEIGHT_CLAIM);
3598 mutex_exit(&msp->ms_lock);
3602 if (metaslab_activate(msp, allocator, activation_weight) != 0) {
3603 mutex_exit(&msp->ms_lock);
3607 msp->ms_selected_txg = txg;
3610 * Now that we have the lock, recheck to see if we should
3611 * continue to use this metaslab for this allocation. The
3612 * metaslab is now loaded, so metaslab_should_allocate() can
3613 * accurately determine if the allocation attempt should
3616 if (!metaslab_should_allocate(msp, asize)) {
3617 /* Passivate this metaslab and select a new one. */
3618 metaslab_trace_add(zal, mg, msp, asize, d,
3619 TRACE_TOO_SMALL, allocator);
3624 * If this metaslab is currently condensing then pick again as
3625 * we can't manipulate this metaslab until it's committed
3626 * to disk. If this metaslab is being initialized, we shouldn't
3627 * allocate from it since the allocated region might be
3628 * overwritten after allocation.
3630 if (msp->ms_condensing) {
3631 metaslab_trace_add(zal, mg, msp, asize, d,
3632 TRACE_CONDENSING, allocator);
3633 metaslab_passivate(msp, msp->ms_weight &
3634 ~METASLAB_ACTIVE_MASK);
3635 mutex_exit(&msp->ms_lock);
3637 } else if (msp->ms_initializing > 0) {
3638 metaslab_trace_add(zal, mg, msp, asize, d,
3639 TRACE_INITIALIZING, allocator);
3640 metaslab_passivate(msp, msp->ms_weight &
3641 ~METASLAB_ACTIVE_MASK);
3642 mutex_exit(&msp->ms_lock);
3646 offset = metaslab_block_alloc(msp, asize, txg);
3647 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
3649 if (offset != -1ULL) {
3650 /* Proactively passivate the metaslab, if needed */
3651 metaslab_segment_may_passivate(msp);
3655 ASSERT(msp->ms_loaded);
3658 * We were unable to allocate from this metaslab so determine
3659 * a new weight for this metaslab. Now that we have loaded
3660 * the metaslab we can provide a better hint to the metaslab
3663 * For space-based metaslabs, we use the maximum block size.
3664 * This information is only available when the metaslab
3665 * is loaded and is more accurate than the generic free
3666 * space weight that was calculated by metaslab_weight().
3667 * This information allows us to quickly compare the maximum
3668 * available allocation in the metaslab to the allocation
3669 * size being requested.
3671 * For segment-based metaslabs, determine the new weight
3672 * based on the highest bucket in the range tree. We
3673 * explicitly use the loaded segment weight (i.e. the range
3674 * tree histogram) since it contains the space that is
3675 * currently available for allocation and is accurate
3676 * even within a sync pass.
3678 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3679 uint64_t weight = metaslab_block_maxsize(msp);
3680 WEIGHT_SET_SPACEBASED(weight);
3681 metaslab_passivate(msp, weight);
3683 metaslab_passivate(msp,
3684 metaslab_weight_from_range_tree(msp));
3688 * We have just failed an allocation attempt, check
3689 * that metaslab_should_allocate() agrees. Otherwise,
3690 * we may end up in an infinite loop retrying the same
3693 ASSERT(!metaslab_should_allocate(msp, asize));
3695 mutex_exit(&msp->ms_lock);
3697 mutex_exit(&msp->ms_lock);
3698 kmem_free(search, sizeof (*search));
3703 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
3704 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva,
3705 int d, int allocator)
3708 ASSERT(mg->mg_initialized);
3710 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
3713 mutex_enter(&mg->mg_lock);
3714 if (offset == -1ULL) {
3715 mg->mg_failed_allocations++;
3716 metaslab_trace_add(zal, mg, NULL, asize, d,
3717 TRACE_GROUP_FAILURE, allocator);
3718 if (asize == SPA_GANGBLOCKSIZE) {
3720 * This metaslab group was unable to allocate
3721 * the minimum gang block size so it must be out of
3722 * space. We must notify the allocation throttle
3723 * to start skipping allocation attempts to this
3724 * metaslab group until more space becomes available.
3725 * Note: this failure cannot be caused by the
3726 * allocation throttle since the allocation throttle
3727 * is only responsible for skipping devices and
3728 * not failing block allocations.
3730 mg->mg_no_free_space = B_TRUE;
3733 mg->mg_allocations++;
3734 mutex_exit(&mg->mg_lock);
3739 * Allocate a block for the specified i/o.
3742 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
3743 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3744 zio_alloc_list_t *zal, int allocator)
3746 metaslab_group_t *mg, *rotor;
3748 boolean_t try_hard = B_FALSE;
3750 ASSERT(!DVA_IS_VALID(&dva[d]));
3753 * For testing, make some blocks above a certain size be gang blocks.
3754 * This will also test spilling from special to normal.
3756 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) {
3757 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
3759 return (SET_ERROR(ENOSPC));
3763 * Start at the rotor and loop through all mgs until we find something.
3764 * Note that there's no locking on mc_rotor or mc_aliquot because
3765 * nothing actually breaks if we miss a few updates -- we just won't
3766 * allocate quite as evenly. It all balances out over time.
3768 * If we are doing ditto or log blocks, try to spread them across
3769 * consecutive vdevs. If we're forced to reuse a vdev before we've
3770 * allocated all of our ditto blocks, then try and spread them out on
3771 * that vdev as much as possible. If it turns out to not be possible,
3772 * gradually lower our standards until anything becomes acceptable.
3773 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3774 * gives us hope of containing our fault domains to something we're
3775 * able to reason about. Otherwise, any two top-level vdev failures
3776 * will guarantee the loss of data. With consecutive allocation,
3777 * only two adjacent top-level vdev failures will result in data loss.
3779 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3780 * ourselves on the same vdev as our gang block header. That
3781 * way, we can hope for locality in vdev_cache, plus it makes our
3782 * fault domains something tractable.
3785 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3788 * It's possible the vdev we're using as the hint no
3789 * longer exists or its mg has been closed (e.g. by
3790 * device removal). Consult the rotor when
3793 if (vd != NULL && vd->vdev_mg != NULL) {
3796 if (flags & METASLAB_HINTBP_AVOID &&
3797 mg->mg_next != NULL)
3802 } else if (d != 0) {
3803 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3804 mg = vd->vdev_mg->mg_next;
3806 ASSERT(mc->mc_rotor != NULL);
3811 * If the hint put us into the wrong metaslab class, or into a
3812 * metaslab group that has been passivated, just follow the rotor.
3814 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3820 boolean_t allocatable;
3822 ASSERT(mg->mg_activation_count == 1);
3826 * Don't allocate from faulted devices.
3829 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3830 allocatable = vdev_allocatable(vd);
3831 spa_config_exit(spa, SCL_ZIO, FTAG);
3833 allocatable = vdev_allocatable(vd);
3837 * Determine if the selected metaslab group is eligible
3838 * for allocations. If we're ganging then don't allow
3839 * this metaslab group to skip allocations since that would
3840 * inadvertently return ENOSPC and suspend the pool
3841 * even though space is still available.
3843 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3844 allocatable = metaslab_group_allocatable(mg, rotor,
3845 psize, allocator, d);
3849 metaslab_trace_add(zal, mg, NULL, psize, d,
3850 TRACE_NOT_ALLOCATABLE, allocator);
3854 ASSERT(mg->mg_initialized);
3857 * Avoid writing single-copy data to a failing,
3858 * non-redundant vdev, unless we've already tried all other vdevs.
3861 if ((vd->vdev_stat.vs_write_errors > 0 ||
3862 vd->vdev_state < VDEV_STATE_HEALTHY) &&
3863 d == 0 && !try_hard && vd->vdev_children == 0) {
3864 metaslab_trace_add(zal, mg, NULL, psize, d,
3865 TRACE_VDEV_ERROR, allocator);
3869 ASSERT(mg->mg_class == mc);
3871 uint64_t asize = vdev_psize_to_asize(vd, psize);
3872 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3875 * If we don't need to try hard, then require that the
3876 * block be on a different metaslab from any other DVAs
3877 * in this BP (unique=true). If we are trying hard, then
3878 * allow any metaslab to be used (unique=false).
3880 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3881 !try_hard, dva, d, allocator);
3883 if (offset != -1ULL) {
3885 * If we've just selected this metaslab group,
3886 * figure out whether the corresponding vdev is
3887 * over- or under-used relative to the pool,
3888 * and set an allocation bias to even it out.
3890 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3891 vdev_stat_t *vs = &vd->vdev_stat;
3894 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3895 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3898 * Calculate how much more or less we should
3899 * try to allocate from this device during
3900 * this iteration around the rotor.
3901 * For example, if a device is 80% full
3902 * and the pool is 20% full then we should
3903 * reduce allocations by 60% on this device.
3905 * mg_bias = (20 - 80) * 512K / 100 = -307K
3907 * This reduces allocations by 307K for this
3910 mg->mg_bias = ((cu - vu) *
3911 (int64_t)mg->mg_aliquot) / 100;
3912 } else if (!metaslab_bias_enabled) {
3916 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3917 mg->mg_aliquot + mg->mg_bias) {
3918 mc->mc_rotor = mg->mg_next;
3922 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3923 DVA_SET_OFFSET(&dva[d], offset);
3924 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3925 DVA_SET_ASIZE(&dva[d], asize);
3930 mc->mc_rotor = mg->mg_next;
3932 } while ((mg = mg->mg_next) != rotor);
3935 * If we haven't tried hard, do so now.
if (!try_hard) {
	try_hard = B_TRUE;
	goto top;
}
3942 bzero(&dva[d], sizeof (dva_t));
3944 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
3945 return (SET_ERROR(ENOSPC));
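/*
 * A condensed sketch of the rotor walk above (control flow only; the
 * hint handling, throttle checks, and bias bookkeeping are elided):
 *
 *	mg = rotor;
 *	do {
 *		if (the vdev is not allocatable or the group must be skipped)
 *			continue with the next group;
 *		offset = metaslab_group_alloc(mg, zal, asize, txg,
 *		    !try_hard, dva, d, allocator);
 *		if (offset != -1ULL) {
 *			fill in dva[d];
 *			return (0);
 *		}
 *		mc->mc_rotor = mg->mg_next;
 *	} while ((mg = mg->mg_next) != rotor);
 *
 *	if (!try_hard)
 *		repeat the walk with try_hard = B_TRUE;
 *	return (SET_ERROR(ENOSPC));
 */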
3949 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
3950 boolean_t checkpoint)
3953 spa_t *spa = vd->vdev_spa;
3955 ASSERT(vdev_is_concrete(vd));
3956 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
3957 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
3959 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3961 VERIFY(!msp->ms_condensing);
3962 VERIFY3U(offset, >=, msp->ms_start);
3963 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
3964 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3965 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
3967 metaslab_check_free_impl(vd, offset, asize);
3969 mutex_enter(&msp->ms_lock);
3970 if (range_tree_is_empty(msp->ms_freeing) &&
3971 range_tree_is_empty(msp->ms_checkpointing)) {
3972 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
3976 ASSERT(spa_has_checkpoint(spa));
3977 range_tree_add(msp->ms_checkpointing, offset, asize);
3979 range_tree_add(msp->ms_freeing, offset, asize);
3981 mutex_exit(&msp->ms_lock);
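/*
 * To illustrate the bookkeeping above: the vdev is dirtied only by the
 * first free that reaches a metaslab in a sync pass (while both
 * ms_freeing and ms_checkpointing are still empty); later frees in the
 * same pass only grow one of the two trees.  Which tree grows depends
 * solely on the checkpoint argument:
 *
 *	checkpoint == B_TRUE   ->  range_tree_add(msp->ms_checkpointing, ...)
 *	checkpoint == B_FALSE  ->  range_tree_add(msp->ms_freeing, ...)
 */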
3986 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
3987 uint64_t size, void *arg)
3989 boolean_t *checkpoint = arg;
3991 ASSERT3P(checkpoint, !=, NULL);
3993 if (vd->vdev_ops->vdev_op_remap != NULL)
3994 vdev_indirect_mark_obsolete(vd, offset, size);
3996 metaslab_free_impl(vd, offset, size, *checkpoint);
4000 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
4001 boolean_t checkpoint)
4003 spa_t *spa = vd->vdev_spa;
4005 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4007 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
4010 if (spa->spa_vdev_removal != NULL &&
4011 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
4012 vdev_is_concrete(vd)) {
4014 * Note: we check if the vdev is concrete because when
4015 * we complete the removal, we first change the vdev to be
4016 * an indirect vdev (in open context), and then (in syncing
4017 * context) clear spa_vdev_removal.
4019 free_from_removing_vdev(vd, offset, size);
4020 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
4021 vdev_indirect_mark_obsolete(vd, offset, size);
4022 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4023 metaslab_free_impl_cb, &checkpoint);
4025 metaslab_free_concrete(vd, offset, size, checkpoint);
4029 typedef struct remap_blkptr_cb_arg {
4031 spa_remap_cb_t rbca_cb;
4032 vdev_t *rbca_remap_vd;
4033 uint64_t rbca_remap_offset;
4035 } remap_blkptr_cb_arg_t;
4038 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4039 uint64_t size, void *arg)
4041 remap_blkptr_cb_arg_t *rbca = arg;
4042 blkptr_t *bp = rbca->rbca_bp;
4044 /* We cannot remap split blocks. */
4045 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
4047 ASSERT0(inner_offset);
4049 if (rbca->rbca_cb != NULL) {
4051 * At this point we know that we are not handling split
4052 * blocks and we invoke the callback on the previous
4053 * vdev which must be indirect.
4055 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
4057 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
4058 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
4060 /* set up remap_blkptr_cb_arg for the next call */
4061 rbca->rbca_remap_vd = vd;
4062 rbca->rbca_remap_offset = offset;
4066 * The phys birth time is that of dva[0]. This ensures that we know
4067 * when each dva was written, so that resilver can determine which
4068 * blocks need to be scrubbed (i.e. those written during the time
4069 * the vdev was offline). It also ensures that the key used in
4070 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
4071 * we didn't change the phys_birth, a lookup in the ARC for a
4072 * remapped BP could find the data that was previously stored at
4073 * this vdev + offset.
4075 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
4076 DVA_GET_VDEV(&bp->blk_dva[0]));
4077 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
4078 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
4079 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
4081 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
4082 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
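/*
 * For illustration (made-up numbers): if dva[0] was <vdev=2, offset=0x40000>
 * on an indirect vdev whose mapping sends that range to concrete vdev 0 at
 * offset 0x90000, then after this callback dva[0] reads <vdev=0,
 * offset=0x90000>, and blk_phys_birth holds the txg recorded in vdev 2's
 * indirect-births tree for that offset (presumably the txg in which the
 * copied data was synced).
 */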
4086 * If the block pointer contains any indirect DVAs, modify them to refer to
4087 * concrete DVAs. Note that this will sometimes not be possible, leaving
4088 * the indirect DVA in place. This happens if the indirect DVA spans multiple
4089 * segments in the mapping (i.e. it is a "split block").
4091 * If the BP was remapped, calls the callback on the original dva (note the
4092 * callback can be called multiple times if the original indirect DVA refers
4093 * to another indirect DVA, etc).
4095 * Returns TRUE if the BP was remapped.
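*
* A minimal usage sketch (the callback name and body are hypothetical; the
* parameter order follows the rbca_cb invocation in remap_blkptr_cb()):
*
*	static void
*	my_remap_cb(uint64_t vdev_id, uint64_t offset, uint64_t size, void *arg)
*	{
*		... record the original (indirect) vdev_id and offset ...
*	}
*
*	if (spa_remap_blkptr(spa, bp, my_remap_cb, arg))
*		... bp->blk_dva[0] now names a concrete vdev ...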
4098 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
4100 remap_blkptr_cb_arg_t rbca;
4102 if (!zfs_remap_blkptr_enable)
4105 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
4109 * Dedup BPs cannot be remapped, because ddt_phys_select() depends
4110 * on DVA[0] being the same in the BP as in the DDT (dedup table).
4112 if (BP_GET_DEDUP(bp))
4116 * Gang blocks can not be remapped, because
4117 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
4118 * the BP used to read the gang block header (GBH) being the same
4119 * as the DVA[0] that we allocated for the GBH.
4125 * Embedded BPs have no DVA to remap.
4127 if (BP_GET_NDVAS(bp) < 1)
4131 * Note: we only remap dva[0]. If we remapped other dvas, we
4132 * would no longer know what their phys birth txg is.
4134 dva_t *dva = &bp->blk_dva[0];
4136 uint64_t offset = DVA_GET_OFFSET(dva);
4137 uint64_t size = DVA_GET_ASIZE(dva);
4138 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
4140 if (vd->vdev_ops->vdev_op_remap == NULL)
4144 rbca.rbca_cb = callback;
4145 rbca.rbca_remap_vd = vd;
4146 rbca.rbca_remap_offset = offset;
4147 rbca.rbca_cb_arg = arg;
4150 * remap_blkptr_cb() will be called in order for each level of
4151 * indirection, until a concrete vdev is reached or a split block is
4152 * encountered. old_vd and old_offset are updated within the callback
4153 * as we go from the one indirect vdev to the next one (either concrete
4154 * or indirect again) in that order.
4156 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
4158 /* Check if the DVA wasn't remapped because it is a split block */
4159 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
4166 * Undo the allocation of a DVA which happened in the given transaction group.
4169 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
4173 uint64_t vdev = DVA_GET_VDEV(dva);
4174 uint64_t offset = DVA_GET_OFFSET(dva);
4175 uint64_t size = DVA_GET_ASIZE(dva);
4177 ASSERT(DVA_IS_VALID(dva));
4178 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4180 if (txg > spa_freeze_txg(spa))
4183 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
4184 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
4185 cmn_err(CE_WARN, "metaslab_unalloc_dva(): bad DVA %llu:%llu",
4186 (u_longlong_t)vdev, (u_longlong_t)offset);
4191 ASSERT(!vd->vdev_removing);
4192 ASSERT(vdev_is_concrete(vd));
4193 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
4194 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
4196 if (DVA_GET_GANG(dva))
4197 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4199 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4201 mutex_enter(&msp->ms_lock);
4202 range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
4205 VERIFY(!msp->ms_condensing);
4206 VERIFY3U(offset, >=, msp->ms_start);
4207 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
4208 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
4210 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4211 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4212 range_tree_add(msp->ms_allocatable, offset, size);
4213 mutex_exit(&msp->ms_lock);
4217 * Free the block represented by the given DVA.
4220 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
4222 uint64_t vdev = DVA_GET_VDEV(dva);
4223 uint64_t offset = DVA_GET_OFFSET(dva);
4224 uint64_t size = DVA_GET_ASIZE(dva);
4225 vdev_t *vd = vdev_lookup_top(spa, vdev);
4227 ASSERT(DVA_IS_VALID(dva));
4228 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4230 if (DVA_GET_GANG(dva)) {
4231 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4234 metaslab_free_impl(vd, offset, size, checkpoint);
4238 * Reserve some allocation slots. The reservation system must be called
4239 * before we call into the allocator. If there aren't any available slots
4240 * then the I/O will be throttled until an I/O completes and its slots are
4241 * freed up. The function returns true if it was successful in placing the reservation.
4245 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
4246 zio_t *zio, int flags)
4248 uint64_t available_slots = 0;
4249 boolean_t slot_reserved = B_FALSE;
4250 uint64_t max = mc->mc_alloc_max_slots[allocator];
4252 ASSERT(mc->mc_alloc_throttle_enabled);
4253 mutex_enter(&mc->mc_lock);
4255 uint64_t reserved_slots =
4256 zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
4257 if (reserved_slots < max)
4258 available_slots = max - reserved_slots;
4260 if (slots <= available_slots || GANG_ALLOCATION(flags) ||
4261 flags & METASLAB_MUST_RESERVE) {
4263 * We reserve the slots individually so that we can unreserve
4264 * them individually when an I/O completes.
4266 for (int d = 0; d < slots; d++) {
4268 zfs_refcount_add(&mc->mc_alloc_slots[allocator],
4271 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
4272 slot_reserved = B_TRUE;
4275 mutex_exit(&mc->mc_lock);
4276 return (slot_reserved);
4280 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
4281 int allocator, zio_t *zio)
4283 ASSERT(mc->mc_alloc_throttle_enabled);
4284 mutex_enter(&mc->mc_lock);
4285 for (int d = 0; d < slots; d++) {
4286 (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
4289 mutex_exit(&mc->mc_lock);
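/*
 * A minimal usage sketch of the reserve/unreserve pair above (a
 * hypothetical caller; in practice the zio pipeline drives these calls):
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, 0)) {
 *		error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg,
 *		    NULL, flags, zal, zio, allocator);
 *		... issue the write; once the I/O completes ...
 *		metaslab_class_throttle_unreserve(mc, ndvas, allocator, zio);
 *	} else {
 *		... requeue the I/O and retry after slots are released ...
 *	}
 */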
4293 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
4297 spa_t *spa = vd->vdev_spa;
4300 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
4303 ASSERT3P(vd->vdev_ms, !=, NULL);
4304 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4306 mutex_enter(&msp->ms_lock);
4308 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
4309 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
4311 * No need to fail in that case; someone else has activated the
4312 * metaslab, but that doesn't preclude us from using it.
4318 !range_tree_contains(msp->ms_allocatable, offset, size))
4319 error = SET_ERROR(ENOENT);
4321 if (error || txg == 0) { /* txg == 0 indicates dry run */
4322 mutex_exit(&msp->ms_lock);
4326 VERIFY(!msp->ms_condensing);
4327 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
4328 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4329 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
4331 range_tree_remove(msp->ms_allocatable, offset, size);
4333 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
4334 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4335 vdev_dirty(vd, VDD_METASLAB, msp, txg);
4336 range_tree_add(msp->ms_allocating[txg & TXG_MASK],
4340 mutex_exit(&msp->ms_lock);
4345 typedef struct metaslab_claim_cb_arg_t {
4348 } metaslab_claim_cb_arg_t;
4352 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
4353 uint64_t size, void *arg)
4355 metaslab_claim_cb_arg_t *mcca_arg = arg;
4357 if (mcca_arg->mcca_error == 0) {
4358 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
4359 size, mcca_arg->mcca_txg);
4364 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
4366 if (vd->vdev_ops->vdev_op_remap != NULL) {
4367 metaslab_claim_cb_arg_t arg;
4370 * Only zdb(1M) can claim on indirect vdevs. This is used
4371 * to detect leaks of mapped space (that are not accounted
4372 * for in the obsolete counts, spacemap, or bpobj).
4374 ASSERT(!spa_writeable(vd->vdev_spa));
4378 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4379 metaslab_claim_impl_cb, &arg);
4381 if (arg.mcca_error == 0) {
4382 arg.mcca_error = metaslab_claim_concrete(vd,
4385 return (arg.mcca_error);
4387 return (metaslab_claim_concrete(vd, offset, size, txg));
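/*
 * Call-path sketch for claims (a summary, not new behavior):
 * metaslab_claim() -> metaslab_claim_dva() for each DVA ->
 * metaslab_claim_impl() -> metaslab_claim_concrete().  On a writable pool
 * the vdev must be concrete; only zdb(1M), with the pool opened read-only,
 * walks vdev_op_remap() here to claim space through an indirect mapping.
 */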
4392 * Intent log support: upon opening the pool after a crash, notify the SPA
4393 * of blocks that the intent log has allocated for immediate write, but
4394 * which are still considered free by the SPA because the last transaction
4395 * group didn't commit yet.
4398 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
4400 uint64_t vdev = DVA_GET_VDEV(dva);
4401 uint64_t offset = DVA_GET_OFFSET(dva);
4402 uint64_t size = DVA_GET_ASIZE(dva);
4405 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
4406 return (SET_ERROR(ENXIO));
4409 ASSERT(DVA_IS_VALID(dva));
4411 if (DVA_GET_GANG(dva))
4412 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4414 return (metaslab_claim_impl(vd, offset, size, txg));
4418 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
4419 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
4420 zio_alloc_list_t *zal, zio_t *zio, int allocator)
4422 dva_t *dva = bp->blk_dva;
4423 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
4426 ASSERT(bp->blk_birth == 0);
4427 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
4429 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4431 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
4432 spa_config_exit(spa, SCL_ALLOC, FTAG);
4433 return (SET_ERROR(ENOSPC));
4436 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
4437 ASSERT(BP_GET_NDVAS(bp) == 0);
4438 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
4439 ASSERT3P(zal, !=, NULL);
4441 for (int d = 0; d < ndvas; d++) {
4442 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
4443 txg, flags, zal, allocator);
4445 for (d--; d >= 0; d--) {
4446 metaslab_unalloc_dva(spa, &dva[d], txg);
4447 metaslab_group_alloc_decrement(spa,
4448 DVA_GET_VDEV(&dva[d]), zio, flags,
4449 allocator, B_FALSE);
4450 bzero(&dva[d], sizeof (dva_t));
4452 spa_config_exit(spa, SCL_ALLOC, FTAG);
4456 * Update the metaslab group's queue depth
4457 * based on the newly allocated dva.
4459 metaslab_group_alloc_increment(spa,
4460 DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
4465 ASSERT(BP_GET_NDVAS(bp) == ndvas);
4467 spa_config_exit(spa, SCL_ALLOC, FTAG);
4469 BP_SET_BIRTH(bp, txg, txg);
4475 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
4477 const dva_t *dva = bp->blk_dva;
4478 int ndvas = BP_GET_NDVAS(bp);
4480 ASSERT(!BP_IS_HOLE(bp));
4481 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
4484 * If we have a checkpoint for the pool we need to make sure that
4485 * the blocks that we free that are part of the checkpoint won't be
4486 * reused until the checkpoint is discarded or we revert to it.
4488 * The checkpoint flag is passed down the metaslab_free code path
4489 * and is set whenever we want to add a block to the checkpoint's
4490 * accounting. That is, we "checkpoint" blocks that existed at the
4491 * time the checkpoint was created and are therefore referenced by
4492 * the checkpointed uberblock.
4494 * Note that we don't checkpoint any blocks if the current
4495 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
4496 * normally as they will be referenced by the checkpointed uberblock.
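*
* A worked example (made-up txgs): with a checkpoint taken at txg 100, a
* block born in txg 90 and freed while txg 120 is syncing satisfies both
* conditions below and is checkpointed; a block born in txg 110 is not part
* of the checkpointed uberblock, so it is freed normally.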
4498 boolean_t checkpoint = B_FALSE;
4499 if (bp->blk_birth <= spa->spa_checkpoint_txg &&
4500 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
4502 * At this point, if the block is part of the checkpoint
4503 * there is no way it was created in the current txg.
4506 ASSERT3U(spa_syncing_txg(spa), ==, txg);
4507 checkpoint = B_TRUE;
4510 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
4512 for (int d = 0; d < ndvas; d++) {
4514 metaslab_unalloc_dva(spa, &dva[d], txg);
4516 ASSERT3U(txg, ==, spa_syncing_txg(spa));
4517 metaslab_free_dva(spa, &dva[d], checkpoint);
4521 spa_config_exit(spa, SCL_FREE, FTAG);
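/*
 * To illustrate the dispatch above: with now == B_TRUE the free undoes an
 * allocation made in the currently syncing txg, so metaslab_unalloc_dva()
 * returns the range directly to ms_allocatable; otherwise
 * metaslab_free_dva() routes it through metaslab_free_impl(), where it
 * lands in ms_freeing or ms_checkpointing to be processed when the
 * metaslab syncs.
 */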
4525 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
4527 const dva_t *dva = bp->blk_dva;
4528 int ndvas = BP_GET_NDVAS(bp);
4531 ASSERT(!BP_IS_HOLE(bp));
4535 * First do a dry run to make sure all DVAs are claimable,
4536 * so we don't have to unwind from partial failures below.
4538 if ((error = metaslab_claim(spa, bp, 0)) != 0)
4542 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4544 for (int d = 0; d < ndvas; d++) {
4545 error = metaslab_claim_dva(spa, &dva[d], txg);
4550 spa_config_exit(spa, SCL_ALLOC, FTAG);
4552 ASSERT(error == 0 || txg == 0);
4559 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
4560 uint64_t size, void *arg)
4562 if (vd->vdev_ops == &vdev_indirect_ops)
4565 metaslab_check_free_impl(vd, offset, size);
4569 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
4572 spa_t *spa = vd->vdev_spa;
4574 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4577 if (vd->vdev_ops->vdev_op_remap != NULL) {
4578 vd->vdev_ops->vdev_op_remap(vd, offset, size,
4579 metaslab_check_free_impl_cb, NULL);
4583 ASSERT(vdev_is_concrete(vd));
4584 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
4585 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
4587 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
4589 mutex_enter(&msp->ms_lock);
4590 if (msp->ms_loaded) {
4591 range_tree_verify_not_present(msp->ms_allocatable,
4595 range_tree_verify_not_present(msp->ms_freeing, offset, size);
4596 range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
4597 range_tree_verify_not_present(msp->ms_freed, offset, size);
4598 for (int j = 0; j < TXG_DEFER_SIZE; j++)
4599 range_tree_verify_not_present(msp->ms_defer[j], offset, size);
4600 mutex_exit(&msp->ms_lock);
4604 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
4606 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
4609 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
4610 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
4611 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
4612 vdev_t *vd = vdev_lookup_top(spa, vdev);
4613 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
4614 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
4616 if (DVA_GET_GANG(&bp->blk_dva[i]))
4617 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
4619 ASSERT3P(vd, !=, NULL);
4621 metaslab_check_free_impl(vd, offset, size);
4623 spa_config_exit(spa, SCL_VDEV, FTAG);
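/*
 * Note on the checks above: metaslab_check_free() and
 * metaslab_check_free_impl() are no-ops unless the ZFS_DEBUG_ZIO_FREE bit
 * is set in zfs_flags.  With it set, a range that is still present in
 * ms_allocatable, or already pending in ms_freeing, ms_checkpointing,
 * ms_freed, or ms_defer[], trips the corresponding
 * range_tree_verify_not_present() check, flagging a likely double free.
 */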