4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
38 SYSCTL_DECL(_vfs_zfs);
39 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
41 #define GANG_ALLOCATION(flags) \
42 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
44 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
45 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
46 #define METASLAB_ACTIVE_MASK \
47 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
49 uint64_t metaslab_aliquot = 512ULL << 10;
50 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
51 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
52 &metaslab_gang_bang, 0,
53 "Force gang block allocation for blocks larger than or equal to this value");
56 * The in-core space map representation is more compact than its on-disk form.
57 * The zfs_condense_pct determines how much more compact the in-core
58 * space_map representation must be before we compact it on-disk.
59 * Values should be greater than or equal to 100.
61 int zfs_condense_pct = 200;
62 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
64 "Condense on-disk spacemap when it is more than this many percents"
65 " of in-memory counterpart");
68 * Condensing a metaslab is not guaranteed to actually reduce the amount of
69 * space used on disk. In particular, a space map uses data in increments of
70 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
71 * same number of blocks after condensing. Since the goal of condensing is to
72 * reduce the number of IOPs required to read the space map, we only want to
73 * condense when we can be sure we will reduce the number of blocks used by the
74 * space map. Unfortunately, we cannot precisely compute whether or not this is
75 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
76 * we apply the following heuristic: do not condense a spacemap unless the
77  * uncondensed size consumes more than zfs_metaslab_condense_block_threshold blocks.
80 int zfs_metaslab_condense_block_threshold = 4;
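/*
 * A rough worked example of the two tunables above, assuming the defaults
 * (zfs_condense_pct = 200, zfs_metaslab_condense_block_threshold = 4) and a
 * 4K space map block size: with about 1000 free segments, the minimal
 * in-core form is roughly 1000 * 8 bytes = 8K, so condensing is only
 * considered once the on-disk space map is at least 200% of that (16K) and
 * also occupies more than 4 * 4K = 16K of blocks, in addition to the
 * largest-segment check performed in metaslab_should_condense().
 */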
83 * The zfs_mg_noalloc_threshold defines which metaslab groups should
84 * be eligible for allocation. The value is defined as a percentage of
85 * free space. Metaslab groups that have more free space than
86 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
87 * a metaslab group's free space is less than or equal to the
88 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
89 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
90 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
91 * groups are allowed to accept allocations. Gang blocks are always
92 * eligible to allocate on any metaslab group. The default value of 0 means
93 * no metaslab group will be excluded based on this criterion.
95 int zfs_mg_noalloc_threshold = 0;
96 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
97 &zfs_mg_noalloc_threshold, 0,
98 "Percentage of metaslab group size that should be free"
99 " to make it eligible for allocation");
102 * Metaslab groups are considered eligible for allocations if their
103  * fragmentation metric (measured as a percentage) is less than or equal to
104 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
105 * then it will be skipped unless all metaslab groups within the metaslab
106 * class have also crossed this threshold.
108 int zfs_mg_fragmentation_threshold = 85;
109 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
110 &zfs_mg_fragmentation_threshold, 0,
111 "Percentage of metaslab group size that should be considered "
112 "eligible for allocations unless all metaslab groups within the metaslab class "
113 "have also crossed this threshold");
116 * Allow metaslabs to keep their active state as long as their fragmentation
117 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
118 * active metaslab that exceeds this threshold will no longer keep its active
119 * status allowing better metaslabs to be selected.
121 int zfs_metaslab_fragmentation_threshold = 70;
122 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
123 &zfs_metaslab_fragmentation_threshold, 0,
124 "Maximum percentage of metaslab fragmentation level to keep their active state");
127 * When set will load all metaslabs when pool is first opened.
129 int metaslab_debug_load = 0;
130 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
131 &metaslab_debug_load, 0,
132 "Load all metaslabs when pool is first opened");
135 * When set will prevent metaslabs from being unloaded.
137 int metaslab_debug_unload = 0;
138 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
139 &metaslab_debug_unload, 0,
140 "Prevent metaslabs from being unloaded");
143 * Minimum size which forces the dynamic allocator to change
144  * its allocation strategy. Once the space map cannot satisfy
145  * an allocation of this size then it switches to using a more
146  * aggressive strategy (i.e. search by size rather than by offset).
148 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
149 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
150 &metaslab_df_alloc_threshold, 0,
151 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
154 * The minimum free space, in percent, which must be available
155 * in a space map to continue allocations in a first-fit fashion.
156 * Once the space_map's free space drops below this level we dynamically
157 * switch to using best-fit allocations.
159 int metaslab_df_free_pct = 4;
160 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
161 &metaslab_df_free_pct, 0,
162 "The minimum free space, in percent, which must be available in a "
163 "space map to continue allocations in a first-fit fashion");
166 * A metaslab is considered "free" if it contains a contiguous
167 * segment which is greater than metaslab_min_alloc_size.
169 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
170 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
171 &metaslab_min_alloc_size, 0,
172 "A metaslab is considered \"free\" if it contains a contiguous "
173 "segment which is greater than vfs.zfs.metaslab.min_alloc_size");
176 * Percentage of all cpus that can be used by the metaslab taskq.
178 int metaslab_load_pct = 50;
179 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
180 &metaslab_load_pct, 0,
181 "Percentage of cpus that can be used by the metaslab taskq");
184 * Determines how many txgs a metaslab may remain loaded without having any
185  * allocations from it. As long as a metaslab continues to be used we will keep it loaded.
188 int metaslab_unload_delay = TXG_SIZE * 2;
189 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
190 &metaslab_unload_delay, 0,
191 "Number of TXGs that an unused metaslab can be kept in memory");
194 * Max number of metaslabs per group to preload.
196 int metaslab_preload_limit = SPA_DVAS_PER_BP;
197 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
198 &metaslab_preload_limit, 0,
199 "Max number of metaslabs per group to preload");
202  * Enable/disable preloading of metaslabs.
204 boolean_t metaslab_preload_enabled = B_TRUE;
205 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
206 &metaslab_preload_enabled, 0,
207 "Max number of metaslabs per group to preload");
210 * Enable/disable fragmentation weighting on metaslabs.
212 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
213 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
214 &metaslab_fragmentation_factor_enabled, 0,
215 "Enable fragmentation weighting on metaslabs");
218 * Enable/disable lba weighting (i.e. outer tracks are given preference).
220 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
221 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
222 &metaslab_lba_weighting_enabled, 0,
223 "Enable LBA weighting (i.e. outer tracks are given preference)");
226 * Enable/disable metaslab group biasing.
228 boolean_t metaslab_bias_enabled = B_TRUE;
229 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
230 &metaslab_bias_enabled, 0,
231 "Enable metaslab group biasing");
233 static uint64_t metaslab_fragmentation(metaslab_t *);
236  * ==========================================================================
237  * Metaslab classes
238 * ==========================================================================
241 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
243 metaslab_class_t *mc;
245 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
250 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
251 refcount_create_tracked(&mc->mc_alloc_slots);
257 metaslab_class_destroy(metaslab_class_t *mc)
259 ASSERT(mc->mc_rotor == NULL);
260 ASSERT(mc->mc_alloc == 0);
261 ASSERT(mc->mc_deferred == 0);
262 ASSERT(mc->mc_space == 0);
263 ASSERT(mc->mc_dspace == 0);
265 refcount_destroy(&mc->mc_alloc_slots);
266 mutex_destroy(&mc->mc_lock);
267 kmem_free(mc, sizeof (metaslab_class_t));
271 metaslab_class_validate(metaslab_class_t *mc)
273 metaslab_group_t *mg;
277 * Must hold one of the spa_config locks.
279 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
280 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
282 if ((mg = mc->mc_rotor) == NULL)
287 ASSERT(vd->vdev_mg != NULL);
288 ASSERT3P(vd->vdev_top, ==, vd);
289 ASSERT3P(mg->mg_class, ==, mc);
290 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
291 } while ((mg = mg->mg_next) != mc->mc_rotor);
297 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
298 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
300 atomic_add_64(&mc->mc_alloc, alloc_delta);
301 atomic_add_64(&mc->mc_deferred, defer_delta);
302 atomic_add_64(&mc->mc_space, space_delta);
303 atomic_add_64(&mc->mc_dspace, dspace_delta);
307 metaslab_class_minblocksize_update(metaslab_class_t *mc)
309 metaslab_group_t *mg;
311 uint64_t minashift = UINT64_MAX;
313 if ((mg = mc->mc_rotor) == NULL) {
314 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
320 if (vd->vdev_ashift < minashift)
321 minashift = vd->vdev_ashift;
322 } while ((mg = mg->mg_next) != mc->mc_rotor);
324 mc->mc_minblocksize = 1ULL << minashift;
328 metaslab_class_get_alloc(metaslab_class_t *mc)
330 return (mc->mc_alloc);
334 metaslab_class_get_deferred(metaslab_class_t *mc)
336 return (mc->mc_deferred);
340 metaslab_class_get_space(metaslab_class_t *mc)
342 return (mc->mc_space);
346 metaslab_class_get_dspace(metaslab_class_t *mc)
348 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
352 metaslab_class_get_minblocksize(metaslab_class_t *mc)
354 return (mc->mc_minblocksize);
358 metaslab_class_histogram_verify(metaslab_class_t *mc)
360 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
364 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
367 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
370 for (int c = 0; c < rvd->vdev_children; c++) {
371 vdev_t *tvd = rvd->vdev_child[c];
372 metaslab_group_t *mg = tvd->vdev_mg;
375 * Skip any holes, uninitialized top-levels, or
376                  * vdevs that are not in this metaslab class.
378 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
379 mg->mg_class != mc) {
383 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
384 mc_hist[i] += mg->mg_histogram[i];
387 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
388 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
390 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
394 * Calculate the metaslab class's fragmentation metric. The metric
395 * is weighted based on the space contribution of each metaslab group.
396 * The return value will be a number between 0 and 100 (inclusive), or
397 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
398 * zfs_frag_table for more information about the metric.
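/*
 * As an illustrative example (hypothetical numbers): a class with a 1TB
 * group at 10% fragmentation and a 3TB group at 50% fragmentation would
 * report (10 * 1TB + 50 * 3TB) / 4TB = 40% for the class as a whole.
 */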
401 metaslab_class_fragmentation(metaslab_class_t *mc)
403 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
404 uint64_t fragmentation = 0;
406 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
408 for (int c = 0; c < rvd->vdev_children; c++) {
409 vdev_t *tvd = rvd->vdev_child[c];
410 metaslab_group_t *mg = tvd->vdev_mg;
413 * Skip any holes, uninitialized top-levels, or
414                  * vdevs that are not in this metaslab class.
416 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
417 mg->mg_class != mc) {
422 * If a metaslab group does not contain a fragmentation
423 * metric then just bail out.
425 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
426 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
427 return (ZFS_FRAG_INVALID);
431 * Determine how much this metaslab_group is contributing
432 * to the overall pool fragmentation metric.
434 fragmentation += mg->mg_fragmentation *
435 metaslab_group_get_space(mg);
437 fragmentation /= metaslab_class_get_space(mc);
439 ASSERT3U(fragmentation, <=, 100);
440 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
441 return (fragmentation);
445 * Calculate the amount of expandable space that is available in
446 * this metaslab class. If a device is expanded then its expandable
447 * space will be the amount of allocatable space that is currently not
448 * part of this metaslab class.
451 metaslab_class_expandable_space(metaslab_class_t *mc)
453 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
456 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
457 for (int c = 0; c < rvd->vdev_children; c++) {
458 vdev_t *tvd = rvd->vdev_child[c];
459 metaslab_group_t *mg = tvd->vdev_mg;
461 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
462 mg->mg_class != mc) {
467 * Calculate if we have enough space to add additional
468 * metaslabs. We report the expandable space in terms
469 * of the metaslab size since that's the unit of expansion.
471 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
472 1ULL << tvd->vdev_ms_shift);
474 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
479  * ==========================================================================
480  * Metaslab groups
481 * ==========================================================================
484 metaslab_compare(const void *x1, const void *x2)
486 const metaslab_t *m1 = x1;
487 const metaslab_t *m2 = x2;
489 if (m1->ms_weight < m2->ms_weight)
491 if (m1->ms_weight > m2->ms_weight)
495 * If the weights are identical, use the offset to force uniqueness.
497 if (m1->ms_start < m2->ms_start)
499 if (m1->ms_start > m2->ms_start)
502 ASSERT3P(m1, ==, m2);
508 * Update the allocatable flag and the metaslab group's capacity.
509  * The allocatable flag is cleared if the free capacity is at or below
510  * zfs_mg_noalloc_threshold or the group's fragmentation value is
511  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
512 * transitions from allocatable to non-allocatable or vice versa then the
513 * metaslab group's class is updated to reflect the transition.
516 metaslab_group_alloc_update(metaslab_group_t *mg)
518 vdev_t *vd = mg->mg_vd;
519 metaslab_class_t *mc = mg->mg_class;
520 vdev_stat_t *vs = &vd->vdev_stat;
521 boolean_t was_allocatable;
522 boolean_t was_initialized;
524 ASSERT(vd == vd->vdev_top);
526 mutex_enter(&mg->mg_lock);
527 was_allocatable = mg->mg_allocatable;
528 was_initialized = mg->mg_initialized;
530 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
533 mutex_enter(&mc->mc_lock);
536 * If the metaslab group was just added then it won't
537 * have any space until we finish syncing out this txg.
538 * At that point we will consider it initialized and available
539 * for allocations. We also don't consider non-activated
540 * metaslab groups (e.g. vdevs that are in the middle of being removed)
541 * to be initialized, because they can't be used for allocation.
543 mg->mg_initialized = metaslab_group_initialized(mg);
544 if (!was_initialized && mg->mg_initialized) {
546 } else if (was_initialized && !mg->mg_initialized) {
547 ASSERT3U(mc->mc_groups, >, 0);
550 if (mg->mg_initialized)
551 mg->mg_no_free_space = B_FALSE;
554 * A metaslab group is considered allocatable if it has plenty
555 * of free space or is not heavily fragmented. We only take
556 * fragmentation into account if the metaslab group has a valid
557 * fragmentation metric (i.e. a value between 0 and 100).
559 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
560 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
561 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
562 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
565 * The mc_alloc_groups maintains a count of the number of
566 * groups in this metaslab class that are still above the
567 * zfs_mg_noalloc_threshold. This is used by the allocating
568 * threads to determine if they should avoid allocations to
569 * a given group. The allocator will avoid allocations to a group
570 * if that group has reached or is below the zfs_mg_noalloc_threshold
571 * and there are still other groups that are above the threshold.
572 * When a group transitions from allocatable to non-allocatable or
573 * vice versa we update the metaslab class to reflect that change.
574 * When the mc_alloc_groups value drops to 0 that means that all
575 * groups have reached the zfs_mg_noalloc_threshold making all groups
576 * eligible for allocations. This effectively means that all devices
577 * are balanced again.
579 if (was_allocatable && !mg->mg_allocatable)
580 mc->mc_alloc_groups--;
581 else if (!was_allocatable && mg->mg_allocatable)
582 mc->mc_alloc_groups++;
583 mutex_exit(&mc->mc_lock);
585 mutex_exit(&mg->mg_lock);
589 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
591 metaslab_group_t *mg;
593 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
594 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
595 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
596 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
599 mg->mg_activation_count = 0;
600 mg->mg_initialized = B_FALSE;
601 mg->mg_no_free_space = B_TRUE;
602 refcount_create_tracked(&mg->mg_alloc_queue_depth);
604 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
605 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
611 metaslab_group_destroy(metaslab_group_t *mg)
613 ASSERT(mg->mg_prev == NULL);
614 ASSERT(mg->mg_next == NULL);
616 * We may have gone below zero with the activation count
617 * either because we never activated in the first place or
618 * because we're done, and possibly removing the vdev.
620 ASSERT(mg->mg_activation_count <= 0);
622 taskq_destroy(mg->mg_taskq);
623 avl_destroy(&mg->mg_metaslab_tree);
624 mutex_destroy(&mg->mg_lock);
625 refcount_destroy(&mg->mg_alloc_queue_depth);
626 kmem_free(mg, sizeof (metaslab_group_t));
630 metaslab_group_activate(metaslab_group_t *mg)
632 metaslab_class_t *mc = mg->mg_class;
633 metaslab_group_t *mgprev, *mgnext;
635 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
637 ASSERT(mc->mc_rotor != mg);
638 ASSERT(mg->mg_prev == NULL);
639 ASSERT(mg->mg_next == NULL);
640 ASSERT(mg->mg_activation_count <= 0);
642 if (++mg->mg_activation_count <= 0)
645 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
646 metaslab_group_alloc_update(mg);
648 if ((mgprev = mc->mc_rotor) == NULL) {
652 mgnext = mgprev->mg_next;
653 mg->mg_prev = mgprev;
654 mg->mg_next = mgnext;
655 mgprev->mg_next = mg;
656 mgnext->mg_prev = mg;
659 metaslab_class_minblocksize_update(mc);
663 metaslab_group_passivate(metaslab_group_t *mg)
665 metaslab_class_t *mc = mg->mg_class;
666 metaslab_group_t *mgprev, *mgnext;
668 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
670 if (--mg->mg_activation_count != 0) {
671 ASSERT(mc->mc_rotor != mg);
672 ASSERT(mg->mg_prev == NULL);
673 ASSERT(mg->mg_next == NULL);
674 ASSERT(mg->mg_activation_count < 0);
678 taskq_wait(mg->mg_taskq);
679 metaslab_group_alloc_update(mg);
681 mgprev = mg->mg_prev;
682 mgnext = mg->mg_next;
687 mc->mc_rotor = mgnext;
688 mgprev->mg_next = mgnext;
689 mgnext->mg_prev = mgprev;
694 metaslab_class_minblocksize_update(mc);
698 metaslab_group_initialized(metaslab_group_t *mg)
700 vdev_t *vd = mg->mg_vd;
701 vdev_stat_t *vs = &vd->vdev_stat;
703 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
707 metaslab_group_get_space(metaslab_group_t *mg)
709 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
713 metaslab_group_histogram_verify(metaslab_group_t *mg)
716 vdev_t *vd = mg->mg_vd;
717 uint64_t ashift = vd->vdev_ashift;
720 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
723 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
726 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
727 SPACE_MAP_HISTOGRAM_SIZE + ashift);
729 for (int m = 0; m < vd->vdev_ms_count; m++) {
730 metaslab_t *msp = vd->vdev_ms[m];
732 if (msp->ms_sm == NULL)
735 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
736 mg_hist[i + ashift] +=
737 msp->ms_sm->sm_phys->smp_histogram[i];
740 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
741 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
743 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
747 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
749 metaslab_class_t *mc = mg->mg_class;
750 uint64_t ashift = mg->mg_vd->vdev_ashift;
752 ASSERT(MUTEX_HELD(&msp->ms_lock));
753 if (msp->ms_sm == NULL)
756 mutex_enter(&mg->mg_lock);
757 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
758 mg->mg_histogram[i + ashift] +=
759 msp->ms_sm->sm_phys->smp_histogram[i];
760 mc->mc_histogram[i + ashift] +=
761 msp->ms_sm->sm_phys->smp_histogram[i];
763 mutex_exit(&mg->mg_lock);
767 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
769 metaslab_class_t *mc = mg->mg_class;
770 uint64_t ashift = mg->mg_vd->vdev_ashift;
772 ASSERT(MUTEX_HELD(&msp->ms_lock));
773 if (msp->ms_sm == NULL)
776 mutex_enter(&mg->mg_lock);
777 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
778 ASSERT3U(mg->mg_histogram[i + ashift], >=,
779 msp->ms_sm->sm_phys->smp_histogram[i]);
780 ASSERT3U(mc->mc_histogram[i + ashift], >=,
781 msp->ms_sm->sm_phys->smp_histogram[i]);
783 mg->mg_histogram[i + ashift] -=
784 msp->ms_sm->sm_phys->smp_histogram[i];
785 mc->mc_histogram[i + ashift] -=
786 msp->ms_sm->sm_phys->smp_histogram[i];
788 mutex_exit(&mg->mg_lock);
792 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
794 ASSERT(msp->ms_group == NULL);
795 mutex_enter(&mg->mg_lock);
798 avl_add(&mg->mg_metaslab_tree, msp);
799 mutex_exit(&mg->mg_lock);
801 mutex_enter(&msp->ms_lock);
802 metaslab_group_histogram_add(mg, msp);
803 mutex_exit(&msp->ms_lock);
807 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
809 mutex_enter(&msp->ms_lock);
810 metaslab_group_histogram_remove(mg, msp);
811 mutex_exit(&msp->ms_lock);
813 mutex_enter(&mg->mg_lock);
814 ASSERT(msp->ms_group == mg);
815 avl_remove(&mg->mg_metaslab_tree, msp);
816 msp->ms_group = NULL;
817 mutex_exit(&mg->mg_lock);
821 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
824 * Although in principle the weight can be any value, in
825 * practice we do not use values in the range [1, 511].
827 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
828 ASSERT(MUTEX_HELD(&msp->ms_lock));
830 mutex_enter(&mg->mg_lock);
831 ASSERT(msp->ms_group == mg);
832 avl_remove(&mg->mg_metaslab_tree, msp);
833 msp->ms_weight = weight;
834 avl_add(&mg->mg_metaslab_tree, msp);
835 mutex_exit(&mg->mg_lock);
839 * Calculate the fragmentation for a given metaslab group. We can use
840 * a simple average here since all metaslabs within the group must have
841 * the same size. The return value will be a value between 0 and 100
842  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
843 * group have a fragmentation metric.
846 metaslab_group_fragmentation(metaslab_group_t *mg)
848 vdev_t *vd = mg->mg_vd;
849 uint64_t fragmentation = 0;
850 uint64_t valid_ms = 0;
852 for (int m = 0; m < vd->vdev_ms_count; m++) {
853 metaslab_t *msp = vd->vdev_ms[m];
855 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
859 fragmentation += msp->ms_fragmentation;
862 if (valid_ms <= vd->vdev_ms_count / 2)
863 return (ZFS_FRAG_INVALID);
865 fragmentation /= valid_ms;
866 ASSERT3U(fragmentation, <=, 100);
867 return (fragmentation);
871 * Determine if a given metaslab group should skip allocations. A metaslab
872 * group should avoid allocations if its free capacity is less than the
873 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
874 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
875 * that can still handle allocations. If the allocation throttle is enabled
876 * then we skip allocations to devices that have reached their maximum
877 * allocation queue depth unless the selected metaslab group is the only
878 * eligible group remaining.
881 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
884 spa_t *spa = mg->mg_vd->vdev_spa;
885 metaslab_class_t *mc = mg->mg_class;
888 * We can only consider skipping this metaslab group if it's
889 * in the normal metaslab class and there are other metaslab
890      * groups to select from. Otherwise, we always consider it eligible for allocations.
893 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
897 * If the metaslab group's mg_allocatable flag is set (see comments
898 * in metaslab_group_alloc_update() for more information) and
899 * the allocation throttle is disabled then allow allocations to this
900 * device. However, if the allocation throttle is enabled then
901 * check if we have reached our allocation limit (mg_alloc_queue_depth)
902 * to determine if we should allow allocations to this metaslab group.
903 * If all metaslab groups are no longer considered allocatable
904 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
905 * gang block size then we allow allocations on this metaslab group
906 * regardless of the mg_allocatable or throttle settings.
908 if (mg->mg_allocatable) {
909 metaslab_group_t *mgp;
911 uint64_t qmax = mg->mg_max_alloc_queue_depth;
913 if (!mc->mc_alloc_throttle_enabled)
917 * If this metaslab group does not have any free space, then
918 * there is no point in looking further.
920 if (mg->mg_no_free_space)
923 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
926 * If this metaslab group is below its qmax or it's
927              * the only allocatable metaslab group, then attempt
928 * to allocate from it.
930 if (qdepth < qmax || mc->mc_alloc_groups == 1)
932 ASSERT3U(mc->mc_alloc_groups, >, 1);
935 * Since this metaslab group is at or over its qmax, we
936 * need to determine if there are metaslab groups after this
937 * one that might be able to handle this allocation. This is
938 * racy since we can't hold the locks for all metaslab
939 * groups at the same time when we make this check.
941 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
942 qmax = mgp->mg_max_alloc_queue_depth;
944 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
947 * If there is another metaslab group that
948 * might be able to handle the allocation, then
949 * we return false so that we skip this group.
951 if (qdepth < qmax && !mgp->mg_no_free_space)
956 * We didn't find another group to handle the allocation
957 * so we can't skip this metaslab group even though
958 * we are at or over our qmax.
962 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
969 * ==========================================================================
970 * Range tree callbacks
971 * ==========================================================================
975 * Comparison function for the private size-ordered tree. Tree is sorted
976 * by size, larger sizes at the end of the tree.
979 metaslab_rangesize_compare(const void *x1, const void *x2)
981 const range_seg_t *r1 = x1;
982 const range_seg_t *r2 = x2;
983 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
984 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
986 if (rs_size1 < rs_size2)
988 if (rs_size1 > rs_size2)
991 if (r1->rs_start < r2->rs_start)
994 if (r1->rs_start > r2->rs_start)
1001 * Create any block allocator specific components. The current allocators
1002 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1005 metaslab_rt_create(range_tree_t *rt, void *arg)
1007 metaslab_t *msp = arg;
1009 ASSERT3P(rt->rt_arg, ==, msp);
1010 ASSERT(msp->ms_tree == NULL);
1012 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1013 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1017 * Destroy the block allocator specific components.
1020 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1022 metaslab_t *msp = arg;
1024 ASSERT3P(rt->rt_arg, ==, msp);
1025 ASSERT3P(msp->ms_tree, ==, rt);
1026 ASSERT0(avl_numnodes(&msp->ms_size_tree));
1028 avl_destroy(&msp->ms_size_tree);
1032 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1034 metaslab_t *msp = arg;
1036 ASSERT3P(rt->rt_arg, ==, msp);
1037 ASSERT3P(msp->ms_tree, ==, rt);
1038 VERIFY(!msp->ms_condensing);
1039 avl_add(&msp->ms_size_tree, rs);
1043 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1045 metaslab_t *msp = arg;
1047 ASSERT3P(rt->rt_arg, ==, msp);
1048 ASSERT3P(msp->ms_tree, ==, rt);
1049 VERIFY(!msp->ms_condensing);
1050 avl_remove(&msp->ms_size_tree, rs);
1054 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1056 metaslab_t *msp = arg;
1058 ASSERT3P(rt->rt_arg, ==, msp);
1059 ASSERT3P(msp->ms_tree, ==, rt);
1062 * Normally one would walk the tree freeing nodes along the way.
1063 * Since the nodes are shared with the range trees we can avoid
1064 * walking all nodes and just reinitialize the avl tree. The nodes
1065 * will be freed by the range tree, so we don't want to free them here.
1067 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1068 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1071 static range_tree_ops_t metaslab_rt_ops = {
1073 metaslab_rt_destroy,
1080 * ==========================================================================
1081 * Metaslab block operations
1082 * ==========================================================================
1086 * Return the maximum contiguous segment within the metaslab.
1089 metaslab_block_maxsize(metaslab_t *msp)
1091 avl_tree_t *t = &msp->ms_size_tree;
1094 if (t == NULL || (rs = avl_last(t)) == NULL)
1097 return (rs->rs_end - rs->rs_start);
1101 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
1104 range_tree_t *rt = msp->ms_tree;
1106 VERIFY(!msp->ms_condensing);
1108 start = msp->ms_ops->msop_alloc(msp, size);
1109 if (start != -1ULL) {
1110 vdev_t *vd = msp->ms_group->mg_vd;
1112 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
1113 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
1114 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
1115 range_tree_remove(rt, start, size);
1121 * ==========================================================================
1122 * Common allocator routines
1123 * ==========================================================================
1127 * This is a helper function that can be used by the allocator to find
1128 * a suitable block to allocate. This will search the specified AVL
1129 * tree looking for a block that matches the specified criteria.
1132 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1135 range_seg_t *rs, rsearch;
1138 rsearch.rs_start = *cursor;
1139 rsearch.rs_end = *cursor + size;
1141 rs = avl_find(t, &rsearch, &where);
1143 rs = avl_nearest(t, where, AVL_AFTER);
1145 while (rs != NULL) {
1146 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1148 if (offset + size <= rs->rs_end) {
1149 *cursor = offset + size;
1152 rs = AVL_NEXT(t, rs);
1156 * If we know we've searched the whole map (*cursor == 0), give up.
1157 * Otherwise, reset the cursor to the beginning and try again.
1163 return (metaslab_block_picker(t, cursor, size, align));
1167 * ==========================================================================
1168 * The first-fit block allocator
1169 * ==========================================================================
1172 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1175 * Find the largest power of 2 block size that evenly divides the
1176 * requested size. This is used to try to allocate blocks with similar
1177 * alignment from the same area of the metaslab (i.e. same cursor
1178  * bucket), but it does not guarantee that allocations of other sizes
1179  * will not be present in the same region.
1181 uint64_t align = size & -size;
1182 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1183 avl_tree_t *t = &msp->ms_tree->rt_root;
1185 return (metaslab_block_picker(t, cursor, size, align));
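/*
 * Illustrative example of the cursor bucketing above (hypothetical sizes):
 * a 24K (0x6000) request yields align = size & -size = 0x2000, so it uses
 * the 8K-alignment cursor, while a 32K (0x8000) request yields align =
 * 0x8000 and uses the 32K-alignment cursor. Requests sharing the same
 * low-order alignment therefore tend to be packed into the same region.
 */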
1188 static metaslab_ops_t metaslab_ff_ops = {
1193 * ==========================================================================
1194 * Dynamic block allocator -
1195  * Uses the first-fit allocation scheme until space gets low and then
1196  * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
1197 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1198 * ==========================================================================
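/*
 * For example, with the default tunables (metaslab_df_alloc_threshold of
 * SPA_OLD_MAXBLOCKSIZE, i.e. 128K, and metaslab_df_free_pct of 4): as long
 * as the largest free segment is at least 128K and at least 4% of the
 * metaslab is free, allocations proceed first-fit from the offset-ordered
 * tree; otherwise the size-ordered tree is searched instead.
 */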
1201 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1204 * Find the largest power of 2 block size that evenly divides the
1205 * requested size. This is used to try to allocate blocks with similar
1206 * alignment from the same area of the metaslab (i.e. same cursor
1207  * bucket), but it does not guarantee that allocations of other sizes
1208  * will not be present in the same region.
1210 uint64_t align = size & -size;
1211 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1212 range_tree_t *rt = msp->ms_tree;
1213 avl_tree_t *t = &rt->rt_root;
1214 uint64_t max_size = metaslab_block_maxsize(msp);
1215 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1217 ASSERT(MUTEX_HELD(&msp->ms_lock));
1218 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1220 if (max_size < size)
1224 * If we're running low on space switch to using the size
1225 * sorted AVL tree (best-fit).
1227 if (max_size < metaslab_df_alloc_threshold ||
1228 free_pct < metaslab_df_free_pct) {
1229 t = &msp->ms_size_tree;
1233 return (metaslab_block_picker(t, cursor, size, 1ULL));
1236 static metaslab_ops_t metaslab_df_ops = {
1241 * ==========================================================================
1242 * Cursor fit block allocator -
1243 * Select the largest region in the metaslab, set the cursor to the beginning
1244 * of the range and the cursor_end to the end of the range. As allocations
1245 * are made advance the cursor. Continue allocating from the cursor until
1246 * the range is exhausted and then find a new range.
1247 * ==========================================================================
1250 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1252 range_tree_t *rt = msp->ms_tree;
1253 avl_tree_t *t = &msp->ms_size_tree;
1254 uint64_t *cursor = &msp->ms_lbas[0];
1255 uint64_t *cursor_end = &msp->ms_lbas[1];
1256 uint64_t offset = 0;
1258 ASSERT(MUTEX_HELD(&msp->ms_lock));
1259 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1261 ASSERT3U(*cursor_end, >=, *cursor);
1263 if ((*cursor + size) > *cursor_end) {
1266 rs = avl_last(&msp->ms_size_tree);
1267 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1270 *cursor = rs->rs_start;
1271 *cursor_end = rs->rs_end;
1280 static metaslab_ops_t metaslab_cf_ops = {
1285 * ==========================================================================
1286 * New dynamic fit allocator -
1287 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1288  * contiguous blocks. If no region is found then just use the largest segment that remains.
1290 * ==========================================================================
1294 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1295 * to request from the allocator.
1297 uint64_t metaslab_ndf_clump_shift = 4;
1300 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1302 avl_tree_t *t = &msp->ms_tree->rt_root;
1304 range_seg_t *rs, rsearch;
1305 uint64_t hbit = highbit64(size);
1306 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1307 uint64_t max_size = metaslab_block_maxsize(msp);
1309 ASSERT(MUTEX_HELD(&msp->ms_lock));
1310 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1312 if (max_size < size)
1315 rsearch.rs_start = *cursor;
1316 rsearch.rs_end = *cursor + size;
1318 rs = avl_find(t, &rsearch, &where);
1319 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1320 t = &msp->ms_size_tree;
1322 rsearch.rs_start = 0;
1323 rsearch.rs_end = MIN(max_size,
1324 1ULL << (hbit + metaslab_ndf_clump_shift));
1325 rs = avl_find(t, &rsearch, &where);
1327 rs = avl_nearest(t, where, AVL_AFTER);
1331 if ((rs->rs_end - rs->rs_start) >= size) {
1332 *cursor = rs->rs_start + size;
1333 return (rs->rs_start);
1338 static metaslab_ops_t metaslab_ndf_ops = {
1342 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1345  * ==========================================================================
1346  * Metaslabs
1347 * ==========================================================================
1351 * Wait for any in-progress metaslab loads to complete.
1354 metaslab_load_wait(metaslab_t *msp)
1356 ASSERT(MUTEX_HELD(&msp->ms_lock));
1358 while (msp->ms_loading) {
1359 ASSERT(!msp->ms_loaded);
1360 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1365 metaslab_load(metaslab_t *msp)
1369 ASSERT(MUTEX_HELD(&msp->ms_lock));
1370 ASSERT(!msp->ms_loaded);
1371 ASSERT(!msp->ms_loading);
1373 msp->ms_loading = B_TRUE;
1376 * If the space map has not been allocated yet, then treat
1377      * all the space in the metaslab as free and add it to the ms_tree.
1380 if (msp->ms_sm != NULL)
1381 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1383 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1385 msp->ms_loaded = (error == 0);
1386 msp->ms_loading = B_FALSE;
1388 if (msp->ms_loaded) {
1389 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1390 range_tree_walk(msp->ms_defertree[t],
1391 range_tree_remove, msp->ms_tree);
1394 cv_broadcast(&msp->ms_load_cv);
1399 metaslab_unload(metaslab_t *msp)
1401 ASSERT(MUTEX_HELD(&msp->ms_lock));
1402 range_tree_vacate(msp->ms_tree, NULL, NULL);
1403 msp->ms_loaded = B_FALSE;
1404 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1408 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1411 vdev_t *vd = mg->mg_vd;
1412 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1416 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1417 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1418 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1420 ms->ms_start = id << vd->vdev_ms_shift;
1421 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1424 * We only open space map objects that already exist. All others
1425      * will be opened when we finally allocate an object for them.
1428 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1429 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1432 kmem_free(ms, sizeof (metaslab_t));
1436 ASSERT(ms->ms_sm != NULL);
1440 * We create the main range tree here, but we don't create the
1441 * alloctree and freetree until metaslab_sync_done(). This serves
1442 * two purposes: it allows metaslab_sync_done() to detect the
1443 * addition of new space; and for debugging, it ensures that we'd
1444 * data fault on any attempt to use this metaslab before it's ready.
1446 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1447 metaslab_group_add(mg, ms);
1449 ms->ms_fragmentation = metaslab_fragmentation(ms);
1450 ms->ms_ops = mg->mg_class->mc_ops;
1453 * If we're opening an existing pool (txg == 0) or creating
1454 * a new one (txg == TXG_INITIAL), all space is available now.
1455 * If we're adding space to an existing pool, the new space
1456 * does not become available until after this txg has synced.
1458 if (txg <= TXG_INITIAL)
1459 metaslab_sync_done(ms, 0);
1462 * If metaslab_debug_load is set and we're initializing a metaslab
1463      * that has an allocated space_map object then load its space
1464      * map so that we can verify frees.
1466 if (metaslab_debug_load && ms->ms_sm != NULL) {
1467 mutex_enter(&ms->ms_lock);
1468 VERIFY0(metaslab_load(ms));
1469 mutex_exit(&ms->ms_lock);
1473 vdev_dirty(vd, 0, NULL, txg);
1474 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1483 metaslab_fini(metaslab_t *msp)
1485 metaslab_group_t *mg = msp->ms_group;
1487 metaslab_group_remove(mg, msp);
1489 mutex_enter(&msp->ms_lock);
1491 VERIFY(msp->ms_group == NULL);
1492 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1494 space_map_close(msp->ms_sm);
1496 metaslab_unload(msp);
1497 range_tree_destroy(msp->ms_tree);
1499 for (int t = 0; t < TXG_SIZE; t++) {
1500 range_tree_destroy(msp->ms_alloctree[t]);
1501 range_tree_destroy(msp->ms_freetree[t]);
1504 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1505 range_tree_destroy(msp->ms_defertree[t]);
1508 ASSERT0(msp->ms_deferspace);
1510 mutex_exit(&msp->ms_lock);
1511 cv_destroy(&msp->ms_load_cv);
1512 mutex_destroy(&msp->ms_lock);
1514 kmem_free(msp, sizeof (metaslab_t));
1517 #define FRAGMENTATION_TABLE_SIZE 17
1520 * This table defines a segment size based fragmentation metric that will
1521 * allow each metaslab to derive its own fragmentation value. This is done
1522 * by calculating the space in each bucket of the spacemap histogram and
1523  * multiplying that by the fragmentation metric in this table. Doing
1524 * this for all buckets and dividing it by the total amount of free
1525 * space in this metaslab (i.e. the total free space in all buckets) gives
1526 * us the fragmentation metric. This means that a high fragmentation metric
1527 * equates to most of the free space being comprised of small segments.
1528 * Conversely, if the metric is low, then most of the free space is in
1529 * large segments. A 10% change in fragmentation equates to approximately
1530 * double the number of segments.
1532 * This table defines 0% fragmented space using 16MB segments. Testing has
1533 * shown that segments that are greater than or equal to 16MB do not suffer
1534 * from drastic performance problems. Using this value, we derive the rest
1535 * of the table. Since the fragmentation value is never stored on disk, it
1536 * is possible to change these calculations in the future.
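/*
 * A hypothetical example of how the table is applied: if half of a
 * metaslab's free space falls in buckets whose table value is 90 and the
 * other half in buckets whose value is 20, the metaslab's fragmentation
 * works out to (50% * 90) + (50% * 20) = 55%.
 */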
1538 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1558  * Calculate the metaslab's fragmentation metric. A return value
1559  * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1560  * not support this metric. Otherwise, the return value should be in the range [0, 100].
1564 metaslab_fragmentation(metaslab_t *msp)
1566 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1567 uint64_t fragmentation = 0;
1569 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1570 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1572 if (!feature_enabled)
1573 return (ZFS_FRAG_INVALID);
1576 * A null space map means that the entire metaslab is free
1577 * and thus is not fragmented.
1579 if (msp->ms_sm == NULL)
1583 * If this metaslab's space_map has not been upgraded, flag it
1584 * so that we upgrade next time we encounter it.
1586 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1587 uint64_t txg = spa_syncing_txg(spa);
1588 vdev_t *vd = msp->ms_group->mg_vd;
1590 if (spa_writeable(spa)) {
1591 msp->ms_condense_wanted = B_TRUE;
1592 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1593 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1594 "msp %p, vd %p", txg, msp, vd);
1596 return (ZFS_FRAG_INVALID);
1599 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1601 uint8_t shift = msp->ms_sm->sm_shift;
1602 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1603 FRAGMENTATION_TABLE_SIZE - 1);
1605 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1608 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1611 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1612 fragmentation += space * zfs_frag_table[idx];
1616 fragmentation /= total;
1617 ASSERT3U(fragmentation, <=, 100);
1618 return (fragmentation);
1622 * Compute a weight -- a selection preference value -- for the given metaslab.
1623 * This is based on the amount of free space, the level of fragmentation,
1624 * the LBA range, and whether the metaslab is loaded.
1627 metaslab_weight(metaslab_t *msp)
1629 metaslab_group_t *mg = msp->ms_group;
1630 vdev_t *vd = mg->mg_vd;
1631 uint64_t weight, space;
1633 ASSERT(MUTEX_HELD(&msp->ms_lock));
1636 * This vdev is in the process of being removed so there is nothing
1637 * for us to do here.
1639 if (vd->vdev_removing) {
1640 ASSERT0(space_map_allocated(msp->ms_sm));
1641 ASSERT0(vd->vdev_ms_shift);
1646 * The baseline weight is the metaslab's free space.
1648 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1650 msp->ms_fragmentation = metaslab_fragmentation(msp);
1651 if (metaslab_fragmentation_factor_enabled &&
1652 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1654 * Use the fragmentation information to inversely scale
1655 * down the baseline weight. We need to ensure that we
1656 * don't exclude this metaslab completely when it's 100%
1657          * fragmented. To avoid this we reduce the fragmented value by 1.
1660 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1663 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1664 * this metaslab again. The fragmentation metric may have
1665 * decreased the space to something smaller than
1666 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1667 * so that we can consume any remaining space.
1669 if (space > 0 && space < SPA_MINBLOCKSIZE)
1670 space = SPA_MINBLOCKSIZE;
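/*
 * As a rough example of the scaling above, a metaslab that is 40%
 * fragmented keeps (100 - (40 - 1)) / 100 = 61% of its baseline
 * free-space weight, while a 100% fragmented metaslab still keeps 1%.
 */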
1675 * Modern disks have uniform bit density and constant angular velocity.
1676 * Therefore, the outer recording zones are faster (higher bandwidth)
1677 * than the inner zones by the ratio of outer to inner track diameter,
1678 * which is typically around 2:1. We account for this by assigning
1679 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1680 * In effect, this means that we'll select the metaslab with the most
1681 * free bandwidth rather than simply the one with the most free space.
1683 if (metaslab_lba_weighting_enabled) {
1684 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1685 ASSERT(weight >= space && weight <= 2 * space);
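/*
 * For example, on a top-level vdev with 200 metaslabs, metaslab 0 receives
 * roughly 2x its free-space weight, metaslab 100 roughly 1.5x, and
 * metaslab 199 just over 1x, biasing allocations toward the outer tracks.
 */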
1689 * If this metaslab is one we're actively using, adjust its
1690 * weight to make it preferable to any inactive metaslab so
1691 * we'll polish it off. If the fragmentation on this metaslab
1692  * has exceeded our threshold, then don't mark it active.
1694 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1695 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1696 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1703 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1705 ASSERT(MUTEX_HELD(&msp->ms_lock));
1707 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1708 metaslab_load_wait(msp);
1709 if (!msp->ms_loaded) {
1710 int error = metaslab_load(msp);
1712 metaslab_group_sort(msp->ms_group, msp, 0);
1717 metaslab_group_sort(msp->ms_group, msp,
1718 msp->ms_weight | activation_weight);
1720 ASSERT(msp->ms_loaded);
1721 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1727 metaslab_passivate(metaslab_t *msp, uint64_t size)
1730 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1731 * this metaslab again. In that case, it had better be empty,
1732 * or we would be leaving space on the table.
1734 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1735 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1736 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1740 metaslab_preload(void *arg)
1742 metaslab_t *msp = arg;
1743 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1745 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1747 mutex_enter(&msp->ms_lock);
1748 metaslab_load_wait(msp);
1749 if (!msp->ms_loaded)
1750 (void) metaslab_load(msp);
1753 * Set the ms_access_txg value so that we don't unload it right away.
1755 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1756 mutex_exit(&msp->ms_lock);
1760 metaslab_group_preload(metaslab_group_t *mg)
1762 spa_t *spa = mg->mg_vd->vdev_spa;
1764 avl_tree_t *t = &mg->mg_metaslab_tree;
1767 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1768 taskq_wait(mg->mg_taskq);
1772 mutex_enter(&mg->mg_lock);
1774 * Load the next potential metaslabs
1777 while (msp != NULL) {
1778 metaslab_t *msp_next = AVL_NEXT(t, msp);
1781 * We preload only the maximum number of metaslabs specified
1782 * by metaslab_preload_limit. If a metaslab is being forced
1783 * to condense then we preload it too. This will ensure
1784 * that force condensing happens in the next txg.
1786 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
1792 * We must drop the metaslab group lock here to preserve
1793 * lock ordering with the ms_lock (when grabbing both
1794 * the mg_lock and the ms_lock, the ms_lock must be taken
1795 * first). As a result, it is possible that the ordering
1796 * of the metaslabs within the avl tree may change before
1797 * we reacquire the lock. The metaslab cannot be removed from
1798 * the tree while we're in syncing context so it is safe to
1799 * drop the mg_lock here. If the metaslabs are reordered
1800 * nothing will break -- we just may end up loading a
1801 * less than optimal one.
1803 mutex_exit(&mg->mg_lock);
1804 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1805 msp, TQ_SLEEP) != 0);
1806 mutex_enter(&mg->mg_lock);
1809 mutex_exit(&mg->mg_lock);
1813 * Determine if the space map's on-disk footprint is past our tolerance
1814  * for inefficiency. We would like to use the following criteria to make our decision:
1817 * 1. The size of the space map object should not dramatically increase as a
1818 * result of writing out the free space range tree.
1820 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1821  * times the size of the free space range tree representation
1822  * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1824 * 3. The on-disk size of the space map should actually decrease.
1826 * Checking the first condition is tricky since we don't want to walk
1827 * the entire AVL tree calculating the estimated on-disk size. Instead we
1828 * use the size-ordered range tree in the metaslab and calculate the
1829 * size required to write out the largest segment in our free tree. If the
1830 * size required to represent that segment on disk is larger than the space
1831 * map object then we avoid condensing this map.
1833 * To determine the second criterion we use a best-case estimate and assume
1834 * each segment can be represented on-disk as a single 64-bit entry. We refer
1835 * to this best-case estimate as the space map's minimal form.
1837 * Unfortunately, we cannot compute the on-disk size of the space map in this
1838 * context because we cannot accurately compute the effects of compression, etc.
1839 * Instead, we apply the heuristic described in the block comment for
1840 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1841 * is greater than a threshold number of blocks.
1844 metaslab_should_condense(metaslab_t *msp)
1846 space_map_t *sm = msp->ms_sm;
1848 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
1849 dmu_object_info_t doi;
1850 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
1852 ASSERT(MUTEX_HELD(&msp->ms_lock));
1853 ASSERT(msp->ms_loaded);
1856      * Use the size-ordered AVL tree, ms_size_tree, to
1857 * obtain the largest segment in the free tree. We always condense
1858 * metaslabs that are empty and metaslabs for which a condense
1859 * request has been made.
1861 rs = avl_last(&msp->ms_size_tree);
1862 if (rs == NULL || msp->ms_condense_wanted)
1866 * Calculate the number of 64-bit entries this segment would
1867 * require when written to disk. If this single segment would be
1868 * larger on-disk than the entire current on-disk structure, then
1869 * clearly condensing will increase the on-disk structure size.
1871 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1872 entries = size / (MIN(size, SM_RUN_MAX));
1873 segsz = entries * sizeof (uint64_t);
1875 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
1876 object_size = space_map_length(msp->ms_sm);
1878 dmu_object_info_from_db(sm->sm_dbuf, &doi);
1879 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
1881 return (segsz <= object_size &&
1882 object_size >= (optimal_size * zfs_condense_pct / 100) &&
1883 object_size > zfs_metaslab_condense_block_threshold * record_size);
1887 * Condense the on-disk space map representation to its minimized form.
1888 * The minimized form consists of a small number of allocations followed by
1889 * the entries of the free range tree.
1892 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1894 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1895 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1896 range_tree_t *condense_tree;
1897 space_map_t *sm = msp->ms_sm;
1899 ASSERT(MUTEX_HELD(&msp->ms_lock));
1900 ASSERT3U(spa_sync_pass(spa), ==, 1);
1901 ASSERT(msp->ms_loaded);
1904 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
1905 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
1906 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
1907 msp->ms_group->mg_vd->vdev_spa->spa_name,
1908 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
1909 msp->ms_condense_wanted ? "TRUE" : "FALSE");
1911 msp->ms_condense_wanted = B_FALSE;
1914      * Create a range tree that is 100% allocated. We remove segments
1915 * that have been freed in this txg, any deferred frees that exist,
1916 * and any allocation in the future. Removing segments should be
1917 * a relatively inexpensive operation since we expect these trees to
1918 * have a small number of nodes.
1920 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1921 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1924 * Remove what's been freed in this txg from the condense_tree.
1925 * Since we're in sync_pass 1, we know that all the frees from
1926 * this txg are in the freetree.
1928 range_tree_walk(freetree, range_tree_remove, condense_tree);
1930 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1931 range_tree_walk(msp->ms_defertree[t],
1932 range_tree_remove, condense_tree);
1935 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1936 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1937 range_tree_remove, condense_tree);
1941 * We're about to drop the metaslab's lock thus allowing
1942      * other consumers to change its contents. Set the
1943 * metaslab's ms_condensing flag to ensure that
1944 * allocations on this metaslab do not occur while we're
1945 * in the middle of committing it to disk. This is only critical
1946 * for the ms_tree as all other range trees use per txg
1947 * views of their content.
1949 msp->ms_condensing = B_TRUE;
1951 mutex_exit(&msp->ms_lock);
1952 space_map_truncate(sm, tx);
1953 mutex_enter(&msp->ms_lock);
1956 * While we would ideally like to create a space_map representation
1957 * that consists only of allocation records, doing so can be
1958 * prohibitively expensive because the in-core free tree can be
1959 * large, and therefore computationally expensive to subtract
1960 * from the condense_tree. Instead we sync out two trees, a cheap
1961      * allocation-only tree followed by the in-core free tree. While not
1962      * optimal, this is typically close to optimal, and much cheaper to compute.
1965 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1966 range_tree_vacate(condense_tree, NULL, NULL);
1967 range_tree_destroy(condense_tree);
1969 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1970 msp->ms_condensing = B_FALSE;
1974 * Write a metaslab to disk in the context of the specified transaction group.
1977 metaslab_sync(metaslab_t *msp, uint64_t txg)
1979 metaslab_group_t *mg = msp->ms_group;
1980 vdev_t *vd = mg->mg_vd;
1981 spa_t *spa = vd->vdev_spa;
1982 objset_t *mos = spa_meta_objset(spa);
1983 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1984 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1985 range_tree_t **freed_tree =
1986 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1988 uint64_t object = space_map_object(msp->ms_sm);
1990 ASSERT(!vd->vdev_ishole);
1993 * This metaslab has just been added so there's no work to do now.
1995 if (*freetree == NULL) {
1996 ASSERT3P(alloctree, ==, NULL);
2000 ASSERT3P(alloctree, !=, NULL);
2001 ASSERT3P(*freetree, !=, NULL);
2002 ASSERT3P(*freed_tree, !=, NULL);
2005 * Normally, we don't want to process a metaslab if there
2006 * are no allocations or frees to perform. However, if the metaslab
2007 * is being forced to condense, we need to let it through.
2009 if (range_tree_space(alloctree) == 0 &&
2010 range_tree_space(*freetree) == 0 &&
2011 !msp->ms_condense_wanted)
2015 * The only state that can actually be changing concurrently with
2016 * metaslab_sync() is the metaslab's ms_tree. No other thread can
2017 * be modifying this txg's alloctree, freetree, freed_tree, or
2018 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2019 * space_map ASSERTs. We drop it whenever we call into the DMU,
2020 * because the DMU can call down to us (e.g. via zio_free()) at any time.
2024 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2026 if (msp->ms_sm == NULL) {
2027 uint64_t new_object;
2029 new_object = space_map_alloc(mos, tx);
2030 VERIFY3U(new_object, !=, 0);
2032 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2033 msp->ms_start, msp->ms_size, vd->vdev_ashift,
2035 ASSERT(msp->ms_sm != NULL);
2038 mutex_enter(&msp->ms_lock);
2041 * Note: metaslab_condense() clears the space_map's histogram.
2042 * Therefore we must verify and remove this histogram before condensing.
2045 metaslab_group_histogram_verify(mg);
2046 metaslab_class_histogram_verify(mg->mg_class);
2047 metaslab_group_histogram_remove(mg, msp);
2049 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2050 metaslab_should_condense(msp)) {
2051 metaslab_condense(msp, txg, tx);
2053 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2054 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
2057 if (msp->ms_loaded) {
2059 * When the space map is loaded, we have an accurate
2060 * histogram in the range tree. This gives us an opportunity
2061 * to bring the space map's histogram up-to-date so we clear
2062 * it first before updating it.
2064 space_map_histogram_clear(msp->ms_sm);
2065 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2068 * Since the space map is not loaded we simply update the
2069 * existing histogram with what was freed in this txg. This
2070 * means that the on-disk histogram may not have an accurate
2071 * view of the free space but it's close enough to allow
2072 * us to make allocation decisions.
2074 space_map_histogram_add(msp->ms_sm, *freetree, tx);
2076 metaslab_group_histogram_add(mg, msp);
2077 metaslab_group_histogram_verify(mg);
2078 metaslab_class_histogram_verify(mg->mg_class);
2081 * For sync pass 1, we avoid traversing this txg's free range tree
2082 * and instead will just swap the pointers for freetree and
2083 * freed_tree. We can safely do this since the freed_tree is
2084 * guaranteed to be empty on the initial pass.
2086 if (spa_sync_pass(spa) == 1) {
2087 range_tree_swap(freetree, freed_tree);
2089 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
2091 range_tree_vacate(alloctree, NULL, NULL);
2093 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2094 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2096 mutex_exit(&msp->ms_lock);
2098 if (object != space_map_object(msp->ms_sm)) {
2099 object = space_map_object(msp->ms_sm);
2100 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2101 msp->ms_id, sizeof (uint64_t), &object, tx);
2107 * Called after a transaction group has completely synced to mark
2108 * all of the metaslab's free space as usable.
2111 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2113 metaslab_group_t *mg = msp->ms_group;
2114 vdev_t *vd = mg->mg_vd;
2115 range_tree_t **freed_tree;
2116 range_tree_t **defer_tree;
2117 int64_t alloc_delta, defer_delta;
2119 ASSERT(!vd->vdev_ishole);
2121 mutex_enter(&msp->ms_lock);
2124 * If this metaslab is just becoming available, initialize its
2125 * alloctrees, freetrees, and defertrees, and add its capacity to the vdev.
2128 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
2129 for (int t = 0; t < TXG_SIZE; t++) {
2130 ASSERT(msp->ms_alloctree[t] == NULL);
2131 ASSERT(msp->ms_freetree[t] == NULL);
2133 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2135 msp->ms_freetree[t] = range_tree_create(NULL, msp,
2139 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2140 ASSERT(msp->ms_defertree[t] == NULL);
2142 msp->ms_defertree[t] = range_tree_create(NULL, msp,
2146 vdev_space_update(vd, 0, 0, msp->ms_size);
2149 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
2150 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2152 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2153 defer_delta = range_tree_space(*freed_tree) -
2154 range_tree_space(*defer_tree);
2156 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2158 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2159 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2162 * If there's a metaslab_load() in progress, wait for it to complete
2163 * so that we have a consistent view of the in-core space map.
2165 metaslab_load_wait(msp);
2168 * Move the frees from the defer_tree back to the free
2169 * range tree (if it's loaded). Swap the freed_tree and the
2170 * defer_tree -- this is safe to do because we've just emptied out the defer_tree.
2173 range_tree_vacate(*defer_tree,
2174 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2175 range_tree_swap(freed_tree, defer_tree);
2177 space_map_update(msp->ms_sm);
2179 msp->ms_deferspace += defer_delta;
2180 ASSERT3S(msp->ms_deferspace, >=, 0);
2181 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2182 if (msp->ms_deferspace != 0) {
2184 * Keep syncing this metaslab until all deferred frees
2185 * are back in circulation.
2187 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2190 if (msp->ms_loaded && msp->ms_access_txg < txg) {
2191 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2192 VERIFY0(range_tree_space(
2193 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2196 if (!metaslab_debug_unload)
2197 metaslab_unload(msp);
2200 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2201 mutex_exit(&msp->ms_lock);
2205 metaslab_sync_reassess(metaslab_group_t *mg)
2207 metaslab_group_alloc_update(mg);
2208 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2211 * Preload the next potential metaslabs.
2213 metaslab_group_preload(mg);
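/*
 * Compute the "distance" between a metaslab and a DVA: the byte offset
 * between the metaslab's position and the DVA's offset on the same vdev,
 * or 1ULL << 63 if the DVA lives on a different vdev so that it is never
 * considered close.
 */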
2217 metaslab_distance(metaslab_t *msp, dva_t *dva)
2219 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2220 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2221 uint64_t start = msp->ms_id;
2223 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2224 return (1ULL << 63);
2227 return ((start - offset) << ms_shift);
2229 return ((offset - start) << ms_shift);
2234 * ==========================================================================
2235 * Metaslab block operations
2236 * ==========================================================================
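/*
 * Bump the metaslab group's allocation queue depth for a throttled,
 * asynchronous allocation. The matching decrement below drops the
 * reference when the allocation completes or is undone. Both are no-ops
 * for synchronous or unthrottled allocations.
 */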
2240 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2242 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2243 flags & METASLAB_DONT_THROTTLE)
2246 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2247 if (!mg->mg_class->mc_alloc_throttle_enabled)
2250 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2254 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2256 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2257 flags & METASLAB_DONT_THROTTLE)
2260 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2261 if (!mg->mg_class->mc_alloc_throttle_enabled)
2264 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
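/*
 * Debug check: once the allocation path has settled its queue-depth
 * accounting, none of the block's DVAs should still hold a reference on
 * their metaslab group's mg_alloc_queue_depth for this tag.
 */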
2268 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2271 const dva_t *dva = bp->blk_dva;
2272 int ndvas = BP_GET_NDVAS(bp);
2274 for (int d = 0; d < ndvas; d++) {
2275 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2276 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2277 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
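/*
 * Scan this group's metaslabs, in order of weight, for one that can satisfy
 * an allocation of asize bytes; activate it if necessary and return the
 * allocated offset, or -1ULL if no suitable metaslab was found.
 */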
2283 metaslab_group_alloc(metaslab_group_t *mg, uint64_t asize,
2284 uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2286 spa_t *spa = mg->mg_vd->vdev_spa;
2287 metaslab_t *msp = NULL;
2288 uint64_t offset = -1ULL;
2289 avl_tree_t *t = &mg->mg_metaslab_tree;
2290 uint64_t activation_weight;
2291 uint64_t target_distance;
2294 activation_weight = METASLAB_WEIGHT_PRIMARY;
2295 for (i = 0; i < d; i++) {
2296 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2297 activation_weight = METASLAB_WEIGHT_SECONDARY;
2303 boolean_t was_active;
2305 mutex_enter(&mg->mg_lock);
2306 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
2307 if (msp->ms_weight < asize) {
2308 spa_dbgmsg(spa, "%s: failed to meet weight "
2309 "requirement: vdev %llu, txg %llu, mg %p, "
2310 "msp %p, asize %llu, "
2311 "weight %llu", spa_name(spa),
2312 mg->mg_vd->vdev_id, txg,
2313 mg, msp, asize, msp->ms_weight);
2314 mutex_exit(&mg->mg_lock);
2319 * If the selected metaslab is condensing, skip it.
2321 if (msp->ms_condensing)
2324 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2325 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2328 target_distance = min_distance +
2329 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2332 for (i = 0; i < d; i++)
2333 if (metaslab_distance(msp, &dva[i]) <
2339 mutex_exit(&mg->mg_lock);
2343 mutex_enter(&msp->ms_lock);
2346 * Ensure that the metaslab we have selected is still
2347 * capable of handling our request. It's possible that
2348 * another thread may have changed the weight while we
2349 * were blocked on the metaslab lock.
2351 if (msp->ms_weight < asize || (was_active &&
2352 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
2353 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
2354 mutex_exit(&msp->ms_lock);
2358 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2359 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2360 metaslab_passivate(msp,
2361 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2362 mutex_exit(&msp->ms_lock);
2366 if (metaslab_activate(msp, activation_weight) != 0) {
2367 mutex_exit(&msp->ms_lock);
2372 * If this metaslab is currently condensing then pick again as
2373 * we can't manipulate this metaslab until it's committed to disk.
2376 if (msp->ms_condensing) {
2377 mutex_exit(&msp->ms_lock);
2381 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
2384 metaslab_passivate(msp, metaslab_block_maxsize(msp));
2385 mutex_exit(&msp->ms_lock);
2388 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2389 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2391 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
2392 msp->ms_access_txg = txg + metaslab_unload_delay;
2394 mutex_exit(&msp->ms_lock);
2399 * Allocate a block for the specified i/o.
2402 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2403 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
2405 metaslab_group_t *mg, *rotor;
2409 int zio_lock = B_FALSE;
2410 boolean_t allocatable;
2414 ASSERT(!DVA_IS_VALID(&dva[d]));
2417 * For testing, make some blocks above a certain size be gang blocks.
2419 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
2420 return (SET_ERROR(ENOSPC));
2423 * Start at the rotor and loop through all mgs until we find something.
2424 * Note that there's no locking on mc_rotor or mc_aliquot because
2425 * nothing actually breaks if we miss a few updates -- we just won't
2426 * allocate quite as evenly. It all balances out over time.
2428 * If we are doing ditto or log blocks, try to spread them across
2429 * consecutive vdevs. If we're forced to reuse a vdev before we've
2430 * allocated all of our ditto blocks, then try to spread them out on
2431 * that vdev as much as possible. If it turns out not to be possible,
2432 * gradually lower our standards until anything becomes acceptable.
2433 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2434 * gives us hope of containing our fault domains to something we're
2435 * able to reason about. Otherwise, any two top-level vdev failures
2436 * will guarantee the loss of data. With consecutive allocation,
2437 * only two adjacent top-level vdev failures will result in data loss.
2439 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2440 * ourselves on the same vdev as our gang block header. That
2441 * way, we can hope for locality in vdev_cache, plus it makes our
2442 * fault domains something tractable.
2445 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2448 * It's possible the vdev we're using as the hint no
2449 * longer exists (i.e. removed). Consult the rotor when that happens.
2455 if (flags & METASLAB_HINTBP_AVOID &&
2456 mg->mg_next != NULL)
2461 } else if (d != 0) {
2462 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2463 mg = vd->vdev_mg->mg_next;
2469 * If the hint put us into the wrong metaslab class, or into a
2470 * metaslab group that has been passivated, just follow the rotor.
2472 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2479 ASSERT(mg->mg_activation_count == 1);
2483 * Don't allocate from faulted devices.
2486 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2487 allocatable = vdev_allocatable(vd);
2488 spa_config_exit(spa, SCL_ZIO, FTAG);
2490 allocatable = vdev_allocatable(vd);
2494 * Determine if the selected metaslab group is eligible
2495 * for allocations. If we're ganging then don't allow
2496 * this metaslab group to skip allocations since that would
2497 * inadvertently return ENOSPC and suspend the pool
2498 * even though space is still available.
2500 if (allocatable && !GANG_ALLOCATION(flags) && !zio_lock) {
2501 allocatable = metaslab_group_allocatable(mg, rotor,
2508 ASSERT(mg->mg_initialized);
2511 * Avoid writing single-copy data to a failing vdev.
2513 if ((vd->vdev_stat.vs_write_errors > 0 ||
2514 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2515 d == 0 && dshift == 3 && vd->vdev_children == 0) {
2520 ASSERT(mg->mg_class == mc);
2522 distance = vd->vdev_asize >> dshift;
2523 if (distance <= (1ULL << vd->vdev_ms_shift))
2528 asize = vdev_psize_to_asize(vd, psize);
2529 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2531 uint64_t offset = metaslab_group_alloc(mg, asize, txg,
2534 mutex_enter(&mg->mg_lock);
2535 if (offset == -1ULL) {
2536 mg->mg_failed_allocations++;
2537 if (asize == SPA_GANGBLOCKSIZE) {
2539 * This metaslab group was unable to allocate
2540 * the minimum gang block size so it must be
2541 * out of space. We must notify the allocation
2542 * throttle to start skipping allocation
2543 * attempts to this metaslab group until more
2544 * space becomes available.
2546 * Note: this failure cannot be caused by the
2547 * allocation throttle since the allocation
2548 * throttle is only responsible for skipping
2549 * devices and not failing block allocations.
2551 mg->mg_no_free_space = B_TRUE;
2554 mg->mg_allocations++;
2555 mutex_exit(&mg->mg_lock);
2557 if (offset != -1ULL) {
2559 * If we've just selected this metaslab group,
2560 * figure out whether the corresponding vdev is
2561 * over- or under-used relative to the pool,
2562 * and set an allocation bias to even it out.
2564 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
2565 vdev_stat_t *vs = &vd->vdev_stat;
2568 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2569 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2572 * Calculate how much more or less we should
2573 * try to allocate from this device during
2574 * this iteration around the rotor.
2575 * For example, if a device is 80% full
2576 * and the pool is 20% full then we should
2577 * reduce allocations by 60% on this device.
2579 * mg_bias = (20 - 80) * 512K / 100 = -307K
2581 * This reduces allocations by 307K for this iteration.
2584 mg->mg_bias = ((cu - vu) *
2585 (int64_t)mg->mg_aliquot) / 100;
2586 } else if (!metaslab_bias_enabled) {
2590 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2591 mg->mg_aliquot + mg->mg_bias) {
2592 mc->mc_rotor = mg->mg_next;
2596 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2597 DVA_SET_OFFSET(&dva[d], offset);
2598 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2599 DVA_SET_ASIZE(&dva[d], asize);
2604 mc->mc_rotor = mg->mg_next;
2606 } while ((mg = mg->mg_next) != rotor);
2610 ASSERT(dshift < 64);
2614 if (!allocatable && !zio_lock) {
2620 bzero(&dva[d], sizeof (dva_t));
2622 return (SET_ERROR(ENOSPC));
2626 * Free the block represented by DVA in the context of the specified
2627 * transaction group.
2630 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2632 uint64_t vdev = DVA_GET_VDEV(dva);
2633 uint64_t offset = DVA_GET_OFFSET(dva);
2634 uint64_t size = DVA_GET_ASIZE(dva);
2638 ASSERT(DVA_IS_VALID(dva));
2640 if (txg > spa_freeze_txg(spa))
2643 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2644 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2645 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2646 (u_longlong_t)vdev, (u_longlong_t)offset);
2651 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2653 if (DVA_GET_GANG(dva))
2654 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2656 mutex_enter(&msp->ms_lock);
2659 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2662 VERIFY(!msp->ms_condensing);
2663 VERIFY3U(offset, >=, msp->ms_start);
2664 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2665 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2667 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2668 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2669 range_tree_add(msp->ms_tree, offset, size);
2671 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2672 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2673 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2677 mutex_exit(&msp->ms_lock);
2681 * Intent log support: upon opening the pool after a crash, notify the SPA
2682 * of blocks that the intent log has allocated for immediate write, but
2683 * which are still considered free by the SPA because the last transaction
2684 * group didn't commit yet.
2687 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2689 uint64_t vdev = DVA_GET_VDEV(dva);
2690 uint64_t offset = DVA_GET_OFFSET(dva);
2691 uint64_t size = DVA_GET_ASIZE(dva);
2696 ASSERT(DVA_IS_VALID(dva));
2698 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2699 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2700 return (SET_ERROR(ENXIO));
2702 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2704 if (DVA_GET_GANG(dva))
2705 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2707 mutex_enter(&msp->ms_lock);
2709 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2710 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2712 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2713 error = SET_ERROR(ENOENT);
2715 if (error || txg == 0) { /* txg == 0 indicates dry run */
2716 mutex_exit(&msp->ms_lock);
2720 VERIFY(!msp->ms_condensing);
2721 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2722 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2723 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2724 range_tree_remove(msp->ms_tree, offset, size);
2726 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2727 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2728 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2729 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2732 mutex_exit(&msp->ms_lock);
2738 * Reserve some allocation slots. The reservation system must be called
2739 * before we call into the allocator. If there aren't any available slots
2740 * then the I/O will be throttled until an I/O completes and its slots are
2741 * freed up. The function returns true if it was successful in placing the reservation.
2745 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
2748 uint64_t available_slots = 0;
2749 boolean_t slot_reserved = B_FALSE;
2751 ASSERT(mc->mc_alloc_throttle_enabled);
2752 mutex_enter(&mc->mc_lock);
2754 uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
2755 if (reserved_slots < mc->mc_alloc_max_slots)
2756 available_slots = mc->mc_alloc_max_slots - reserved_slots;
2758 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
2760 * We reserve the slots individually so that we can unreserve
2761 * them individually when an I/O completes.
2763 for (int d = 0; d < slots; d++) {
2764 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
2766 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
2767 slot_reserved = B_TRUE;
2770 mutex_exit(&mc->mc_lock);
2771 return (slot_reserved);
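/*
 * Drop the per-I/O slot references taken in metaslab_class_throttle_reserve().
 */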
2775 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
2777 ASSERT(mc->mc_alloc_throttle_enabled);
2778 mutex_enter(&mc->mc_lock);
2779 for (int d = 0; d < slots; d++) {
2780 (void) refcount_remove(&mc->mc_alloc_slots, zio);
2782 mutex_exit(&mc->mc_lock);
2786 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2787 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, zio_t *zio)
2789 dva_t *dva = bp->blk_dva;
2790 dva_t *hintdva = hintbp->blk_dva;
2793 ASSERT(bp->blk_birth == 0);
2794 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2796 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2798 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2799 spa_config_exit(spa, SCL_ALLOC, FTAG);
2800 return (SET_ERROR(ENOSPC));
2803 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2804 ASSERT(BP_GET_NDVAS(bp) == 0);
2805 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2807 for (int d = 0; d < ndvas; d++) {
2808 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2811 for (d--; d >= 0; d--) {
2812 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2813 metaslab_group_alloc_decrement(spa,
2814 DVA_GET_VDEV(&dva[d]), zio, flags);
2815 bzero(&dva[d], sizeof (dva_t));
2817 spa_config_exit(spa, SCL_ALLOC, FTAG);
2821 * Update the metaslab group's queue depth
2822 * based on the newly allocated dva.
2824 metaslab_group_alloc_increment(spa,
2825 DVA_GET_VDEV(&dva[d]), zio, flags);
2830 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2832 spa_config_exit(spa, SCL_ALLOC, FTAG);
2834 BP_SET_BIRTH(bp, txg, txg);
2840 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2842 const dva_t *dva = bp->blk_dva;
2843 int ndvas = BP_GET_NDVAS(bp);
2845 ASSERT(!BP_IS_HOLE(bp));
2846 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2848 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2850 for (int d = 0; d < ndvas; d++)
2851 metaslab_free_dva(spa, &dva[d], txg, now);
2853 spa_config_exit(spa, SCL_FREE, FTAG);
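/*
 * Claim each DVA of the block pointer on behalf of the intent log. A dry
 * run (txg == 0) is performed first so that a partial failure never has
 * to be unwound.
 */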
2857 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2859 const dva_t *dva = bp->blk_dva;
2860 int ndvas = BP_GET_NDVAS(bp);
2863 ASSERT(!BP_IS_HOLE(bp));
2867 * First do a dry run to make sure all DVAs are claimable,
2868 * so we don't have to unwind from partial failures below.
2870 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2874 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2876 for (int d = 0; d < ndvas; d++)
2877 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2880 spa_config_exit(spa, SCL_ALLOC, FTAG);
2882 ASSERT(error == 0 || txg == 0);
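/*
 * Debug check (ZFS_DEBUG_ZIO_FREE): verify that none of the block's DVAs
 * are already present in the in-core free range trees; finding one there
 * would indicate a double free.
 */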
2888 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2890 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2893 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2894 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2895 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2896 vdev_t *vd = vdev_lookup_top(spa, vdev);
2897 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2898 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2899 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2902 range_tree_verify(msp->ms_tree, offset, size);
2904 for (int j = 0; j < TXG_SIZE; j++)
2905 range_tree_verify(msp->ms_freetree[j], offset, size);
2906 for (int j = 0; j < TXG_DEFER_SIZE; j++)
2907 range_tree_verify(msp->ms_defertree[j], offset, size);
2909 spa_config_exit(spa, SCL_VDEV, FTAG);