4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
28 #include <sys/zfs_context.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
38 SYSCTL_DECL(_vfs_zfs);
39 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
41 #define GANG_ALLOCATION(flags) \
42 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
44 uint64_t metaslab_aliquot = 512ULL << 10;
45 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
46 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
47 &metaslab_gang_bang, 0,
48 "Force gang block allocation for blocks larger than or equal to this value");
51 * The in-core space map representation is more compact than its on-disk form.
52 * The zfs_condense_pct determines how much more compact the in-core
53 * space map representation must be before we compact it on-disk.
54 * Values should be greater than or equal to 100.
56 int zfs_condense_pct = 200;
57 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
59 "Condense on-disk spacemap when it is more than this many percents"
60 " of in-memory counterpart");
63 * Condensing a metaslab is not guaranteed to actually reduce the amount of
64 * space used on disk. In particular, a space map uses data in increments of
65 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
66 * same number of blocks after condensing. Since the goal of condensing is to
67 * reduce the number of IOPs required to read the space map, we only want to
68 * condense when we can be sure we will reduce the number of blocks used by the
69 * space map. Unfortunately, we cannot precisely compute whether or not this is
70 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
71 * we apply the following heuristic: do not condense a spacemap unless the
72 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold blocks.
75 int zfs_metaslab_condense_block_threshold = 4;
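/*
 * Illustrative example (assuming a 4K space map block size, which is an
 * assumption, not a quote from this file): with the default threshold of 4,
 * a space map is only considered for condensing once its uncondensed on-disk
 * representation spans more than 4 blocks, i.e. more than ~16K.
 */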
78 * The zfs_mg_noalloc_threshold defines which metaslab groups should
79 * be eligible for allocation. The value is defined as a percentage of
80 * free space. Metaslab groups that have more free space than
81 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
82 * a metaslab group's free space is less than or equal to the
83 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
84 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
85 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
86 * groups are allowed to accept allocations. Gang blocks are always
87 * eligible to allocate on any metaslab group. The default value of 0 means
88 * no metaslab group will be excluded based on this criterion.
90 int zfs_mg_noalloc_threshold = 0;
91 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
92 &zfs_mg_noalloc_threshold, 0,
93 "Percentage of metaslab group size that should be free"
94 " to make it eligible for allocation");
97 * Metaslab groups are considered eligible for allocations if their
98 * fragmentation metric (measured as a percentage) is less than or equal to
99 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
100 * then it will be skipped unless all metaslab groups within the metaslab
101 * class have also crossed this threshold.
103 int zfs_mg_fragmentation_threshold = 85;
104 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_fragmentation_threshold, CTLFLAG_RWTUN,
105 &zfs_mg_fragmentation_threshold, 0,
106 "Percentage of metaslab group size that should be considered "
107 "eligible for allocations unless all metaslab groups within the metaslab class "
108 "have also crossed this threshold");
111 * Allow metaslabs to keep their active state as long as their fragmentation
112 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
113 * active metaslab that exceeds this threshold will no longer keep its active
114 * status allowing better metaslabs to be selected.
116 int zfs_metaslab_fragmentation_threshold = 70;
117 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_threshold, CTLFLAG_RWTUN,
118 &zfs_metaslab_fragmentation_threshold, 0,
119 "Maximum percentage of metaslab fragmentation level to keep their active state");
122 * When set will load all metaslabs when pool is first opened.
124 int metaslab_debug_load = 0;
125 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
126 &metaslab_debug_load, 0,
127 "Load all metaslabs when pool is first opened");
130 * When set will prevent metaslabs from being unloaded.
132 int metaslab_debug_unload = 0;
133 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
134 &metaslab_debug_unload, 0,
135 "Prevent metaslabs from being unloaded");
138 * Minimum size which forces the dynamic allocator to change
139 * its allocation strategy. Once the space map cannot satisfy
140 * an allocation of this size then it switches to using a more
141 * aggressive strategy (i.e. search by size rather than offset).
143 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
144 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
145 &metaslab_df_alloc_threshold, 0,
146 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
149 * The minimum free space, in percent, which must be available
150 * in a space map to continue allocations in a first-fit fashion.
151 * Once the space map's free space drops below this level we dynamically
152 * switch to using best-fit allocations.
154 int metaslab_df_free_pct = 4;
155 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
156 &metaslab_df_free_pct, 0,
157 "The minimum free space, in percent, which must be available in a "
158 "space map to continue allocations in a first-fit fashion");
161 * A metaslab is considered "free" if it contains a contiguous
162 * segment which is greater than metaslab_min_alloc_size.
164 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
165 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
166 &metaslab_min_alloc_size, 0,
167 "A metaslab is considered \"free\" if it contains a contiguous "
168 "segment which is greater than vfs.zfs.metaslab.min_alloc_size");
171 * Percentage of all cpus that can be used by the metaslab taskq.
173 int metaslab_load_pct = 50;
174 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
175 &metaslab_load_pct, 0,
176 "Percentage of cpus that can be used by the metaslab taskq");
179 * Determines how many txgs a metaslab may remain loaded without having any
180 * allocations from it. As long as a metaslab continues to be used we will keep it loaded.
183 int metaslab_unload_delay = TXG_SIZE * 2;
184 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
185 &metaslab_unload_delay, 0,
186 "Number of TXGs that an unused metaslab can be kept in memory");
189 * Max number of metaslabs per group to preload.
191 int metaslab_preload_limit = SPA_DVAS_PER_BP;
192 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
193 &metaslab_preload_limit, 0,
194 "Max number of metaslabs per group to preload");
197 * Enable/disable preloading of metaslabs.
199 boolean_t metaslab_preload_enabled = B_TRUE;
200 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
201 &metaslab_preload_enabled, 0,
202 "Max number of metaslabs per group to preload");
205 * Enable/disable fragmentation weighting on metaslabs.
207 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
208 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, fragmentation_factor_enabled, CTLFLAG_RWTUN,
209 &metaslab_fragmentation_factor_enabled, 0,
210 "Enable fragmentation weighting on metaslabs");
213 * Enable/disable lba weighting (i.e. outer tracks are given preference).
215 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
216 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, lba_weighting_enabled, CTLFLAG_RWTUN,
217 &metaslab_lba_weighting_enabled, 0,
218 "Enable LBA weighting (i.e. outer tracks are given preference)");
221 * Enable/disable metaslab group biasing.
223 boolean_t metaslab_bias_enabled = B_TRUE;
224 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, bias_enabled, CTLFLAG_RWTUN,
225 &metaslab_bias_enabled, 0,
226 "Enable metaslab group biasing");
229 * Enable/disable segment-based metaslab selection.
231 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
234 * When using segment-based metaslab selection, we will continue
235 * allocating from the active metaslab until we have exhausted
236 * zfs_metaslab_switch_threshold of its buckets.
238 int zfs_metaslab_switch_threshold = 2;
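/*
 * Example: with zfs_metaslab_switch_threshold = 2, a metaslab activated
 * while its largest free segments fell in histogram bucket i is proactively
 * passivated once its recomputed weight index drops to i - 2 or lower
 * (see metaslab_segment_may_passivate()).
 */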
241 * Internal switch to enable/disable the metaslab allocation tracing
244 boolean_t metaslab_trace_enabled = B_TRUE;
247 * Maximum entries that the metaslab allocation tracing facility will keep
248 * in a given list when running in non-debug mode. We limit the number
249 * of entries in non-debug mode to prevent us from using up too much memory.
250 * The limit should be sufficiently large that we don't expect any allocation
251 * to ever exceed this value. In debug mode, the system will panic if this
252 * limit is ever reached, allowing for further investigation.
254 uint64_t metaslab_trace_max_entries = 5000;
256 static uint64_t metaslab_weight(metaslab_t *);
257 static void metaslab_set_fragmentation(metaslab_t *);
259 kmem_cache_t *metaslab_alloc_trace_cache;
262 * ==========================================================================
264 * ==========================================================================
267 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
269 metaslab_class_t *mc;
271 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
276 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
277 refcount_create_tracked(&mc->mc_alloc_slots);
283 metaslab_class_destroy(metaslab_class_t *mc)
285 ASSERT(mc->mc_rotor == NULL);
286 ASSERT(mc->mc_alloc == 0);
287 ASSERT(mc->mc_deferred == 0);
288 ASSERT(mc->mc_space == 0);
289 ASSERT(mc->mc_dspace == 0);
291 refcount_destroy(&mc->mc_alloc_slots);
292 mutex_destroy(&mc->mc_lock);
293 kmem_free(mc, sizeof (metaslab_class_t));
297 metaslab_class_validate(metaslab_class_t *mc)
299 metaslab_group_t *mg;
303 * Must hold one of the spa_config locks.
305 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
306 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
308 if ((mg = mc->mc_rotor) == NULL)
313 ASSERT(vd->vdev_mg != NULL);
314 ASSERT3P(vd->vdev_top, ==, vd);
315 ASSERT3P(mg->mg_class, ==, mc);
316 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
317 } while ((mg = mg->mg_next) != mc->mc_rotor);
323 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
324 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
326 atomic_add_64(&mc->mc_alloc, alloc_delta);
327 atomic_add_64(&mc->mc_deferred, defer_delta);
328 atomic_add_64(&mc->mc_space, space_delta);
329 atomic_add_64(&mc->mc_dspace, dspace_delta);
333 metaslab_class_minblocksize_update(metaslab_class_t *mc)
335 metaslab_group_t *mg;
337 uint64_t minashift = UINT64_MAX;
339 if ((mg = mc->mc_rotor) == NULL) {
340 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
346 if (vd->vdev_ashift < minashift)
347 minashift = vd->vdev_ashift;
348 } while ((mg = mg->mg_next) != mc->mc_rotor);
350 mc->mc_minblocksize = 1ULL << minashift;
354 metaslab_class_get_alloc(metaslab_class_t *mc)
356 return (mc->mc_alloc);
360 metaslab_class_get_deferred(metaslab_class_t *mc)
362 return (mc->mc_deferred);
366 metaslab_class_get_space(metaslab_class_t *mc)
368 return (mc->mc_space);
372 metaslab_class_get_dspace(metaslab_class_t *mc)
374 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
378 metaslab_class_get_minblocksize(metaslab_class_t *mc)
380 return (mc->mc_minblocksize);
384 metaslab_class_histogram_verify(metaslab_class_t *mc)
386 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
390 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
393 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
396 for (int c = 0; c < rvd->vdev_children; c++) {
397 vdev_t *tvd = rvd->vdev_child[c];
398 metaslab_group_t *mg = tvd->vdev_mg;
401 * Skip any holes, uninitialized top-levels, or
402 * vdevs that are not in this metaslab class.
404 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
405 mg->mg_class != mc) {
409 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
410 mc_hist[i] += mg->mg_histogram[i];
413 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
414 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
416 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
420 * Calculate the metaslab class's fragmentation metric. The metric
421 * is weighted based on the space contribution of each metaslab group.
422 * The return value will be a number between 0 and 100 (inclusive), or
423 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
424 * zfs_frag_table for more information about the metric.
427 metaslab_class_fragmentation(metaslab_class_t *mc)
429 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
430 uint64_t fragmentation = 0;
432 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
434 for (int c = 0; c < rvd->vdev_children; c++) {
435 vdev_t *tvd = rvd->vdev_child[c];
436 metaslab_group_t *mg = tvd->vdev_mg;
439 * Skip any holes, uninitialized top-levels, or
440 * vdevs that are not in this metaslab class.
442 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
443 mg->mg_class != mc) {
448 * If a metaslab group does not contain a fragmentation
449 * metric then just bail out.
451 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
452 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
453 return (ZFS_FRAG_INVALID);
457 * Determine how much this metaslab_group is contributing
458 * to the overall pool fragmentation metric.
460 fragmentation += mg->mg_fragmentation *
461 metaslab_group_get_space(mg);
463 fragmentation /= metaslab_class_get_space(mc);
465 ASSERT3U(fragmentation, <=, 100);
466 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
467 return (fragmentation);
471 * Calculate the amount of expandable space that is available in
472 * this metaslab class. If a device is expanded then its expandable
473 * space will be the amount of allocatable space that is currently not
474 * part of this metaslab class.
477 metaslab_class_expandable_space(metaslab_class_t *mc)
479 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
482 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
483 for (int c = 0; c < rvd->vdev_children; c++) {
484 vdev_t *tvd = rvd->vdev_child[c];
485 metaslab_group_t *mg = tvd->vdev_mg;
487 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
488 mg->mg_class != mc) {
493 * Calculate if we have enough space to add additional
494 * metaslabs. We report the expandable space in terms
495 * of the metaslab size since that's the unit of expansion.
497 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
498 1ULL << tvd->vdev_ms_shift);
500 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
505 metaslab_compare(const void *x1, const void *x2)
507 const metaslab_t *m1 = x1;
508 const metaslab_t *m2 = x2;
510 if (m1->ms_weight < m2->ms_weight)
512 if (m1->ms_weight > m2->ms_weight)
516 * If the weights are identical, use the offset to force uniqueness.
518 if (m1->ms_start < m2->ms_start)
520 if (m1->ms_start > m2->ms_start)
523 ASSERT3P(m1, ==, m2);
529 * Verify that the space accounting on disk matches the in-core range_trees.
532 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
534 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
535 uint64_t allocated = 0;
536 uint64_t sm_free_space, msp_free_space;
538 ASSERT(MUTEX_HELD(&msp->ms_lock));
540 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
544 * We can only verify the metaslab space when we're called
545 * from syncing context with a loaded metaslab that has an allocated
546 * space map. Calling this in non-syncing context does not
547 * provide a consistent view of the metaslab since we're performing
548 * allocations in the future.
550 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
554 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
555 space_map_alloc_delta(msp->ms_sm);
558 * Account for future allocations since we would have already
559 * deducted that space from the ms_freetree.
561 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
563 range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
566 msp_free_space = range_tree_space(msp->ms_tree) + allocated +
567 msp->ms_deferspace + range_tree_space(msp->ms_freedtree);
569 VERIFY3U(sm_free_space, ==, msp_free_space);
573 * ==========================================================================
575 * ==========================================================================
578 * Update the allocatable flag and the metaslab group's capacity.
579 * The allocatable flag is set to true if the capacity is below
580 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
581 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
582 * transitions from allocatable to non-allocatable or vice versa then the
583 * metaslab group's class is updated to reflect the transition.
586 metaslab_group_alloc_update(metaslab_group_t *mg)
588 vdev_t *vd = mg->mg_vd;
589 metaslab_class_t *mc = mg->mg_class;
590 vdev_stat_t *vs = &vd->vdev_stat;
591 boolean_t was_allocatable;
592 boolean_t was_initialized;
594 ASSERT(vd == vd->vdev_top);
596 mutex_enter(&mg->mg_lock);
597 was_allocatable = mg->mg_allocatable;
598 was_initialized = mg->mg_initialized;
600 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
603 mutex_enter(&mc->mc_lock);
606 * If the metaslab group was just added then it won't
607 * have any space until we finish syncing out this txg.
608 * At that point we will consider it initialized and available
609 * for allocations. We also don't consider non-activated
610 * metaslab groups (e.g. vdevs that are in the middle of being removed)
611 * to be initialized, because they can't be used for allocation.
613 mg->mg_initialized = metaslab_group_initialized(mg);
614 if (!was_initialized && mg->mg_initialized) {
616 } else if (was_initialized && !mg->mg_initialized) {
617 ASSERT3U(mc->mc_groups, >, 0);
620 if (mg->mg_initialized)
621 mg->mg_no_free_space = B_FALSE;
624 * A metaslab group is considered allocatable if it has plenty
625 * of free space or is not heavily fragmented. We only take
626 * fragmentation into account if the metaslab group has a valid
627 * fragmentation metric (i.e. a value between 0 and 100).
629 mg->mg_allocatable = (mg->mg_activation_count > 0 &&
630 mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
631 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
632 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
635 * The mc_alloc_groups maintains a count of the number of
636 * groups in this metaslab class that are still above the
637 * zfs_mg_noalloc_threshold. This is used by the allocating
638 * threads to determine if they should avoid allocations to
639 * a given group. The allocator will avoid allocations to a group
640 * if that group has reached or is below the zfs_mg_noalloc_threshold
641 * and there are still other groups that are above the threshold.
642 * When a group transitions from allocatable to non-allocatable or
643 * vice versa we update the metaslab class to reflect that change.
644 * When the mc_alloc_groups value drops to 0 that means that all
645 * groups have reached the zfs_mg_noalloc_threshold making all groups
646 * eligible for allocations. This effectively means that all devices
647 * are balanced again.
649 if (was_allocatable && !mg->mg_allocatable)
650 mc->mc_alloc_groups--;
651 else if (!was_allocatable && mg->mg_allocatable)
652 mc->mc_alloc_groups++;
653 mutex_exit(&mc->mc_lock);
655 mutex_exit(&mg->mg_lock);
659 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
661 metaslab_group_t *mg;
663 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
664 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
665 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
666 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
669 mg->mg_activation_count = 0;
670 mg->mg_initialized = B_FALSE;
671 mg->mg_no_free_space = B_TRUE;
672 refcount_create_tracked(&mg->mg_alloc_queue_depth);
674 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
675 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
681 metaslab_group_destroy(metaslab_group_t *mg)
683 ASSERT(mg->mg_prev == NULL);
684 ASSERT(mg->mg_next == NULL);
686 * We may have gone below zero with the activation count
687 * either because we never activated in the first place or
688 * because we're done, and possibly removing the vdev.
690 ASSERT(mg->mg_activation_count <= 0);
692 taskq_destroy(mg->mg_taskq);
693 avl_destroy(&mg->mg_metaslab_tree);
694 mutex_destroy(&mg->mg_lock);
695 refcount_destroy(&mg->mg_alloc_queue_depth);
696 kmem_free(mg, sizeof (metaslab_group_t));
700 metaslab_group_activate(metaslab_group_t *mg)
702 metaslab_class_t *mc = mg->mg_class;
703 metaslab_group_t *mgprev, *mgnext;
705 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
707 ASSERT(mc->mc_rotor != mg);
708 ASSERT(mg->mg_prev == NULL);
709 ASSERT(mg->mg_next == NULL);
710 ASSERT(mg->mg_activation_count <= 0);
712 if (++mg->mg_activation_count <= 0)
715 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
716 metaslab_group_alloc_update(mg);
718 if ((mgprev = mc->mc_rotor) == NULL) {
722 mgnext = mgprev->mg_next;
723 mg->mg_prev = mgprev;
724 mg->mg_next = mgnext;
725 mgprev->mg_next = mg;
726 mgnext->mg_prev = mg;
729 metaslab_class_minblocksize_update(mc);
733 metaslab_group_passivate(metaslab_group_t *mg)
735 metaslab_class_t *mc = mg->mg_class;
736 metaslab_group_t *mgprev, *mgnext;
738 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
740 if (--mg->mg_activation_count != 0) {
741 ASSERT(mc->mc_rotor != mg);
742 ASSERT(mg->mg_prev == NULL);
743 ASSERT(mg->mg_next == NULL);
744 ASSERT(mg->mg_activation_count < 0);
748 taskq_wait(mg->mg_taskq);
749 metaslab_group_alloc_update(mg);
751 mgprev = mg->mg_prev;
752 mgnext = mg->mg_next;
757 mc->mc_rotor = mgnext;
758 mgprev->mg_next = mgnext;
759 mgnext->mg_prev = mgprev;
764 metaslab_class_minblocksize_update(mc);
768 metaslab_group_initialized(metaslab_group_t *mg)
770 vdev_t *vd = mg->mg_vd;
771 vdev_stat_t *vs = &vd->vdev_stat;
773 return (vs->vs_space != 0 && mg->mg_activation_count > 0);
777 metaslab_group_get_space(metaslab_group_t *mg)
779 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
783 metaslab_group_histogram_verify(metaslab_group_t *mg)
786 vdev_t *vd = mg->mg_vd;
787 uint64_t ashift = vd->vdev_ashift;
790 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
793 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
796 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
797 SPACE_MAP_HISTOGRAM_SIZE + ashift);
799 for (int m = 0; m < vd->vdev_ms_count; m++) {
800 metaslab_t *msp = vd->vdev_ms[m];
802 if (msp->ms_sm == NULL)
805 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
806 mg_hist[i + ashift] +=
807 msp->ms_sm->sm_phys->smp_histogram[i];
810 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
811 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
813 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
817 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
819 metaslab_class_t *mc = mg->mg_class;
820 uint64_t ashift = mg->mg_vd->vdev_ashift;
822 ASSERT(MUTEX_HELD(&msp->ms_lock));
823 if (msp->ms_sm == NULL)
826 mutex_enter(&mg->mg_lock);
827 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
828 mg->mg_histogram[i + ashift] +=
829 msp->ms_sm->sm_phys->smp_histogram[i];
830 mc->mc_histogram[i + ashift] +=
831 msp->ms_sm->sm_phys->smp_histogram[i];
833 mutex_exit(&mg->mg_lock);
837 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
839 metaslab_class_t *mc = mg->mg_class;
840 uint64_t ashift = mg->mg_vd->vdev_ashift;
842 ASSERT(MUTEX_HELD(&msp->ms_lock));
843 if (msp->ms_sm == NULL)
846 mutex_enter(&mg->mg_lock);
847 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
848 ASSERT3U(mg->mg_histogram[i + ashift], >=,
849 msp->ms_sm->sm_phys->smp_histogram[i]);
850 ASSERT3U(mc->mc_histogram[i + ashift], >=,
851 msp->ms_sm->sm_phys->smp_histogram[i]);
853 mg->mg_histogram[i + ashift] -=
854 msp->ms_sm->sm_phys->smp_histogram[i];
855 mc->mc_histogram[i + ashift] -=
856 msp->ms_sm->sm_phys->smp_histogram[i];
858 mutex_exit(&mg->mg_lock);
862 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
864 ASSERT(msp->ms_group == NULL);
865 mutex_enter(&mg->mg_lock);
868 avl_add(&mg->mg_metaslab_tree, msp);
869 mutex_exit(&mg->mg_lock);
871 mutex_enter(&msp->ms_lock);
872 metaslab_group_histogram_add(mg, msp);
873 mutex_exit(&msp->ms_lock);
877 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
879 mutex_enter(&msp->ms_lock);
880 metaslab_group_histogram_remove(mg, msp);
881 mutex_exit(&msp->ms_lock);
883 mutex_enter(&mg->mg_lock);
884 ASSERT(msp->ms_group == mg);
885 avl_remove(&mg->mg_metaslab_tree, msp);
886 msp->ms_group = NULL;
887 mutex_exit(&mg->mg_lock);
891 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
894 * Although in principle the weight can be any value, in
895 * practice we do not use values in the range [1, 511].
897 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
898 ASSERT(MUTEX_HELD(&msp->ms_lock));
900 mutex_enter(&mg->mg_lock);
901 ASSERT(msp->ms_group == mg);
902 avl_remove(&mg->mg_metaslab_tree, msp);
903 msp->ms_weight = weight;
904 avl_add(&mg->mg_metaslab_tree, msp);
905 mutex_exit(&mg->mg_lock);
909 * Calculate the fragmentation for a given metaslab group. We can use
910 * a simple average here since all metaslabs within the group must have
911 * the same size. The return value will be a value between 0 and 100
912 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
913 * group have a fragmentation metric.
916 metaslab_group_fragmentation(metaslab_group_t *mg)
918 vdev_t *vd = mg->mg_vd;
919 uint64_t fragmentation = 0;
920 uint64_t valid_ms = 0;
922 for (int m = 0; m < vd->vdev_ms_count; m++) {
923 metaslab_t *msp = vd->vdev_ms[m];
925 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
929 fragmentation += msp->ms_fragmentation;
932 if (valid_ms <= vd->vdev_ms_count / 2)
933 return (ZFS_FRAG_INVALID);
935 fragmentation /= valid_ms;
936 ASSERT3U(fragmentation, <=, 100);
937 return (fragmentation);
941 * Determine if a given metaslab group should skip allocations. A metaslab
942 * group should avoid allocations if its free capacity is less than the
943 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
944 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
945 * that can still handle allocations. If the allocation throttle is enabled
946 * then we skip allocations to devices that have reached their maximum
947 * allocation queue depth unless the selected metaslab group is the only
948 * eligible group remaining.
951 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
954 spa_t *spa = mg->mg_vd->vdev_spa;
955 metaslab_class_t *mc = mg->mg_class;
958 * We can only consider skipping this metaslab group if it's
959 * in the normal metaslab class and there are other metaslab
960 * groups to select from. Otherwise, we always consider it eligible for allocations.
963 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
967 * If the metaslab group's mg_allocatable flag is set (see comments
968 * in metaslab_group_alloc_update() for more information) and
969 * the allocation throttle is disabled then allow allocations to this
970 * device. However, if the allocation throttle is enabled then
971 * check if we have reached our allocation limit (mg_alloc_queue_depth)
972 * to determine if we should allow allocations to this metaslab group.
973 * If all metaslab groups are no longer considered allocatable
974 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
975 * gang block size then we allow allocations on this metaslab group
976 * regardless of the mg_allocatable or throttle settings.
978 if (mg->mg_allocatable) {
979 metaslab_group_t *mgp;
981 uint64_t qmax = mg->mg_max_alloc_queue_depth;
983 if (!mc->mc_alloc_throttle_enabled)
987 * If this metaslab group does not have any free space, then
988 * there is no point in looking further.
990 if (mg->mg_no_free_space)
993 qdepth = refcount_count(&mg->mg_alloc_queue_depth);
996 * If this metaslab group is below its qmax or it's
997 * the only allocatable metaslab group, then attempt
998 * to allocate from it.
1000 if (qdepth < qmax || mc->mc_alloc_groups == 1)
1002 ASSERT3U(mc->mc_alloc_groups, >, 1);
1005 * Since this metaslab group is at or over its qmax, we
1006 * need to determine if there are metaslab groups after this
1007 * one that might be able to handle this allocation. This is
1008 * racy since we can't hold the locks for all metaslab
1009 * groups at the same time when we make this check.
1011 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
1012 qmax = mgp->mg_max_alloc_queue_depth;
1014 qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
1017 * If there is another metaslab group that
1018 * might be able to handle the allocation, then
1019 * we return false so that we skip this group.
1021 if (qdepth < qmax && !mgp->mg_no_free_space)
1026 * We didn't find another group to handle the allocation
1027 * so we can't skip this metaslab group even though
1028 * we are at or over our qmax.
1032 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1039 * ==========================================================================
1040 * Range tree callbacks
1041 * ==========================================================================
1045 * Comparison function for the private size-ordered tree. Tree is sorted
1046 * by size, larger sizes at the end of the tree.
1049 metaslab_rangesize_compare(const void *x1, const void *x2)
1051 const range_seg_t *r1 = x1;
1052 const range_seg_t *r2 = x2;
1053 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1054 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1056 if (rs_size1 < rs_size2)
1058 if (rs_size1 > rs_size2)
1061 if (r1->rs_start < r2->rs_start)
1064 if (r1->rs_start > r2->rs_start)
1071 * Create any block allocator specific components. The current allocators
1072 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1075 metaslab_rt_create(range_tree_t *rt, void *arg)
1077 metaslab_t *msp = arg;
1079 ASSERT3P(rt->rt_arg, ==, msp);
1080 ASSERT(msp->ms_tree == NULL);
1082 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1083 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1087 * Destroy the block allocator specific components.
1090 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1092 metaslab_t *msp = arg;
1094 ASSERT3P(rt->rt_arg, ==, msp);
1095 ASSERT3P(msp->ms_tree, ==, rt);
1096 ASSERT0(avl_numnodes(&msp->ms_size_tree));
1098 avl_destroy(&msp->ms_size_tree);
1102 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1104 metaslab_t *msp = arg;
1106 ASSERT3P(rt->rt_arg, ==, msp);
1107 ASSERT3P(msp->ms_tree, ==, rt);
1108 VERIFY(!msp->ms_condensing);
1109 avl_add(&msp->ms_size_tree, rs);
1113 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1115 metaslab_t *msp = arg;
1117 ASSERT3P(rt->rt_arg, ==, msp);
1118 ASSERT3P(msp->ms_tree, ==, rt);
1119 VERIFY(!msp->ms_condensing);
1120 avl_remove(&msp->ms_size_tree, rs);
1124 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1126 metaslab_t *msp = arg;
1128 ASSERT3P(rt->rt_arg, ==, msp);
1129 ASSERT3P(msp->ms_tree, ==, rt);
1132 * Normally one would walk the tree freeing nodes along the way.
1133 * Since the nodes are shared with the range trees we can avoid
1134 * walking all nodes and just reinitialize the avl tree. The nodes
1135 * will be freed by the range tree, so we don't want to free them here.
1137 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1138 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1141 static range_tree_ops_t metaslab_rt_ops = {
1143 metaslab_rt_destroy,
1150 * ==========================================================================
1151 * Common allocator routines
1152 * ==========================================================================
1156 * Return the maximum contiguous segment within the metaslab.
1159 metaslab_block_maxsize(metaslab_t *msp)
1161 avl_tree_t *t = &msp->ms_size_tree;
1164 if (t == NULL || (rs = avl_last(t)) == NULL)
1167 return (rs->rs_end - rs->rs_start);
1170 static range_seg_t *
1171 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1173 range_seg_t *rs, rsearch;
1176 rsearch.rs_start = start;
1177 rsearch.rs_end = start + size;
1179 rs = avl_find(t, &rsearch, &where);
1181 rs = avl_nearest(t, where, AVL_AFTER);
1188 * This is a helper function that can be used by the allocator to find
1189 * a suitable block to allocate. This will search the specified AVL
1190 * tree looking for a block that matches the specified criteria.
1193 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1196 range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1198 while (rs != NULL) {
1199 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1201 if (offset + size <= rs->rs_end) {
1202 *cursor = offset + size;
1205 rs = AVL_NEXT(t, rs);
1209 * If we know we've searched the whole map (*cursor == 0), give up.
1210 * Otherwise, reset the cursor to the beginning and try again.
1216 return (metaslab_block_picker(t, cursor, size, align));
1220 * ==========================================================================
1221 * The first-fit block allocator
1222 * ==========================================================================
1225 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1228 * Find the largest power of 2 block size that evenly divides the
1229 * requested size. This is used to try to allocate blocks with similar
1230 * alignment from the same area of the metaslab (i.e. same cursor
1231 * bucket) but it does not prevent allocations of other sizes from being
1232 * placed in the same region.
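 *
 * For example (illustrative only), a 24K (0x6000) request has
 * size & -size == 0x2000, so it is aligned to 8K and shares the 8K cursor
 * bucket (ms_lbas[highbit64(0x2000) - 1]) with other 8K-aligned requests.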
1234 uint64_t align = size & -size;
1235 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1236 avl_tree_t *t = &msp->ms_tree->rt_root;
1238 return (metaslab_block_picker(t, cursor, size, align));
1241 static metaslab_ops_t metaslab_ff_ops = {
1246 * ==========================================================================
1247 * Dynamic block allocator -
1248 * Uses the first fit allocation scheme until space gets low and then
1249 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1250 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1251 * ==========================================================================
1254 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1257 * Find the largest power of 2 block size that evenly divides the
1258 * requested size. This is used to try to allocate blocks with similar
1259 * alignment from the same area of the metaslab (i.e. same cursor
1260 * bucket) but it does not prevent allocations of other sizes from being
1261 * placed in the same region.
1263 uint64_t align = size & -size;
1264 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1265 range_tree_t *rt = msp->ms_tree;
1266 avl_tree_t *t = &rt->rt_root;
1267 uint64_t max_size = metaslab_block_maxsize(msp);
1268 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1270 ASSERT(MUTEX_HELD(&msp->ms_lock));
1271 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1273 if (max_size < size)
1277 * If we're running low on space, switch to using the
1278 * size-sorted AVL tree (best-fit).
1280 if (max_size < metaslab_df_alloc_threshold ||
1281 free_pct < metaslab_df_free_pct) {
1282 t = &msp->ms_size_tree;
1286 return (metaslab_block_picker(t, cursor, size, 1ULL));
1289 static metaslab_ops_t metaslab_df_ops = {
1294 * ==========================================================================
1295 * Cursor fit block allocator -
1296 * Select the largest region in the metaslab, set the cursor to the beginning
1297 * of the range and the cursor_end to the end of the range. As allocations
1298 * are made advance the cursor. Continue allocating from the cursor until
1299 * the range is exhausted and then find a new range.
1300 * ==========================================================================
1303 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1305 range_tree_t *rt = msp->ms_tree;
1306 avl_tree_t *t = &msp->ms_size_tree;
1307 uint64_t *cursor = &msp->ms_lbas[0];
1308 uint64_t *cursor_end = &msp->ms_lbas[1];
1309 uint64_t offset = 0;
1311 ASSERT(MUTEX_HELD(&msp->ms_lock));
1312 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1314 ASSERT3U(*cursor_end, >=, *cursor);
1316 if ((*cursor + size) > *cursor_end) {
1319 rs = avl_last(&msp->ms_size_tree);
1320 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1323 *cursor = rs->rs_start;
1324 *cursor_end = rs->rs_end;
1333 static metaslab_ops_t metaslab_cf_ops = {
1338 * ==========================================================================
1339 * New dynamic fit allocator -
1340 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1341 * contiguous blocks. If no region is found then just use the largest segment that remains.
1343 * ==========================================================================
1347 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1348 * to request from the allocator.
1350 uint64_t metaslab_ndf_clump_shift = 4;
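/*
 * Example: with the default metaslab_ndf_clump_shift of 4, a request of size
 * 2^n that misses its cursor falls back to searching the size-ordered tree
 * for a region of up to 2^(n+4), i.e. room for 16 requests of that size.
 */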
1353 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1355 avl_tree_t *t = &msp->ms_tree->rt_root;
1357 range_seg_t *rs, rsearch;
1358 uint64_t hbit = highbit64(size);
1359 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1360 uint64_t max_size = metaslab_block_maxsize(msp);
1362 ASSERT(MUTEX_HELD(&msp->ms_lock));
1363 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1365 if (max_size < size)
1368 rsearch.rs_start = *cursor;
1369 rsearch.rs_end = *cursor + size;
1371 rs = avl_find(t, &rsearch, &where);
1372 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1373 t = &msp->ms_size_tree;
1375 rsearch.rs_start = 0;
1376 rsearch.rs_end = MIN(max_size,
1377 1ULL << (hbit + metaslab_ndf_clump_shift));
1378 rs = avl_find(t, &rsearch, &where);
1380 rs = avl_nearest(t, where, AVL_AFTER);
1384 if ((rs->rs_end - rs->rs_start) >= size) {
1385 *cursor = rs->rs_start + size;
1386 return (rs->rs_start);
1391 static metaslab_ops_t metaslab_ndf_ops = {
1395 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1398 * ==========================================================================
1400 * ==========================================================================
1404 * Wait for any in-progress metaslab loads to complete.
1407 metaslab_load_wait(metaslab_t *msp)
1409 ASSERT(MUTEX_HELD(&msp->ms_lock));
1411 while (msp->ms_loading) {
1412 ASSERT(!msp->ms_loaded);
1413 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1418 metaslab_load(metaslab_t *msp)
1421 boolean_t success = B_FALSE;
1423 ASSERT(MUTEX_HELD(&msp->ms_lock));
1424 ASSERT(!msp->ms_loaded);
1425 ASSERT(!msp->ms_loading);
1427 msp->ms_loading = B_TRUE;
1430 * If the space map has not been allocated yet, then treat
1431 * all the space in the metaslab as free and add it to the ms_tree.
1434 if (msp->ms_sm != NULL)
1435 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1437 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1439 success = (error == 0);
1440 msp->ms_loading = B_FALSE;
1443 ASSERT3P(msp->ms_group, !=, NULL);
1444 msp->ms_loaded = B_TRUE;
1446 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1447 range_tree_walk(msp->ms_defertree[t],
1448 range_tree_remove, msp->ms_tree);
1450 msp->ms_max_size = metaslab_block_maxsize(msp);
1452 cv_broadcast(&msp->ms_load_cv);
1457 metaslab_unload(metaslab_t *msp)
1459 ASSERT(MUTEX_HELD(&msp->ms_lock));
1460 range_tree_vacate(msp->ms_tree, NULL, NULL);
1461 msp->ms_loaded = B_FALSE;
1462 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1463 msp->ms_max_size = 0;
1467 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1470 vdev_t *vd = mg->mg_vd;
1471 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1475 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1476 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1477 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1479 ms->ms_start = id << vd->vdev_ms_shift;
1480 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1483 * We only open space map objects that already exist. All others
1484 * will be opened when we finally allocate an object for them.
1487 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1488 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1491 kmem_free(ms, sizeof (metaslab_t));
1495 ASSERT(ms->ms_sm != NULL);
1499 * We create the main range tree here, but we don't create the
1500 * other range trees until metaslab_sync_done(). This serves
1501 * two purposes: it allows metaslab_sync_done() to detect the
1502 * addition of new space; and for debugging, it ensures that we'd
1503 * data fault on any attempt to use this metaslab before it's ready.
1505 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1506 metaslab_group_add(mg, ms);
1508 metaslab_set_fragmentation(ms);
1511 * If we're opening an existing pool (txg == 0) or creating
1512 * a new one (txg == TXG_INITIAL), all space is available now.
1513 * If we're adding space to an existing pool, the new space
1514 * does not become available until after this txg has synced.
1515 * The metaslab's weight will also be initialized when we sync
1516 * out this txg. This ensures that we don't attempt to allocate
1517 * from it before we have initialized it completely.
1519 if (txg <= TXG_INITIAL)
1520 metaslab_sync_done(ms, 0);
1523 * If metaslab_debug_load is set and we're initializing a metaslab
1524 * that has an allocated space map object then load its space
1525 * map so that we can verify frees.
1527 if (metaslab_debug_load && ms->ms_sm != NULL) {
1528 mutex_enter(&ms->ms_lock);
1529 VERIFY0(metaslab_load(ms));
1530 mutex_exit(&ms->ms_lock);
1534 vdev_dirty(vd, 0, NULL, txg);
1535 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1544 metaslab_fini(metaslab_t *msp)
1546 metaslab_group_t *mg = msp->ms_group;
1548 metaslab_group_remove(mg, msp);
1550 mutex_enter(&msp->ms_lock);
1551 VERIFY(msp->ms_group == NULL);
1552 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1554 space_map_close(msp->ms_sm);
1556 metaslab_unload(msp);
1557 range_tree_destroy(msp->ms_tree);
1558 range_tree_destroy(msp->ms_freeingtree);
1559 range_tree_destroy(msp->ms_freedtree);
1561 for (int t = 0; t < TXG_SIZE; t++) {
1562 range_tree_destroy(msp->ms_alloctree[t]);
1565 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1566 range_tree_destroy(msp->ms_defertree[t]);
1569 ASSERT0(msp->ms_deferspace);
1571 mutex_exit(&msp->ms_lock);
1572 cv_destroy(&msp->ms_load_cv);
1573 mutex_destroy(&msp->ms_lock);
1575 kmem_free(msp, sizeof (metaslab_t));
1578 #define FRAGMENTATION_TABLE_SIZE 17
1581 * This table defines a segment size based fragmentation metric that will
1582 * allow each metaslab to derive its own fragmentation value. This is done
1583 * by calculating the space in each bucket of the spacemap histogram and
1584 * multiplying that by the fragmentation metric in this table. Doing
1585 * this for all buckets and dividing it by the total amount of free
1586 * space in this metaslab (i.e. the total free space in all buckets) gives
1587 * us the fragmentation metric. This means that a high fragmentation metric
1588 * equates to most of the free space being comprised of small segments.
1589 * Conversely, if the metric is low, then most of the free space is in
1590 * large segments. A 10% change in fragmentation equates to approximately
1591 * double the number of segments.
1593 * This table defines 0% fragmented space using 16MB segments. Testing has
1594 * shown that segments that are greater than or equal to 16MB do not suffer
1595 * from drastic performance problems. Using this value, we derive the rest
1596 * of the table. Since the fragmentation value is never stored on disk, it
1597 * is possible to change these calculations in the future.
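 *
 * Worked example of the weighted average described above: if half of a
 * metaslab's free space sits in a histogram bucket whose table entry is 20
 * and the other half in a bucket whose entry is 0, the metaslab's
 * fragmentation is (50% * 20) + (50% * 0) = 10%.  (The entry values here are
 * illustrative, not quotes from the table below.)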
1599 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1619 * Calculate the metaslab's fragmentation metric. A return value
1620 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1621 * not support this metric. Otherwise, the return value should be in the range [0, 100].
1625 metaslab_set_fragmentation(metaslab_t *msp)
1627 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1628 uint64_t fragmentation = 0;
1630 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1631 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1633 if (!feature_enabled) {
1634 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1639 * A null space map means that the entire metaslab is free
1640 * and thus is not fragmented.
1642 if (msp->ms_sm == NULL) {
1643 msp->ms_fragmentation = 0;
1648 * If this metaslab's space map has not been upgraded, flag it
1649 * so that we upgrade next time we encounter it.
1651 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1652 uint64_t txg = spa_syncing_txg(spa);
1653 vdev_t *vd = msp->ms_group->mg_vd;
1656 * If we've reached the final dirty txg, then we must
1657 * be shutting down the pool. We don't want to dirty
1658 * any data past this point so skip setting the condense
1660 * flag. We can retry this action the next time the pool is imported.
1662 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
1663 msp->ms_condense_wanted = B_TRUE;
1664 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1665 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1666 "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1669 msp->ms_fragmentation = ZFS_FRAG_INVALID;
1673 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1675 uint8_t shift = msp->ms_sm->sm_shift;
1677 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1678 FRAGMENTATION_TABLE_SIZE - 1);
1680 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1683 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1686 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1687 fragmentation += space * zfs_frag_table[idx];
1691 fragmentation /= total;
1692 ASSERT3U(fragmentation, <=, 100);
1694 msp->ms_fragmentation = fragmentation;
1698 * Compute a weight -- a selection preference value -- for the given metaslab.
1699 * This is based on the amount of free space, the level of fragmentation,
1700 * the LBA range, and whether the metaslab is loaded.
1703 metaslab_space_weight(metaslab_t *msp)
1705 metaslab_group_t *mg = msp->ms_group;
1706 vdev_t *vd = mg->mg_vd;
1707 uint64_t weight, space;
1709 ASSERT(MUTEX_HELD(&msp->ms_lock));
1710 ASSERT(!vd->vdev_removing);
1713 * The baseline weight is the metaslab's free space.
1715 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1717 if (metaslab_fragmentation_factor_enabled &&
1718 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1720 * Use the fragmentation information to inversely scale
1721 * down the baseline weight. We need to ensure that we
1722 * don't exclude this metaslab completely when it's 100%
1723 * fragmented. To avoid this we reduce the fragmented value by 1.
1726 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1729 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1730 * this metaslab again. The fragmentation metric may have
1731 * decreased the space to something smaller than
1732 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1733 * so that we can consume any remaining space.
1735 if (space > 0 && space < SPA_MINBLOCKSIZE)
1736 space = SPA_MINBLOCKSIZE;
1741 * Modern disks have uniform bit density and constant angular velocity.
1742 * Therefore, the outer recording zones are faster (higher bandwidth)
1743 * than the inner zones by the ratio of outer to inner track diameter,
1744 * which is typically around 2:1. We account for this by assigning
1745 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1746 * In effect, this means that we'll select the metaslab with the most
1747 * free bandwidth rather than simply the one with the most free space.
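 *
 * For example, metaslab 0 (outermost) gets twice its raw space weight while
 * the last metaslab on the vdev gets roughly its raw space weight, with the
 * multiplier falling off linearly in between.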
1749 if (metaslab_lba_weighting_enabled) {
1750 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1751 ASSERT(weight >= space && weight <= 2 * space);
1755 * If this metaslab is one we're actively using, adjust its
1756 * weight to make it preferable to any inactive metaslab so
1757 * we'll polish it off. If the fragmentation on this metaslab
1758 * has exceeded our threshold, then don't mark it active.
1760 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1761 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1762 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1765 WEIGHT_SET_SPACEBASED(weight);
1770 * Return the weight of the specified metaslab, according to the segment-based
1771 * weighting algorithm. The metaslab must be loaded. This function can
1772 * be called within a sync pass since it relies only on the metaslab's
1773 * range tree which is always accurate when the metaslab is loaded.
1776 metaslab_weight_from_range_tree(metaslab_t *msp)
1778 uint64_t weight = 0;
1779 uint32_t segments = 0;
1781 ASSERT(msp->ms_loaded);
1783 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1785 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1786 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1789 segments += msp->ms_tree->rt_histogram[i];
1792 * The range tree provides more precision than the space map
1793 * and must be downgraded so that all values fit within the
1794 * space map's histogram. This allows us to compare loaded
1795 * vs. unloaded metaslabs to determine which metaslab is
1796 * considered "best".
1801 if (segments != 0) {
1802 WEIGHT_SET_COUNT(weight, segments);
1803 WEIGHT_SET_INDEX(weight, i);
1804 WEIGHT_SET_ACTIVE(weight, 0);
1812 * Calculate the weight based on the on-disk histogram. This should only
1813 * be called after a sync pass has completely finished since the on-disk
1814 * information is updated in metaslab_sync().
1817 metaslab_weight_from_spacemap(metaslab_t *msp)
1819 uint64_t weight = 0;
1821 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1822 if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1823 WEIGHT_SET_COUNT(weight,
1824 msp->ms_sm->sm_phys->smp_histogram[i]);
1825 WEIGHT_SET_INDEX(weight, i +
1826 msp->ms_sm->sm_shift);
1827 WEIGHT_SET_ACTIVE(weight, 0);
1835 * Compute a segment-based weight for the specified metaslab. The weight
1836 * is determined by the highest bucket in the histogram. The information
1837 * for the highest bucket is encoded into the weight value.
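 *
 * For instance, a loaded metaslab whose largest free segments fall in the
 * 512K histogram bucket records that bucket's index via WEIGHT_SET_INDEX()
 * and the number of such segments via WEIGHT_SET_COUNT(); both are packed
 * into the single 64-bit weight used to sort metaslabs within a group.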
1840 metaslab_segment_weight(metaslab_t *msp)
1842 metaslab_group_t *mg = msp->ms_group;
1843 uint64_t weight = 0;
1844 uint8_t shift = mg->mg_vd->vdev_ashift;
1846 ASSERT(MUTEX_HELD(&msp->ms_lock));
1849 * The metaslab is completely free.
1851 if (space_map_allocated(msp->ms_sm) == 0) {
1852 int idx = highbit64(msp->ms_size) - 1;
1853 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1855 if (idx < max_idx) {
1856 WEIGHT_SET_COUNT(weight, 1ULL);
1857 WEIGHT_SET_INDEX(weight, idx);
1859 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1860 WEIGHT_SET_INDEX(weight, max_idx);
1862 WEIGHT_SET_ACTIVE(weight, 0);
1863 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1868 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1871 * If the metaslab is fully allocated then just make the weight 0.
1873 if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1876 * If the metaslab is already loaded, then use the range tree to
1877 * determine the weight. Otherwise, we rely on the space map information
1878 * to generate the weight.
1880 if (msp->ms_loaded) {
1881 weight = metaslab_weight_from_range_tree(msp);
1883 weight = metaslab_weight_from_spacemap(msp);
1887 * If the metaslab was active the last time we calculated its weight
1888 * then keep it active. We want to consume the entire region that
1889 * is associated with this weight.
1891 if (msp->ms_activation_weight != 0 && weight != 0)
1892 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1897 * Determine if we should attempt to allocate from this metaslab. If the
1898 * metaslab has a maximum size then we can quickly determine if the desired
1899 * allocation size can be satisfied. Otherwise, if we're using segment-based
1900 * weighting then we can determine the maximum allocation that this metaslab
1901 * can accommodate based on the index encoded in the weight. If we're using
1902 * space-based weights then rely on the entire weight (excluding the weight type).
1906 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1908 boolean_t should_allocate;
1910 if (msp->ms_max_size != 0)
1911 return (msp->ms_max_size >= asize);
1913 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1915 * The metaslab segment weight indicates segments in the
1916 * range [2^i, 2^(i+1)), where i is the index in the weight.
1917 * Since the asize might be in the middle of the range, we
1918 * should attempt the allocation if asize < 2^(i+1).
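 *
 * For example, if the encoded index i corresponds to 512K segments, we
 * will attempt any allocation strictly smaller than 1M (2^(i+1)), since
 * a free segment in the [512K, 1M) range may still satisfy it.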
1920 should_allocate = (asize <
1921 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1923 should_allocate = (asize <=
1924 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1926 return (should_allocate);
1930 metaslab_weight(metaslab_t *msp)
1932 vdev_t *vd = msp->ms_group->mg_vd;
1933 spa_t *spa = vd->vdev_spa;
1936 ASSERT(MUTEX_HELD(&msp->ms_lock));
1939 * This vdev is in the process of being removed so there is nothing
1940 * for us to do here.
1942 if (vd->vdev_removing) {
1943 ASSERT0(space_map_allocated(msp->ms_sm));
1944 ASSERT0(vd->vdev_ms_shift);
1948 metaslab_set_fragmentation(msp);
1951 * Update the maximum size if the metaslab is loaded. This will
1952 * ensure that we get an accurate maximum size if newly freed space
1953 * has been added back into the free tree.
1956 msp->ms_max_size = metaslab_block_maxsize(msp);
1959 * Segment-based weighting requires space map histogram support.
1961 if (zfs_metaslab_segment_weight_enabled &&
1962 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1963 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1964 sizeof (space_map_phys_t))) {
1965 weight = metaslab_segment_weight(msp);
1967 weight = metaslab_space_weight(msp);
1973 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1975 ASSERT(MUTEX_HELD(&msp->ms_lock));
1977 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1978 metaslab_load_wait(msp);
1979 if (!msp->ms_loaded) {
1980 int error = metaslab_load(msp);
1982 metaslab_group_sort(msp->ms_group, msp, 0);
1987 msp->ms_activation_weight = msp->ms_weight;
1988 metaslab_group_sort(msp->ms_group, msp,
1989 msp->ms_weight | activation_weight);
1991 ASSERT(msp->ms_loaded);
1992 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1998 metaslab_passivate(metaslab_t *msp, uint64_t weight)
2000 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
2003 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
2004 * this metaslab again. In that case, it had better be empty,
2005 * or we would be leaving space on the table.
2007 ASSERT(size >= SPA_MINBLOCKSIZE ||
2008 range_tree_space(msp->ms_tree) == 0);
2009 ASSERT0(weight & METASLAB_ACTIVE_MASK);
2011 msp->ms_activation_weight = 0;
2012 metaslab_group_sort(msp->ms_group, msp, weight);
2013 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
2017 * Segment-based metaslabs are activated once and remain active until
2018 * we either fail an allocation attempt (similar to space-based metaslabs)
2019 * or have exhausted the free space in zfs_metaslab_switch_threshold
2020 * buckets since the metaslab was activated. This function checks to see
2021 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2022 * metaslab and passivates it proactively. This will allow us to select a
2023 * metaslab with a larger contiguous region, if any remains within this
2024 * metaslab group. If we're in sync pass > 1, then we continue using this
2025 * metaslab so that we don't dirty more blocks and cause more sync passes.
2028 metaslab_segment_may_passivate(metaslab_t *msp)
2030 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2032 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2036 * Since we are in the middle of a sync pass, the most accurate
2037 * information that is accessible to us is the in-core range tree
2038 * histogram; calculate the new weight based on that information.
2040 uint64_t weight = metaslab_weight_from_range_tree(msp);
2041 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2042 int current_idx = WEIGHT_GET_INDEX(weight);
2044 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2045 metaslab_passivate(msp, weight);
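/*
 * Illustrative sketch of the proactive passivation check above
 * (hypothetical helper and example values): with a switch threshold of 2
 * and an activation index of 16 (largest free segment in [64K, 128K)),
 * the metaslab is passivated once the current index falls to 14 or below,
 * i.e. once the largest remaining free segment is smaller than 32K.
 */
static boolean_t
metaslab_switch_example(int activation_idx, int current_idx, int threshold)
{
	return (current_idx <= activation_idx - threshold);
}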
2049 metaslab_preload(void *arg)
2051 metaslab_t *msp = arg;
2052 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2054 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2056 mutex_enter(&msp->ms_lock);
2057 metaslab_load_wait(msp);
2058 if (!msp->ms_loaded)
2059 (void) metaslab_load(msp);
2060 msp->ms_selected_txg = spa_syncing_txg(spa);
2061 mutex_exit(&msp->ms_lock);
2065 metaslab_group_preload(metaslab_group_t *mg)
2067 spa_t *spa = mg->mg_vd->vdev_spa;
2069 avl_tree_t *t = &mg->mg_metaslab_tree;
2072 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2073 taskq_wait(mg->mg_taskq);
2077 mutex_enter(&mg->mg_lock);
2079 * Load the next potential metaslabs
2081 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2083 * We preload only the maximum number of metaslabs specified
2084 * by metaslab_preload_limit. If a metaslab is being forced
2085 * to condense then we preload it too. This will ensure
2086 * that force condensing happens in the next txg.
2088 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2092 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2093 msp, TQ_SLEEP) != 0);
2095 mutex_exit(&mg->mg_lock);
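/*
 * Hypothetical restatement of the dispatch condition in the loop above: a
 * metaslab is queued for preload while fewer than metaslab_preload_limit
 * metaslabs have been queued, or regardless of the limit when a condense
 * has been requested for it.
 */
static boolean_t
metaslab_should_preload_example(int already_queued, int limit,
    boolean_t condense_wanted)
{
	return (already_queued < limit || condense_wanted);
}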
2099 * Determine if the space map's on-disk footprint is past our tolerance
2100 * for inefficiency. We would like to use the following criteria to make
2101 * our decision:
2103 * 1. The size of the space map object should not dramatically increase as a
2104 * result of writing out the free space range tree.
2106 * 2. The on-disk size of the space map is at least zfs_condense_pct/100
2107 * times the size of the free space range tree's minimal on-disk form
2108 * (i.e. zfs_condense_pct = 110, minimal form = 1MB, on-disk size >= 1.1MB).
2110 * 3. The on-disk size of the space map should actually decrease.
2112 * Checking the first condition is tricky since we don't want to walk
2113 * the entire AVL tree calculating the estimated on-disk size. Instead we
2114 * use the size-ordered range tree in the metaslab and calculate the
2115 * size required to write out the largest segment in our free tree. If the
2116 * size required to represent that segment on disk is larger than the space
2117 * map object then we avoid condensing this map.
2119 * To determine the second criterion we use a best-case estimate and assume
2120 * each segment can be represented on-disk as a single 64-bit entry. We refer
2121 * to this best-case estimate as the space map's minimal form.
2123 * Unfortunately, we cannot compute the on-disk size of the space map in this
2124 * context because we cannot accurately compute the effects of compression, etc.
2125 * Instead, we apply the heuristic described in the block comment for
2126 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2127 * is greater than a threshold number of blocks.
2130 metaslab_should_condense(metaslab_t *msp)
2132 space_map_t *sm = msp->ms_sm;
2134 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2135 dmu_object_info_t doi;
2136 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
2138 ASSERT(MUTEX_HELD(&msp->ms_lock));
2139 ASSERT(msp->ms_loaded);
2142 * Use the ms_size_tree range tree, which is ordered by size, to
2143 * obtain the largest segment in the free tree. We always condense
2144 * metaslabs that are empty and metaslabs for which a condense
2145 * request has been made.
2147 rs = avl_last(&msp->ms_size_tree);
2148 if (rs == NULL || msp->ms_condense_wanted)
2152 * Calculate the number of 64-bit entries this segment would
2153 * require when written to disk. If this single segment would be
2154 * larger on-disk than the entire current on-disk structure, then
2155 * clearly condensing will increase the on-disk structure size.
2157 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
2158 entries = size / (MIN(size, SM_RUN_MAX));
2159 segsz = entries * sizeof (uint64_t);
2161 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
2162 object_size = space_map_length(msp->ms_sm);
2164 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2165 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2167 return (segsz <= object_size &&
2168 object_size >= (optimal_size * zfs_condense_pct / 100) &&
2169 object_size > zfs_metaslab_condense_block_threshold * record_size);
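/*
 * Illustrative sketch of the three criteria with made-up numbers
 * (hypothetical helper; the constants stand in for zfs_condense_pct and
 * zfs_metaslab_condense_block_threshold): a space map whose on-disk length
 * is 512K, whose in-core tree holds 10,000 segments (minimal form
 * 10,000 * 8 = 80,000 bytes), whose largest free segment needs a single
 * 8-byte entry, and whose record size is 4K would be condensed, since
 * 8 <= 512K, 512K >= 80,000 * 200 / 100, and 512K > 4 * 4K.
 */
static boolean_t
metaslab_condense_example(uint64_t segsz, uint64_t object_size,
    uint64_t optimal_size, uint64_t record_size)
{
	return (segsz <= object_size &&
	    object_size >= (optimal_size * 200 / 100) &&
	    object_size > 4 * record_size);
}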
2173 * Condense the on-disk space map representation to its minimized form.
2174 * The minimized form consists of a small number of allocations followed by
2175 * the entries of the free range tree.
2178 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2180 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2181 range_tree_t *condense_tree;
2182 space_map_t *sm = msp->ms_sm;
2184 ASSERT(MUTEX_HELD(&msp->ms_lock));
2185 ASSERT3U(spa_sync_pass(spa), ==, 1);
2186 ASSERT(msp->ms_loaded);
2189 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2190 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2191 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2192 msp->ms_group->mg_vd->vdev_spa->spa_name,
2193 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
2194 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2196 msp->ms_condense_wanted = B_FALSE;
2199 * Create a range tree that is 100% allocated. We remove segments
2200 * that have been freed in this txg, any deferred frees that exist,
2201 * and any allocation in the future. Removing segments should be
2202 * a relatively inexpensive operation since we expect these trees to
2203 * have a small number of nodes.
2205 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
2206 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2209 * Remove what's been freed in this txg from the condense_tree.
2210 * Since we're in sync_pass 1, we know that all the frees from
2211 * this txg are in the freeingtree.
2213 range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
2215 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2216 range_tree_walk(msp->ms_defertree[t],
2217 range_tree_remove, condense_tree);
2220 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2221 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
2222 range_tree_remove, condense_tree);
2226 * We're about to drop the metaslab's lock thus allowing
2227 * other consumers to change its content. Set the
2228 * metaslab's ms_condensing flag to ensure that
2229 * allocations on this metaslab do not occur while we're
2230 * in the middle of committing it to disk. This is only critical
2231 * for the ms_tree as all other range trees use per txg
2232 * views of their content.
2234 msp->ms_condensing = B_TRUE;
2236 mutex_exit(&msp->ms_lock);
2237 space_map_truncate(sm, tx);
2238 mutex_enter(&msp->ms_lock);
2241 * While we would ideally like to create a space map representation
2242 * that consists only of allocation records, doing so can be
2243 * prohibitively expensive because the in-core free tree can be
2244 * large, and therefore computationally expensive to subtract
2245 * from the condense_tree. Instead we sync out two trees, a cheap
2246 * allocation-only tree followed by the in-core free tree. While not
2247 * optimal, this is typically close to optimal, and much cheaper to compute.
2250 space_map_write(sm, condense_tree, SM_ALLOC, tx);
2251 range_tree_vacate(condense_tree, NULL, NULL);
2252 range_tree_destroy(condense_tree);
2254 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
2255 msp->ms_condensing = B_FALSE;
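/*
 * Hypothetical sketch of the bookkeeping above: the condense_tree starts
 * out covering the entire metaslab and is shrunk by this txg's frees, the
 * deferred frees, and any future allocations. Assuming the removed ranges
 * never overlap, the space that remains (and is written out as ALLOC
 * records) is simply:
 */
static uint64_t
condense_alloc_space_example(uint64_t ms_size, uint64_t freeing,
    uint64_t deferred, uint64_t future_allocs)
{
	return (ms_size - freeing - deferred - future_allocs);
}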
2259 * Write a metaslab to disk in the context of the specified transaction group.
2262 metaslab_sync(metaslab_t *msp, uint64_t txg)
2264 metaslab_group_t *mg = msp->ms_group;
2265 vdev_t *vd = mg->mg_vd;
2266 spa_t *spa = vd->vdev_spa;
2267 objset_t *mos = spa_meta_objset(spa);
2268 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2270 uint64_t object = space_map_object(msp->ms_sm);
2272 ASSERT(!vd->vdev_ishole);
2275 * This metaslab has just been added so there's no work to do now.
2277 if (msp->ms_freeingtree == NULL) {
2278 ASSERT3P(alloctree, ==, NULL);
2282 ASSERT3P(alloctree, !=, NULL);
2283 ASSERT3P(msp->ms_freeingtree, !=, NULL);
2284 ASSERT3P(msp->ms_freedtree, !=, NULL);
2287 * Normally, we don't want to process a metaslab if there
2288 * are no allocations or frees to perform. However, if the metaslab
2289 * is being forced to condense and it's loaded, we need to let it through.
2292 if (range_tree_space(alloctree) == 0 &&
2293 range_tree_space(msp->ms_freeingtree) == 0 &&
2294 !(msp->ms_loaded && msp->ms_condense_wanted))
2298 VERIFY(txg <= spa_final_dirty_txg(spa));
2301 * The only state that can actually be changing concurrently with
2302 * metaslab_sync() is the metaslab's ms_tree. No other thread can
2303 * be modifying this txg's alloctree, freeingtree, freedtree, or
2304 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2305 * space map ASSERTs. We drop it whenever we call into the DMU,
2306 * because the DMU can call down to us (e.g. via zio_free()) at any time.
2310 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2312 if (msp->ms_sm == NULL) {
2313 uint64_t new_object;
2315 new_object = space_map_alloc(mos, tx);
2316 VERIFY3U(new_object, !=, 0);
2318 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2319 msp->ms_start, msp->ms_size, vd->vdev_ashift,
2321 ASSERT(msp->ms_sm != NULL);
2324 mutex_enter(&msp->ms_lock);
2327 * Note: metaslab_condense() clears the space map's histogram.
2328 * Therefore we must verify and remove this histogram before condensing.
2331 metaslab_group_histogram_verify(mg);
2332 metaslab_class_histogram_verify(mg->mg_class);
2333 metaslab_group_histogram_remove(mg, msp);
2335 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2336 metaslab_should_condense(msp)) {
2337 metaslab_condense(msp, txg, tx);
2339 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2340 space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
2343 if (msp->ms_loaded) {
2345 * When the space map is loaded, we have an accurate
2346 * histogram in the range tree. This gives us an opportunity
2347 * to bring the space map's histogram up-to-date so we clear
2348 * it first before updating it.
2350 space_map_histogram_clear(msp->ms_sm);
2351 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2354 * Since we've cleared the histogram we need to add back
2355 * any free space that has already been processed, plus
2356 * any deferred space. This allows the on-disk histogram
2357 * to accurately reflect all free space even if some space
2358 * is not yet available for allocation (i.e. deferred).
2360 space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);
2363 * Add back any deferred free space that has not been
2364 * added back into the in-core free tree yet. This will
2365 * ensure that we don't end up with a space map histogram
2366 * that is completely empty unless the metaslab is fully allocated.
2369 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2370 space_map_histogram_add(msp->ms_sm,
2371 msp->ms_defertree[t], tx);
2376 * Always add the free space from this sync pass to the space
2377 * map histogram. We want to make sure that the on-disk histogram
2378 * accounts for all free space. If the space map is not loaded,
2379 * then we will lose some accuracy but will correct it the next
2380 * time we load the space map.
2382 space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);
2384 metaslab_group_histogram_add(mg, msp);
2385 metaslab_group_histogram_verify(mg);
2386 metaslab_class_histogram_verify(mg->mg_class);
2389 * For sync pass 1, we avoid traversing this txg's free range tree
2390 * and instead will just swap the pointers for freeingtree and
2391 * freedtree. We can safely do this since the freedtree is
2392 * guaranteed to be empty on the initial pass.
2394 if (spa_sync_pass(spa) == 1) {
2395 range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
2397 range_tree_vacate(msp->ms_freeingtree,
2398 range_tree_add, msp->ms_freedtree);
2400 range_tree_vacate(alloctree, NULL, NULL);
2402 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2403 ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
2404 ASSERT0(range_tree_space(msp->ms_freeingtree));
2406 mutex_exit(&msp->ms_lock);
2408 if (object != space_map_object(msp->ms_sm)) {
2409 object = space_map_object(msp->ms_sm);
2410 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2411 msp->ms_id, sizeof (uint64_t), &object, tx);
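/*
 * Illustrative sketch (hypothetical helper, not called by metaslab_sync()):
 * when the metaslab is loaded, the on-disk histogram is rebuilt from the
 * in-core free tree, the already-processed frees, and every defer tree,
 * and this pass's frees are always added on top. Summing those trees'
 * space gives the total free space the histogram is meant to describe.
 */
static uint64_t
synced_histogram_space_example(uint64_t ms_tree_space, uint64_t freed_space,
    uint64_t defer_space, uint64_t freeing_space)
{
	return (ms_tree_space + freed_space + defer_space + freeing_space);
}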
2417 * Called after a transaction group has completely synced to mark
2418 * all of the metaslab's free space as usable.
2421 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2423 metaslab_group_t *mg = msp->ms_group;
2424 vdev_t *vd = mg->mg_vd;
2425 spa_t *spa = vd->vdev_spa;
2426 range_tree_t **defer_tree;
2427 int64_t alloc_delta, defer_delta;
2428 boolean_t defer_allowed = B_TRUE;
2430 ASSERT(!vd->vdev_ishole);
2432 mutex_enter(&msp->ms_lock);
2435 * If this metaslab is just becoming available, initialize its
2436 * range trees and add its capacity to the vdev.
2438 if (msp->ms_freedtree == NULL) {
2439 for (int t = 0; t < TXG_SIZE; t++) {
2440 ASSERT(msp->ms_alloctree[t] == NULL);
2442 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2446 ASSERT3P(msp->ms_freeingtree, ==, NULL);
2447 msp->ms_freeingtree = range_tree_create(NULL, msp,
2450 ASSERT3P(msp->ms_freedtree, ==, NULL);
2451 msp->ms_freedtree = range_tree_create(NULL, msp,
2454 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2455 ASSERT(msp->ms_defertree[t] == NULL);
2457 msp->ms_defertree[t] = range_tree_create(NULL, msp,
2461 vdev_space_update(vd, 0, 0, msp->ms_size);
2464 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2466 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2467 metaslab_class_get_alloc(spa_normal_class(spa));
2468 if (free_space <= spa_get_slop_space(spa)) {
2469 defer_allowed = B_FALSE;
2473 alloc_delta = space_map_alloc_delta(msp->ms_sm);
2474 if (defer_allowed) {
2475 defer_delta = range_tree_space(msp->ms_freedtree) -
2476 range_tree_space(*defer_tree);
2478 defer_delta -= range_tree_space(*defer_tree);
2481 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2484 * If there's a metaslab_load() in progress, wait for it to complete
2485 * so that we have a consistent view of the in-core space map.
2487 metaslab_load_wait(msp);
2490 * Move the frees from the defer_tree back to the free
2491 * range tree (if it's loaded). Swap the freedtree and the
2492 * defer_tree -- this is safe to do because we've just emptied out the defer_tree.
2495 range_tree_vacate(*defer_tree,
2496 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2497 if (defer_allowed) {
2498 range_tree_swap(&msp->ms_freedtree, defer_tree);
2500 range_tree_vacate(msp->ms_freedtree,
2501 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2504 space_map_update(msp->ms_sm);
2506 msp->ms_deferspace += defer_delta;
2507 ASSERT3S(msp->ms_deferspace, >=, 0);
2508 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2509 if (msp->ms_deferspace != 0) {
2511 * Keep syncing this metaslab until all deferred frees
2512 * are back in circulation.
2514 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2518 * Calculate the new weights before unloading any metaslabs.
2519 * This will give us the most accurate weighting.
2521 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2524 * If the metaslab is loaded and we've not tried to load or allocate
2525 * from it in 'metaslab_unload_delay' txgs, then unload it.
2527 if (msp->ms_loaded &&
2528 msp->ms_selected_txg + metaslab_unload_delay < txg) {
2529 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2530 VERIFY0(range_tree_space(
2531 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2534 if (!metaslab_debug_unload)
2535 metaslab_unload(msp);
2538 mutex_exit(&msp->ms_lock);
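/*
 * Hypothetical restatement of the defer accounting above: when deferring
 * is allowed, the space entering the defer bucket this txg is what was
 * just freed and the space leaving it is what was deferred TXG_DEFER_SIZE
 * txgs ago; when deferring is disabled (the pool is nearly out of space),
 * nothing enters, so the delta is purely negative.
 */
static int64_t
defer_delta_example(boolean_t defer_allowed, uint64_t freed_space,
    uint64_t defer_space)
{
	if (defer_allowed)
		return ((int64_t)freed_space - (int64_t)defer_space);
	return (-(int64_t)defer_space);
}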
2542 metaslab_sync_reassess(metaslab_group_t *mg)
2544 metaslab_group_alloc_update(mg);
2545 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2548 * Preload the next potential metaslabs
2550 metaslab_group_preload(mg);
2554 metaslab_distance(metaslab_t *msp, dva_t *dva)
2556 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2557 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2558 uint64_t start = msp->ms_id;
2560 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2561 return (1ULL << 63);
2564 return ((start - offset) << ms_shift);
2566 return ((offset - start) << ms_shift);
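/*
 * Illustrative example with made-up numbers (hypothetical helper): with
 * 1GB metaslabs (ms_shift of 30), a DVA that lands in metaslab 10 is
 * reported as 3GB away from metaslab 13 on the same vdev, and as
 * 1ULL << 63 (effectively infinite) from any metaslab on another vdev.
 */
static uint64_t
metaslab_distance_example(uint64_t ms_id, uint64_t dva_ms_id,
    uint64_t ms_shift, boolean_t same_vdev)
{
	if (!same_vdev)
		return (1ULL << 63);
	if (dva_ms_id < ms_id)
		return ((ms_id - dva_ms_id) << ms_shift);
	return ((dva_ms_id - ms_id) << ms_shift);
}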
2571 * ==========================================================================
2572 * Metaslab allocation tracing facility
2573 * ==========================================================================
2575 kstat_t *metaslab_trace_ksp;
2576 kstat_named_t metaslab_trace_over_limit;
2579 metaslab_alloc_trace_init(void)
2581 ASSERT(metaslab_alloc_trace_cache == NULL);
2582 metaslab_alloc_trace_cache = kmem_cache_create(
2583 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2584 0, NULL, NULL, NULL, NULL, NULL, 0);
2585 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2586 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2587 if (metaslab_trace_ksp != NULL) {
2588 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2589 kstat_named_init(&metaslab_trace_over_limit,
2590 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2591 kstat_install(metaslab_trace_ksp);
2596 metaslab_alloc_trace_fini(void)
2598 if (metaslab_trace_ksp != NULL) {
2599 kstat_delete(metaslab_trace_ksp);
2600 metaslab_trace_ksp = NULL;
2602 kmem_cache_destroy(metaslab_alloc_trace_cache);
2603 metaslab_alloc_trace_cache = NULL;
2607 * Add an allocation trace element to the allocation tracing list.
2610 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2611 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2613 if (!metaslab_trace_enabled)
2617 * When the tracing list reaches its maximum we remove
2618 * the second element in the list before adding a new one.
2619 * By removing the second element we preserve the original
2620 * entry as a clue to what allocation steps have already been performed.
2623 if (zal->zal_size == metaslab_trace_max_entries) {
2624 metaslab_alloc_trace_t *mat_next;
2626 panic("too many entries in allocation list");
2628 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2630 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2631 list_remove(&zal->zal_list, mat_next);
2632 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2635 metaslab_alloc_trace_t *mat =
2636 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2637 list_link_init(&mat->mat_list_node);
2640 mat->mat_size = psize;
2641 mat->mat_dva_id = dva_id;
2642 mat->mat_offset = offset;
2643 mat->mat_weight = 0;
2646 mat->mat_weight = msp->ms_weight;
2649 * The list is part of the zio so locking is not required. Only
2650 * a single thread will perform allocations for a given zio.
2652 list_insert_tail(&zal->zal_list, mat);
2655 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2659 metaslab_trace_init(zio_alloc_list_t *zal)
2661 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2662 offsetof(metaslab_alloc_trace_t, mat_list_node));
2667 metaslab_trace_fini(zio_alloc_list_t *zal)
2669 metaslab_alloc_trace_t *mat;
2671 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2672 kmem_cache_free(metaslab_alloc_trace_cache, mat);
2673 list_destroy(&zal->zal_list);
2678 * ==========================================================================
2679 * Metaslab block operations
2680 * ==========================================================================
2684 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2686 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2687 flags & METASLAB_DONT_THROTTLE)
2690 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2691 if (!mg->mg_class->mc_alloc_throttle_enabled)
2694 (void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2698 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2700 if (!(flags & METASLAB_ASYNC_ALLOC) ||
2701 flags & METASLAB_DONT_THROTTLE)
2704 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2705 if (!mg->mg_class->mc_alloc_throttle_enabled)
2708 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2712 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2715 const dva_t *dva = bp->blk_dva;
2716 int ndvas = BP_GET_NDVAS(bp);
2718 for (int d = 0; d < ndvas; d++) {
2719 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2720 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2721 VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2727 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2730 range_tree_t *rt = msp->ms_tree;
2731 metaslab_class_t *mc = msp->ms_group->mg_class;
2733 VERIFY(!msp->ms_condensing);
2735 start = mc->mc_ops->msop_alloc(msp, size);
2736 if (start != -1ULL) {
2737 metaslab_group_t *mg = msp->ms_group;
2738 vdev_t *vd = mg->mg_vd;
2740 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2741 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2742 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2743 range_tree_remove(rt, start, size);
2745 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2746 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2748 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
2750 /* Track the last successful allocation */
2751 msp->ms_alloc_txg = txg;
2752 metaslab_verify_space(msp, txg);
2756 * Now that we've attempted the allocation we need to update the
2757 * metaslab's maximum block size since it may have changed.
2759 msp->ms_max_size = metaslab_block_maxsize(msp);
2764 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2765 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2767 metaslab_t *msp = NULL;
2768 uint64_t offset = -1ULL;
2769 uint64_t activation_weight;
2770 uint64_t target_distance;
2773 activation_weight = METASLAB_WEIGHT_PRIMARY;
2774 for (i = 0; i < d; i++) {
2775 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2776 activation_weight = METASLAB_WEIGHT_SECONDARY;
2781 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
2782 search->ms_weight = UINT64_MAX;
2783 search->ms_start = 0;
2785 boolean_t was_active;
2786 avl_tree_t *t = &mg->mg_metaslab_tree;
2789 mutex_enter(&mg->mg_lock);
2792 * Find the metaslab with the highest weight that is less
2793 * than what we've already tried. In the common case, this
2794 * means that we will examine each metaslab at most once.
2795 * Note that concurrent callers could reorder metaslabs
2796 * by activation/passivation once we have dropped the mg_lock.
2797 * If a metaslab is activated by another thread, and we fail
2798 * to allocate from the metaslab we have selected, we may
2799 * not try the newly-activated metaslab, and instead activate
2800 * another metaslab. This is not optimal, but generally
2801 * does not cause any problems (a possible exception being
2802 * if every metaslab is completely full except for the
2803 * newly-activated metaslab which we fail to examine).
2805 msp = avl_find(t, search, &idx);
2807 msp = avl_nearest(t, idx, AVL_AFTER);
2808 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2810 if (!metaslab_should_allocate(msp, asize)) {
2811 metaslab_trace_add(zal, mg, msp, asize, d,
2817 * If the selected metaslab is condensing, skip it.
2819 if (msp->ms_condensing)
2822 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2823 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2826 target_distance = min_distance +
2827 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2830 for (i = 0; i < d; i++) {
2831 if (metaslab_distance(msp, &dva[i]) <
2838 mutex_exit(&mg->mg_lock);
2840 kmem_free(search, sizeof (*search));
2843 search->ms_weight = msp->ms_weight;
2844 search->ms_start = msp->ms_start + 1;
2846 mutex_enter(&msp->ms_lock);
2849 * Ensure that the metaslab we have selected is still
2850 * capable of handling our request. It's possible that
2851 * another thread may have changed the weight while we
2852 * were blocked on the metaslab lock. We check the
2853 * active status first to see if we need to reselect a new metaslab.
2856 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
2857 mutex_exit(&msp->ms_lock);
2861 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2862 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2863 metaslab_passivate(msp,
2864 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2865 mutex_exit(&msp->ms_lock);
2869 if (metaslab_activate(msp, activation_weight) != 0) {
2870 mutex_exit(&msp->ms_lock);
2873 msp->ms_selected_txg = txg;
2876 * Now that we have the lock, recheck to see if we should
2877 * continue to use this metaslab for this allocation. The
2878 * metaslab is now loaded so metaslab_should_allocate() can
2879 * accurately determine if the allocation attempt should proceed.
2882 if (!metaslab_should_allocate(msp, asize)) {
2883 /* Passivate this metaslab and select a new one. */
2884 metaslab_trace_add(zal, mg, msp, asize, d,
2890 * If this metaslab is currently condensing then pick again as
2891 * we can't manipulate this metaslab until it's committed to disk.
2894 if (msp->ms_condensing) {
2895 metaslab_trace_add(zal, mg, msp, asize, d,
2897 mutex_exit(&msp->ms_lock);
2901 offset = metaslab_block_alloc(msp, asize, txg);
2902 metaslab_trace_add(zal, mg, msp, asize, d, offset);
2904 if (offset != -1ULL) {
2905 /* Proactively passivate the metaslab, if needed */
2906 metaslab_segment_may_passivate(msp);
2910 ASSERT(msp->ms_loaded);
2913 * We were unable to allocate from this metaslab so determine
2914 * a new weight for this metaslab. Now that we have loaded
2915 * the metaslab we can provide a better hint to the metaslab allocator.
2918 * For space-based metaslabs, we use the maximum block size.
2919 * This information is only available when the metaslab
2920 * is loaded and is more accurate than the generic free
2921 * space weight that was calculated by metaslab_weight().
2922 * This information allows us to quickly compare the maximum
2923 * available allocation in the metaslab to the allocation
2924 * size being requested.
2926 * For segment-based metaslabs, determine the new weight
2927 * based on the highest bucket in the range tree. We
2928 * explicitly use the loaded segment weight (i.e. the range
2929 * tree histogram) since it contains the space that is
2930 * currently available for allocation and is accurate
2931 * even within a sync pass.
2933 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2934 uint64_t weight = metaslab_block_maxsize(msp);
2935 WEIGHT_SET_SPACEBASED(weight);
2936 metaslab_passivate(msp, weight);
2938 metaslab_passivate(msp,
2939 metaslab_weight_from_range_tree(msp));
2943 * We have just failed an allocation attempt, check
2944 * that metaslab_should_allocate() agrees. Otherwise,
2945 * we may end up in an infinite loop retrying the same metaslab.
2948 ASSERT(!metaslab_should_allocate(msp, asize));
2949 mutex_exit(&msp->ms_lock);
2951 mutex_exit(&msp->ms_lock);
2952 kmem_free(search, sizeof (*search));
2957 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2958 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2961 ASSERT(mg->mg_initialized);
2963 offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2964 min_distance, dva, d);
2966 mutex_enter(&mg->mg_lock);
2967 if (offset == -1ULL) {
2968 mg->mg_failed_allocations++;
2969 metaslab_trace_add(zal, mg, NULL, asize, d,
2970 TRACE_GROUP_FAILURE);
2971 if (asize == SPA_GANGBLOCKSIZE) {
2973 * This metaslab group was unable to allocate
2974 * the minimum gang block size so it must be out of
2975 * space. We must notify the allocation throttle
2976 * to start skipping allocation attempts to this
2977 * metaslab group until more space becomes available.
2978 * Note: this failure cannot be caused by the
2979 * allocation throttle since the allocation throttle
2980 * is only responsible for skipping devices and
2981 * not failing block allocations.
2983 mg->mg_no_free_space = B_TRUE;
2986 mg->mg_allocations++;
2987 mutex_exit(&mg->mg_lock);
2992 * If we have to write a ditto block (i.e. more than one DVA for a given BP)
2993 * on the same vdev as an existing DVA of this BP, then try to allocate it
2994 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
2995 * existing DVAs.
2997 int ditto_same_vdev_distance_shift = 3;
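/*
 * Illustrative arithmetic (hypothetical numbers): with the shift above, a
 * 1TB top-level vdev asks ditto copies of the same block to land at least
 * 1TB >> 3 = 128GB apart; the constraint is dropped when that distance
 * would fit within a single metaslab.
 */
static uint64_t
ditto_min_distance_example(uint64_t vdev_asize, int shift)
{
	return (vdev_asize >> shift);
}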
3000 * Allocate a block for the specified i/o.
3003 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
3004 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
3005 zio_alloc_list_t *zal)
3007 metaslab_group_t *mg, *rotor;
3009 boolean_t try_hard = B_FALSE;
3011 ASSERT(!DVA_IS_VALID(&dva[d]));
3014 * For testing, make some blocks above a certain size be gang blocks.
3016 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
3017 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
3018 return (SET_ERROR(ENOSPC));
3022 * Start at the rotor and loop through all mgs until we find something.
3023 * Note that there's no locking on mc_rotor or mc_aliquot because
3024 * nothing actually breaks if we miss a few updates -- we just won't
3025 * allocate quite as evenly. It all balances out over time.
3027 * If we are doing ditto or log blocks, try to spread them across
3028 * consecutive vdevs. If we're forced to reuse a vdev before we've
3029 * allocated all of our ditto blocks, then try and spread them out on
3030 * that vdev as much as possible. If it turns out to not be possible,
3031 * gradually lower our standards until anything becomes acceptable.
3032 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
3033 * gives us hope of containing our fault domains to something we're
3034 * able to reason about. Otherwise, any two top-level vdev failures
3035 * will guarantee the loss of data. With consecutive allocation,
3036 * only two adjacent top-level vdev failures will result in data loss.
3038 * If we are doing gang blocks (hintdva is non-NULL), try to keep
3039 * ourselves on the same vdev as our gang block header. That
3040 * way, we can hope for locality in vdev_cache, plus it makes our
3041 * fault domains something tractable.
3044 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
3047 * It's possible the vdev we're using as the hint no
3048 * longer exists (i.e. removed). Consult the rotor when that happens.
3054 if (flags & METASLAB_HINTBP_AVOID &&
3055 mg->mg_next != NULL)
3060 } else if (d != 0) {
3061 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
3062 mg = vd->vdev_mg->mg_next;
3068 * If the hint put us into the wrong metaslab class, or into a
3069 * metaslab group that has been passivated, just follow the rotor.
3071 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
3077 boolean_t allocatable;
3079 ASSERT(mg->mg_activation_count == 1);
3083 * Don't allocate from faulted devices.
3086 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
3087 allocatable = vdev_allocatable(vd);
3088 spa_config_exit(spa, SCL_ZIO, FTAG);
3090 allocatable = vdev_allocatable(vd);
3094 * Determine if the selected metaslab group is eligible
3095 * for allocations. If we're ganging then don't allow
3096 * this metaslab group to skip allocations since that would
3097 * inadvertently return ENOSPC and suspend the pool
3098 * even though space is still available.
3100 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3101 allocatable = metaslab_group_allocatable(mg, rotor,
3106 metaslab_trace_add(zal, mg, NULL, psize, d,
3107 TRACE_NOT_ALLOCATABLE);
3111 ASSERT(mg->mg_initialized);
3114 * Avoid writing single-copy data to a failing,
3115 * non-redundant vdev, unless we've already tried all other vdevs.
3118 if ((vd->vdev_stat.vs_write_errors > 0 ||
3119 vd->vdev_state < VDEV_STATE_HEALTHY) &&
3120 d == 0 && !try_hard && vd->vdev_children == 0) {
3121 metaslab_trace_add(zal, mg, NULL, psize, d,
3126 ASSERT(mg->mg_class == mc);
3129 * If we don't need to try hard, then require that the
3130 * block be 1/8th of the device away from any other DVAs
3131 * in this BP. If we are trying hard, allow any offset
3132 * to be used (distance=0).
3134 uint64_t distance = 0;
3136 distance = vd->vdev_asize >>
3137 ditto_same_vdev_distance_shift;
3138 if (distance <= (1ULL << vd->vdev_ms_shift))
3142 uint64_t asize = vdev_psize_to_asize(vd, psize);
3143 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3145 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3148 if (offset != -1ULL) {
3150 * If we've just selected this metaslab group,
3151 * figure out whether the corresponding vdev is
3152 * over- or under-used relative to the pool,
3153 * and set an allocation bias to even it out.
3155 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3156 vdev_stat_t *vs = &vd->vdev_stat;
3159 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3160 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3163 * Calculate how much more or less we should
3164 * try to allocate from this device during
3165 * this iteration around the rotor.
3166 * For example, if a device is 80% full
3167 * and the pool is 20% full then we should
3168 * reduce allocations by 60% on this device.
3170 * mg_bias = (20 - 80) * 512K / 100 = -307K
3172 * This reduces allocations by 307K for this iteration.
3175 mg->mg_bias = ((cu - vu) *
3176 (int64_t)mg->mg_aliquot) / 100;
3177 } else if (!metaslab_bias_enabled) {
3181 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3182 mg->mg_aliquot + mg->mg_bias) {
3183 mc->mc_rotor = mg->mg_next;
3187 DVA_SET_VDEV(&dva[d], vd->vdev_id);
3188 DVA_SET_OFFSET(&dva[d], offset);
3189 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3190 DVA_SET_ASIZE(&dva[d], asize);
3195 mc->mc_rotor = mg->mg_next;
3197 } while ((mg = mg->mg_next) != rotor);
3200 * If we haven't tried hard, do so now.
3207 bzero(&dva[d], sizeof (dva_t));
3209 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
3210 return (SET_ERROR(ENOSPC));
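/*
 * Hypothetical restatement of the bias calculation described above: vu and
 * cu are the vdev's and the class's percent-full figures, so a device that
 * is 80% full in a pool that is 20% full is biased by
 * (20 - 80) * aliquot / 100, i.e. -60% of an aliquot (roughly -307K for a
 * 512K aliquot).
 */
static int64_t
mg_bias_example(int64_t vu, int64_t cu, int64_t aliquot)
{
	return ((cu - vu) * aliquot / 100);
}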
3214 * Free the block represented by DVA in the context of the specified
3215 * transaction group.
3218 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
3220 uint64_t vdev = DVA_GET_VDEV(dva);
3221 uint64_t offset = DVA_GET_OFFSET(dva);
3222 uint64_t size = DVA_GET_ASIZE(dva);
3226 ASSERT(DVA_IS_VALID(dva));
3228 if (txg > spa_freeze_txg(spa))
3231 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3232 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3233 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
3234 (u_longlong_t)vdev, (u_longlong_t)offset);
3239 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3241 if (DVA_GET_GANG(dva))
3242 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3244 mutex_enter(&msp->ms_lock);
3247 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
3250 VERIFY(!msp->ms_condensing);
3251 VERIFY3U(offset, >=, msp->ms_start);
3252 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3253 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
3255 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3256 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3257 range_tree_add(msp->ms_tree, offset, size);
3258 msp->ms_max_size = metaslab_block_maxsize(msp);
3260 VERIFY3U(txg, ==, spa->spa_syncing_txg);
3261 if (range_tree_space(msp->ms_freeingtree) == 0)
3262 vdev_dirty(vd, VDD_METASLAB, msp, txg);
3263 range_tree_add(msp->ms_freeingtree, offset, size);
3266 mutex_exit(&msp->ms_lock);
3270 * Intent log support: upon opening the pool after a crash, notify the SPA
3271 * of blocks that the intent log has allocated for immediate write, but
3272 * which are still considered free by the SPA because the last transaction
3273 * group didn't commit yet.
3276 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3278 uint64_t vdev = DVA_GET_VDEV(dva);
3279 uint64_t offset = DVA_GET_OFFSET(dva);
3280 uint64_t size = DVA_GET_ASIZE(dva);
3285 ASSERT(DVA_IS_VALID(dva));
3287 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3288 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
3289 return (SET_ERROR(ENXIO));
3291 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3293 if (DVA_GET_GANG(dva))
3294 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3296 mutex_enter(&msp->ms_lock);
3298 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3299 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
3301 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
3302 error = SET_ERROR(ENOENT);
3304 if (error || txg == 0) { /* txg == 0 indicates dry run */
3305 mutex_exit(&msp->ms_lock);
3309 VERIFY(!msp->ms_condensing);
3310 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3311 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3312 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
3313 range_tree_remove(msp->ms_tree, offset, size);
3315 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
3316 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
3317 vdev_dirty(vd, VDD_METASLAB, msp, txg);
3318 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
3321 mutex_exit(&msp->ms_lock);
3327 * Reserve some allocation slots. The reservation system must be called
3328 * before we call into the allocator. If there aren't any available slots
3329 * then the I/O will be throttled until an I/O completes and its slots are
3330 * freed up. The function returns true if it was successful in placing
3331 * the reservation.
3334 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3337 uint64_t available_slots = 0;
3338 boolean_t slot_reserved = B_FALSE;
3340 ASSERT(mc->mc_alloc_throttle_enabled);
3341 mutex_enter(&mc->mc_lock);
3343 uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
3344 if (reserved_slots < mc->mc_alloc_max_slots)
3345 available_slots = mc->mc_alloc_max_slots - reserved_slots;
3347 if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3349 * We reserve the slots individually so that we can unreserve
3350 * them individually when an I/O completes.
3352 for (int d = 0; d < slots; d++) {
3353 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3355 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3356 slot_reserved = B_TRUE;
3359 mutex_exit(&mc->mc_lock);
3360 return (slot_reserved);
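/*
 * Illustrative sketch of the slot check above (hypothetical helper): a
 * request for `slots` slots succeeds when that many slots remain below
 * mc_alloc_max_slots, or unconditionally for gang allocations so that
 * gang blocks can always make forward progress.
 */
static boolean_t
throttle_would_reserve_example(uint64_t reserved, uint64_t max_slots,
    int slots, boolean_t is_gang)
{
	uint64_t avail = (reserved < max_slots) ? (max_slots - reserved) : 0;

	return ((uint64_t)slots <= avail || is_gang);
}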
3364 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3366 ASSERT(mc->mc_alloc_throttle_enabled);
3367 mutex_enter(&mc->mc_lock);
3368 for (int d = 0; d < slots; d++) {
3369 (void) refcount_remove(&mc->mc_alloc_slots, zio);
3371 mutex_exit(&mc->mc_lock);
3375 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
3376 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3377 zio_alloc_list_t *zal, zio_t *zio)
3379 dva_t *dva = bp->blk_dva;
3380 dva_t *hintdva = hintbp->blk_dva;
3383 ASSERT(bp->blk_birth == 0);
3384 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
3386 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3388 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
3389 spa_config_exit(spa, SCL_ALLOC, FTAG);
3390 return (SET_ERROR(ENOSPC));
3393 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3394 ASSERT(BP_GET_NDVAS(bp) == 0);
3395 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
3396 ASSERT3P(zal, !=, NULL);
3398 for (int d = 0; d < ndvas; d++) {
3399 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
3402 for (d--; d >= 0; d--) {
3403 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
3404 metaslab_group_alloc_decrement(spa,
3405 DVA_GET_VDEV(&dva[d]), zio, flags);
3406 bzero(&dva[d], sizeof (dva_t));
3408 spa_config_exit(spa, SCL_ALLOC, FTAG);
3412 * Update the metaslab group's queue depth
3413 * based on the newly allocated dva.
3415 metaslab_group_alloc_increment(spa,
3416 DVA_GET_VDEV(&dva[d]), zio, flags);
3421 ASSERT(BP_GET_NDVAS(bp) == ndvas);
3423 spa_config_exit(spa, SCL_ALLOC, FTAG);
3425 BP_SET_BIRTH(bp, txg, txg);
3431 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3433 const dva_t *dva = bp->blk_dva;
3434 int ndvas = BP_GET_NDVAS(bp);
3436 ASSERT(!BP_IS_HOLE(bp));
3437 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
3439 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
3441 for (int d = 0; d < ndvas; d++)
3442 metaslab_free_dva(spa, &dva[d], txg, now);
3444 spa_config_exit(spa, SCL_FREE, FTAG);
3448 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3450 const dva_t *dva = bp->blk_dva;
3451 int ndvas = BP_GET_NDVAS(bp);
3454 ASSERT(!BP_IS_HOLE(bp));
3458 * First do a dry run to make sure all DVAs are claimable,
3459 * so we don't have to unwind from partial failures below.
3461 if ((error = metaslab_claim(spa, bp, 0)) != 0)
3465 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3467 for (int d = 0; d < ndvas; d++)
3468 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
3471 spa_config_exit(spa, SCL_ALLOC, FTAG);
3473 ASSERT(error == 0 || txg == 0);
3479 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3481 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3484 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3485 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3486 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3487 vdev_t *vd = vdev_lookup_top(spa, vdev);
3488 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
3489 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
3490 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3493 range_tree_verify(msp->ms_tree, offset, size);
3495 range_tree_verify(msp->ms_freeingtree, offset, size);
3496 range_tree_verify(msp->ms_freedtree, offset, size);
3497 for (int j = 0; j < TXG_DEFER_SIZE; j++)
3498 range_tree_verify(msp->ms_defertree[j], offset, size);
3500 spa_config_exit(spa, SCL_VDEV, FTAG);