4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zfeature.h>
37 #define WITH_DF_BLOCK_ALLOCATOR
40 * Allow allocations to switch to gang blocks quickly. We do this to
41 * avoid having to load lots of space_maps in a given txg. There are,
42 * however, some cases where we want to avoid "fast" ganging and instead
43 * we want to do an exhaustive search of all metaslabs on this device.
44 * Currently we don't allow any gang, slog, or dump device related allocations to be 'fast' ganged.
47 #define CAN_FASTGANG(flags) \
48 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
49 METASLAB_GANG_AVOID)))
51 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
52 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
53 #define METASLAB_ACTIVE_MASK \
54 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
57 * Metaslab granularity, in bytes. This is roughly similar to what would be
58 * referred to as the "stripe size" in traditional RAID arrays. In normal
59 * operation, we will try to write this amount of data to a top-level vdev
60 * before moving on to the next one.
62 uint64_t metaslab_aliquot = 512ULL << 10;
64 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
67 * The in-core space map representation is more compact than its on-disk form.
68 * The zfs_condense_pct determines how much more compact the in-core
69 * space_map representation must be before we compact it on-disk.
70 * Values should be greater than or equal to 100.
72 int zfs_condense_pct = 200;
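/*
 * For example, with the default value of 200 a space map only becomes a
 * candidate for condensing once its on-disk size is at least twice the
 * size of its minimal in-core derived representation.
 */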
75 * Condensing a metaslab is not guaranteed to actually reduce the amount of
76 * space used on disk. In particular, a space map uses data in increments of
77 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
78 * same number of blocks after condensing. Since the goal of condensing is to
79 * reduce the number of IOPs required to read the space map, we only want to
80 * condense when we can be sure we will reduce the number of blocks used by the
81 * space map. Unfortunately, we cannot precisely compute whether or not this is
82 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
83 * we apply the following heuristic: do not condense a spacemap unless the
84 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold space map blocks.
87 int zfs_metaslab_condense_block_threshold = 4;
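/*
 * Illustration (block size assumed): with the default threshold of 4 and a
 * 4K space map block size, a space map is only considered for condensing
 * once it occupies more than 16K on disk. The actual block size used is
 * MAX(doi_data_block_size, 1 << vdev_ashift).
 */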
90 * The zfs_mg_noalloc_threshold defines which metaslab groups should
91 * be eligible for allocation. The value is defined as a percentage of
92 * free space. Metaslab groups that have more free space than
93 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
94 * a metaslab group's free space is less than or equal to the
95 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
96 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
97 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
98 * groups are allowed to accept allocations. Gang blocks are always
99 * eligible to allocate on any metaslab group. The default value of 0 means
100 * no metaslab group will be excluded based on this criterion.
102 int zfs_mg_noalloc_threshold = 0;
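/*
 * For example, with zfs_mg_noalloc_threshold set to 30, a metaslab group
 * whose free space falls to 30% or less is skipped for new allocations
 * until every group in the pool has also fallen to 30% or less, at which
 * point all groups become eligible again.
 */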
105 * Metaslab groups are considered eligible for allocations if their
106 * fragmentation metric (measured as a percentage) is less than or equal to
107 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
108 * then it will be skipped unless all metaslab groups within the metaslab
109 * class have also crossed this threshold.
111 int zfs_mg_fragmentation_threshold = 85;
114 * Allow metaslabs to keep their active state as long as their fragmentation
115 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
116 * active metaslab that exceeds this threshold will no longer keep its active
117 * status allowing better metaslabs to be selected.
119 int zfs_metaslab_fragmentation_threshold = 70;
122 * When set, we will load all metaslabs when the pool is first opened.
124 int metaslab_debug_load = 0;
127 * When set, we will prevent metaslabs from being unloaded.
129 int metaslab_debug_unload = 0;
132 * Minimum size which forces the dynamic allocator to change
133 * its allocation strategy. Once the space map cannot satisfy
134 * an allocation of this size then it switches to using a more
135 * aggressive strategy (i.e. search by size rather than offset).
137 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
140 * The minimum free space, in percent, which must be available
141 * in a space map to continue allocations in a first-fit fashion.
142 * Once the space_map's free space drops below this level we dynamically
143 * switch to using best-fit allocations.
145 int metaslab_df_free_pct = 4;
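/*
 * Taken together with metaslab_df_alloc_threshold: allocations are first-fit
 * (offset-ordered tree) until either the largest free segment drops below
 * metaslab_df_alloc_threshold or less than metaslab_df_free_pct percent of
 * the metaslab remains free, after which the size-ordered tree is used
 * (best-fit).
 */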
148 * Percentage of all cpus that can be used by the metaslab taskq.
150 int metaslab_load_pct = 50;
153 * Determines how many txgs a metaslab may remain loaded without having any
154 * allocations from it. As long as a metaslab continues to be used we will keep it loaded.
157 int metaslab_unload_delay = TXG_SIZE * 2;
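/*
 * TXG_SIZE is 4, so by default a metaslab can sit idle for 8 txgs before it
 * becomes eligible for unloading.
 */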
160 * Max number of metaslabs per group to preload.
162 int metaslab_preload_limit = SPA_DVAS_PER_BP;
165 * Enable/disable preloading of metaslabs.
167 int metaslab_preload_enabled = B_TRUE;
170 * Enable/disable fragmentation weighting on metaslabs.
172 int metaslab_fragmentation_factor_enabled = B_TRUE;
175 * Enable/disable lba weighting (i.e. outer tracks are given preference).
177 int metaslab_lba_weighting_enabled = B_TRUE;
180 * Enable/disable metaslab group biasing.
182 int metaslab_bias_enabled = B_TRUE;
184 static uint64_t metaslab_fragmentation(metaslab_t *);
187 * ==========================================================================
188 * Metaslab classes
189 * ==========================================================================
192 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
194 metaslab_class_t *mc;
196 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
201 mutex_init(&mc->mc_fastwrite_lock, NULL, MUTEX_DEFAULT, NULL);
207 metaslab_class_destroy(metaslab_class_t *mc)
209 ASSERT(mc->mc_rotor == NULL);
210 ASSERT(mc->mc_alloc == 0);
211 ASSERT(mc->mc_deferred == 0);
212 ASSERT(mc->mc_space == 0);
213 ASSERT(mc->mc_dspace == 0);
215 mutex_destroy(&mc->mc_fastwrite_lock);
216 kmem_free(mc, sizeof (metaslab_class_t));
220 metaslab_class_validate(metaslab_class_t *mc)
222 metaslab_group_t *mg;
226 * Must hold one of the spa_config locks.
228 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
229 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
231 if ((mg = mc->mc_rotor) == NULL)
236 ASSERT(vd->vdev_mg != NULL);
237 ASSERT3P(vd->vdev_top, ==, vd);
238 ASSERT3P(mg->mg_class, ==, mc);
239 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
240 } while ((mg = mg->mg_next) != mc->mc_rotor);
246 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
247 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
249 atomic_add_64(&mc->mc_alloc, alloc_delta);
250 atomic_add_64(&mc->mc_deferred, defer_delta);
251 atomic_add_64(&mc->mc_space, space_delta);
252 atomic_add_64(&mc->mc_dspace, dspace_delta);
256 metaslab_class_get_alloc(metaslab_class_t *mc)
258 return (mc->mc_alloc);
262 metaslab_class_get_deferred(metaslab_class_t *mc)
264 return (mc->mc_deferred);
268 metaslab_class_get_space(metaslab_class_t *mc)
270 return (mc->mc_space);
274 metaslab_class_get_dspace(metaslab_class_t *mc)
276 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
280 metaslab_class_histogram_verify(metaslab_class_t *mc)
282 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
286 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
289 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
292 for (c = 0; c < rvd->vdev_children; c++) {
293 vdev_t *tvd = rvd->vdev_child[c];
294 metaslab_group_t *mg = tvd->vdev_mg;
297 * Skip any holes, uninitialized top-levels, or
298 * vdevs that are not in this metaslab class.
300 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
301 mg->mg_class != mc) {
305 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
306 mc_hist[i] += mg->mg_histogram[i];
309 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
310 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
312 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
316 * Calculate the metaslab class's fragmentation metric. The metric
317 * is weighted based on the space contribution of each metaslab group.
318 * The return value will be a number between 0 and 100 (inclusive), or
319 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
320 * zfs_frag_table for more information about the metric.
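 *
 * For example, a class with one group contributing 1TB at 20% fragmentation
 * and another contributing 3TB at 60% would report
 * (1 * 20 + 3 * 60) / 4 = 50.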
323 metaslab_class_fragmentation(metaslab_class_t *mc)
325 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
326 uint64_t fragmentation = 0;
329 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
331 for (c = 0; c < rvd->vdev_children; c++) {
332 vdev_t *tvd = rvd->vdev_child[c];
333 metaslab_group_t *mg = tvd->vdev_mg;
336 * Skip any holes, uninitialized top-levels, or
337 * vdevs that are not in this metaslab class.
339 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
340 mg->mg_class != mc) {
345 * If a metaslab group does not contain a fragmentation
346 * metric then just bail out.
348 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
349 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
350 return (ZFS_FRAG_INVALID);
354 * Determine how much this metaslab_group is contributing
355 * to the overall pool fragmentation metric.
357 fragmentation += mg->mg_fragmentation *
358 metaslab_group_get_space(mg);
360 fragmentation /= metaslab_class_get_space(mc);
362 ASSERT3U(fragmentation, <=, 100);
363 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
364 return (fragmentation);
368 * Calculate the amount of expandable space that is available in
369 * this metaslab class. If a device is expanded then its expandable
370 * space will be the amount of allocatable space that is currently not
371 * part of this metaslab class.
374 metaslab_class_expandable_space(metaslab_class_t *mc)
376 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
380 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
381 for (c = 0; c < rvd->vdev_children; c++) {
382 vdev_t *tvd = rvd->vdev_child[c];
383 metaslab_group_t *mg = tvd->vdev_mg;
385 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
386 mg->mg_class != mc) {
390 space += tvd->vdev_max_asize - tvd->vdev_asize;
392 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
397 * ==========================================================================
398 * Metaslab groups
399 * ==========================================================================
402 metaslab_compare(const void *x1, const void *x2)
404 const metaslab_t *m1 = x1;
405 const metaslab_t *m2 = x2;
407 if (m1->ms_weight < m2->ms_weight)
409 if (m1->ms_weight > m2->ms_weight)
413 * If the weights are identical, use the offset to force uniqueness.
415 if (m1->ms_start < m2->ms_start)
417 if (m1->ms_start > m2->ms_start)
420 ASSERT3P(m1, ==, m2);
426 * Update the allocatable flag and the metaslab group's capacity.
427 * The allocatable flag is set to true if the capacity is below
428 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
429 * from allocatable to non-allocatable or vice versa then the metaslab
430 * group's class is updated to reflect the transition.
433 metaslab_group_alloc_update(metaslab_group_t *mg)
435 vdev_t *vd = mg->mg_vd;
436 metaslab_class_t *mc = mg->mg_class;
437 vdev_stat_t *vs = &vd->vdev_stat;
438 boolean_t was_allocatable;
440 ASSERT(vd == vd->vdev_top);
442 mutex_enter(&mg->mg_lock);
443 was_allocatable = mg->mg_allocatable;
445 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
449 * A metaslab group is considered allocatable if it has plenty
450 * of free space or is not heavily fragmented. We only take
451 * fragmentation into account if the metaslab group has a valid
452 * fragmentation metric (i.e. a value between 0 and 100).
454 mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
455 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
456 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
459 * The mc_alloc_groups maintains a count of the number of
460 * groups in this metaslab class that are still above the
461 * zfs_mg_noalloc_threshold. This is used by the allocating
462 * threads to determine if they should avoid allocations to
463 * a given group. The allocator will avoid allocations to a group
464 * if that group has reached or is below the zfs_mg_noalloc_threshold
465 * and there are still other groups that are above the threshold.
466 * When a group transitions from allocatable to non-allocatable or
467 * vice versa we update the metaslab class to reflect that change.
468 * When the mc_alloc_groups value drops to 0 that means that all
469 * groups have reached the zfs_mg_noalloc_threshold making all groups
470 * eligible for allocations. This effectively means that all devices
471 * are balanced again.
473 if (was_allocatable && !mg->mg_allocatable)
474 mc->mc_alloc_groups--;
475 else if (!was_allocatable && mg->mg_allocatable)
476 mc->mc_alloc_groups++;
478 mutex_exit(&mg->mg_lock);
482 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
484 metaslab_group_t *mg;
486 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
487 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
488 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
489 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
492 mg->mg_activation_count = 0;
494 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
495 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
501 metaslab_group_destroy(metaslab_group_t *mg)
503 ASSERT(mg->mg_prev == NULL);
504 ASSERT(mg->mg_next == NULL);
506 * We may have gone below zero with the activation count
507 * either because we never activated in the first place or
508 * because we're done, and possibly removing the vdev.
510 ASSERT(mg->mg_activation_count <= 0);
512 taskq_destroy(mg->mg_taskq);
513 avl_destroy(&mg->mg_metaslab_tree);
514 mutex_destroy(&mg->mg_lock);
515 kmem_free(mg, sizeof (metaslab_group_t));
519 metaslab_group_activate(metaslab_group_t *mg)
521 metaslab_class_t *mc = mg->mg_class;
522 metaslab_group_t *mgprev, *mgnext;
524 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
526 ASSERT(mc->mc_rotor != mg);
527 ASSERT(mg->mg_prev == NULL);
528 ASSERT(mg->mg_next == NULL);
529 ASSERT(mg->mg_activation_count <= 0);
531 if (++mg->mg_activation_count <= 0)
534 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
535 metaslab_group_alloc_update(mg);
537 if ((mgprev = mc->mc_rotor) == NULL) {
541 mgnext = mgprev->mg_next;
542 mg->mg_prev = mgprev;
543 mg->mg_next = mgnext;
544 mgprev->mg_next = mg;
545 mgnext->mg_prev = mg;
551 metaslab_group_passivate(metaslab_group_t *mg)
553 metaslab_class_t *mc = mg->mg_class;
554 metaslab_group_t *mgprev, *mgnext;
556 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
558 if (--mg->mg_activation_count != 0) {
559 ASSERT(mc->mc_rotor != mg);
560 ASSERT(mg->mg_prev == NULL);
561 ASSERT(mg->mg_next == NULL);
562 ASSERT(mg->mg_activation_count < 0);
566 taskq_wait_outstanding(mg->mg_taskq, 0);
567 metaslab_group_alloc_update(mg);
569 mgprev = mg->mg_prev;
570 mgnext = mg->mg_next;
575 mc->mc_rotor = mgnext;
576 mgprev->mg_next = mgnext;
577 mgnext->mg_prev = mgprev;
585 metaslab_group_get_space(metaslab_group_t *mg)
587 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
591 metaslab_group_histogram_verify(metaslab_group_t *mg)
594 vdev_t *vd = mg->mg_vd;
595 uint64_t ashift = vd->vdev_ashift;
598 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
601 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
604 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
605 SPACE_MAP_HISTOGRAM_SIZE + ashift);
607 for (m = 0; m < vd->vdev_ms_count; m++) {
608 metaslab_t *msp = vd->vdev_ms[m];
610 if (msp->ms_sm == NULL)
613 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
614 mg_hist[i + ashift] +=
615 msp->ms_sm->sm_phys->smp_histogram[i];
618 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
619 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
621 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
625 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
627 metaslab_class_t *mc = mg->mg_class;
628 uint64_t ashift = mg->mg_vd->vdev_ashift;
631 ASSERT(MUTEX_HELD(&msp->ms_lock));
632 if (msp->ms_sm == NULL)
635 mutex_enter(&mg->mg_lock);
636 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
637 mg->mg_histogram[i + ashift] +=
638 msp->ms_sm->sm_phys->smp_histogram[i];
639 mc->mc_histogram[i + ashift] +=
640 msp->ms_sm->sm_phys->smp_histogram[i];
642 mutex_exit(&mg->mg_lock);
646 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
648 metaslab_class_t *mc = mg->mg_class;
649 uint64_t ashift = mg->mg_vd->vdev_ashift;
652 ASSERT(MUTEX_HELD(&msp->ms_lock));
653 if (msp->ms_sm == NULL)
656 mutex_enter(&mg->mg_lock);
657 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
658 ASSERT3U(mg->mg_histogram[i + ashift], >=,
659 msp->ms_sm->sm_phys->smp_histogram[i]);
660 ASSERT3U(mc->mc_histogram[i + ashift], >=,
661 msp->ms_sm->sm_phys->smp_histogram[i]);
663 mg->mg_histogram[i + ashift] -=
664 msp->ms_sm->sm_phys->smp_histogram[i];
665 mc->mc_histogram[i + ashift] -=
666 msp->ms_sm->sm_phys->smp_histogram[i];
668 mutex_exit(&mg->mg_lock);
672 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
674 ASSERT(msp->ms_group == NULL);
675 mutex_enter(&mg->mg_lock);
678 avl_add(&mg->mg_metaslab_tree, msp);
679 mutex_exit(&mg->mg_lock);
681 mutex_enter(&msp->ms_lock);
682 metaslab_group_histogram_add(mg, msp);
683 mutex_exit(&msp->ms_lock);
687 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
689 mutex_enter(&msp->ms_lock);
690 metaslab_group_histogram_remove(mg, msp);
691 mutex_exit(&msp->ms_lock);
693 mutex_enter(&mg->mg_lock);
694 ASSERT(msp->ms_group == mg);
695 avl_remove(&mg->mg_metaslab_tree, msp);
696 msp->ms_group = NULL;
697 mutex_exit(&mg->mg_lock);
701 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
704 * Although in principle the weight can be any value, in
705 * practice we do not use values in the range [1, 511].
707 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
708 ASSERT(MUTEX_HELD(&msp->ms_lock));
710 mutex_enter(&mg->mg_lock);
711 ASSERT(msp->ms_group == mg);
712 avl_remove(&mg->mg_metaslab_tree, msp);
713 msp->ms_weight = weight;
714 avl_add(&mg->mg_metaslab_tree, msp);
715 mutex_exit(&mg->mg_lock);
719 * Calculate the fragmentation for a given metaslab group. We can use
720 * a simple average here since all metaslabs within the group must have
721 * the same size. The return value will be a value between 0 and 100
722 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
723 * group have a fragmentation metric.
726 metaslab_group_fragmentation(metaslab_group_t *mg)
728 vdev_t *vd = mg->mg_vd;
729 uint64_t fragmentation = 0;
730 uint64_t valid_ms = 0;
733 for (m = 0; m < vd->vdev_ms_count; m++) {
734 metaslab_t *msp = vd->vdev_ms[m];
736 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
740 fragmentation += msp->ms_fragmentation;
743 if (valid_ms <= vd->vdev_ms_count / 2)
744 return (ZFS_FRAG_INVALID);
746 fragmentation /= valid_ms;
747 ASSERT3U(fragmentation, <=, 100);
748 return (fragmentation);
752 * Determine if a given metaslab group should skip allocations. A metaslab
753 * group should avoid allocations if its free capacity is less than the
754 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
755 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
756 * that can still handle allocations.
759 metaslab_group_allocatable(metaslab_group_t *mg)
761 vdev_t *vd = mg->mg_vd;
762 spa_t *spa = vd->vdev_spa;
763 metaslab_class_t *mc = mg->mg_class;
766 * We use two key metrics to determine if a metaslab group is
767 * considered allocatable -- free space and fragmentation. If
768 * the free space is greater than the free space threshold and
769 * the fragmentation is less than the fragmentation threshold then
770 * consider the group allocatable. There are two cases when we will
771 * not consider these key metrics. The first is if the group is
772 * associated with a slog device and the second is if all groups
773 * in this metaslab class have already been considered ineligible for allocations.
776 return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
777 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
778 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) ||
779 mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
783 * ==========================================================================
784 * Range tree callbacks
785 * ==========================================================================
789 * Comparison function for the private size-ordered tree. Tree is sorted
790 * by size, larger sizes at the end of the tree.
793 metaslab_rangesize_compare(const void *x1, const void *x2)
795 const range_seg_t *r1 = x1;
796 const range_seg_t *r2 = x2;
797 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
798 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
800 if (rs_size1 < rs_size2)
802 if (rs_size1 > rs_size2)
805 if (r1->rs_start < r2->rs_start)
808 if (r1->rs_start > r2->rs_start)
815 * Create any block allocator specific components. The current allocators
816 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
819 metaslab_rt_create(range_tree_t *rt, void *arg)
821 metaslab_t *msp = arg;
823 ASSERT3P(rt->rt_arg, ==, msp);
824 ASSERT(msp->ms_tree == NULL);
826 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
827 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
831 * Destroy the block allocator specific components.
834 metaslab_rt_destroy(range_tree_t *rt, void *arg)
836 metaslab_t *msp = arg;
838 ASSERT3P(rt->rt_arg, ==, msp);
839 ASSERT3P(msp->ms_tree, ==, rt);
840 ASSERT0(avl_numnodes(&msp->ms_size_tree));
842 avl_destroy(&msp->ms_size_tree);
846 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
848 metaslab_t *msp = arg;
850 ASSERT3P(rt->rt_arg, ==, msp);
851 ASSERT3P(msp->ms_tree, ==, rt);
852 VERIFY(!msp->ms_condensing);
853 avl_add(&msp->ms_size_tree, rs);
857 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
859 metaslab_t *msp = arg;
861 ASSERT3P(rt->rt_arg, ==, msp);
862 ASSERT3P(msp->ms_tree, ==, rt);
863 VERIFY(!msp->ms_condensing);
864 avl_remove(&msp->ms_size_tree, rs);
868 metaslab_rt_vacate(range_tree_t *rt, void *arg)
870 metaslab_t *msp = arg;
872 ASSERT3P(rt->rt_arg, ==, msp);
873 ASSERT3P(msp->ms_tree, ==, rt);
876 * Normally one would walk the tree freeing nodes along the way.
877 * Since the nodes are shared with the range trees we can avoid
878 * walking all nodes and just reinitialize the avl tree. The nodes
879 * will be freed by the range tree, so we don't want to free them here.
881 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
882 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
885 static range_tree_ops_t metaslab_rt_ops = {
894 * ==========================================================================
895 * Metaslab block operations
896 * ==========================================================================
900 * Return the maximum contiguous segment within the metaslab.
903 metaslab_block_maxsize(metaslab_t *msp)
905 avl_tree_t *t = &msp->ms_size_tree;
908 if (t == NULL || (rs = avl_last(t)) == NULL)
911 return (rs->rs_end - rs->rs_start);
915 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
918 range_tree_t *rt = msp->ms_tree;
920 VERIFY(!msp->ms_condensing);
922 start = msp->ms_ops->msop_alloc(msp, size);
923 if (start != -1ULL) {
924 vdev_t *vd = msp->ms_group->mg_vd;
926 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
927 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
928 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
929 range_tree_remove(rt, start, size);
935 * ==========================================================================
936 * Common allocator routines
937 * ==========================================================================
940 #if defined(WITH_FF_BLOCK_ALLOCATOR) || \
941 defined(WITH_DF_BLOCK_ALLOCATOR) || \
942 defined(WITH_CF_BLOCK_ALLOCATOR)
944 * This is a helper function that can be used by the allocator to find
945 * a suitable block to allocate. This will search the specified AVL
946 * tree looking for a block that matches the specified criteria.
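 * The search starts at *cursor, rounding each candidate up to the requested
 * alignment; if nothing suitable is found past the cursor, the cursor is
 * reset to 0 and the search is retried once from the beginning of the tree.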
949 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
952 range_seg_t *rs, rsearch;
955 rsearch.rs_start = *cursor;
956 rsearch.rs_end = *cursor + size;
958 rs = avl_find(t, &rsearch, &where);
960 rs = avl_nearest(t, where, AVL_AFTER);
963 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
965 if (offset + size <= rs->rs_end) {
966 *cursor = offset + size;
969 rs = AVL_NEXT(t, rs);
973 * If we know we've searched the whole map (*cursor == 0), give up.
974 * Otherwise, reset the cursor to the beginning and try again.
980 return (metaslab_block_picker(t, cursor, size, align));
982 #endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
984 #if defined(WITH_FF_BLOCK_ALLOCATOR)
986 * ==========================================================================
987 * The first-fit block allocator
988 * ==========================================================================
991 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
994 * Find the largest power of 2 block size that evenly divides the
995 * requested size. This is used to try to allocate blocks with similar
996 * alignment from the same area of the metaslab (i.e. same cursor
997 * bucket), but it does not guarantee that other allocation sizes
998 * will not exist in the same region.
1000 uint64_t align = size & -size;
1001 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1002 avl_tree_t *t = &msp->ms_tree->rt_root;
1004 return (metaslab_block_picker(t, cursor, size, align));
1007 static metaslab_ops_t metaslab_ff_ops = {
1011 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
1012 #endif /* WITH_FF_BLOCK_ALLOCATOR */
1014 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1016 * ==========================================================================
1017 * Dynamic block allocator -
1018 * Uses the first-fit allocation scheme until space gets low and then
1019 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
1020 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1021 * ==========================================================================
1024 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1027 * Find the largest power of 2 block size that evenly divides the
1028 * requested size. This is used to try to allocate blocks with similar
1029 * alignment from the same area of the metaslab (i.e. same cursor
1030 * bucket), but it does not guarantee that other allocation sizes
1031 * will not exist in the same region.
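 *
 * For example, a 24K (0x6000) request yields an 8K (0x2000) alignment, so it
 * shares a cursor with other requests whose sizes are divisible by 8K but
 * not by 16K.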
1033 uint64_t align = size & -size;
1034 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1035 range_tree_t *rt = msp->ms_tree;
1036 avl_tree_t *t = &rt->rt_root;
1037 uint64_t max_size = metaslab_block_maxsize(msp);
1038 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1040 ASSERT(MUTEX_HELD(&msp->ms_lock));
1041 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1043 if (max_size < size)
1047 * If we're running low on space switch to using the size
1048 * sorted AVL tree (best-fit).
1050 if (max_size < metaslab_df_alloc_threshold ||
1051 free_pct < metaslab_df_free_pct) {
1052 t = &msp->ms_size_tree;
1056 return (metaslab_block_picker(t, cursor, size, 1ULL));
1059 static metaslab_ops_t metaslab_df_ops = {
1063 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1064 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1066 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1068 * ==========================================================================
1069 * Cursor fit block allocator -
1070 * Select the largest region in the metaslab, set the cursor to the beginning
1071 * of the range and the cursor_end to the end of the range. As allocations
1072 * are made, advance the cursor. Continue allocating from the cursor until
1073 * the range is exhausted and then find a new range.
1074 * ==========================================================================
1077 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1079 range_tree_t *rt = msp->ms_tree;
1080 avl_tree_t *t = &msp->ms_size_tree;
1081 uint64_t *cursor = &msp->ms_lbas[0];
1082 uint64_t *cursor_end = &msp->ms_lbas[1];
1083 uint64_t offset = 0;
1085 ASSERT(MUTEX_HELD(&msp->ms_lock));
1086 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1088 ASSERT3U(*cursor_end, >=, *cursor);
1090 if ((*cursor + size) > *cursor_end) {
1093 rs = avl_last(&msp->ms_size_tree);
1094 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1097 *cursor = rs->rs_start;
1098 *cursor_end = rs->rs_end;
1107 static metaslab_ops_t metaslab_cf_ops = {
1111 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1112 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1114 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1116 * ==========================================================================
1117 * New dynamic fit allocator -
1118 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1119 * contiguous blocks. If no region is found then just use the largest segment that remains.
1121 * ==========================================================================
1125 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1126 * to request from the allocator.
1128 uint64_t metaslab_ndf_clump_shift = 4;
1131 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1133 avl_tree_t *t = &msp->ms_tree->rt_root;
1135 range_seg_t *rs, rsearch;
1136 uint64_t hbit = highbit64(size);
1137 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1138 uint64_t max_size = metaslab_block_maxsize(msp);
1140 ASSERT(MUTEX_HELD(&msp->ms_lock));
1141 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1143 if (max_size < size)
1146 rsearch.rs_start = *cursor;
1147 rsearch.rs_end = *cursor + size;
1149 rs = avl_find(t, &rsearch, &where);
1150 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1151 t = &msp->ms_size_tree;
1153 rsearch.rs_start = 0;
1154 rsearch.rs_end = MIN(max_size,
1155 1ULL << (hbit + metaslab_ndf_clump_shift));
1156 rs = avl_find(t, &rsearch, &where);
1158 rs = avl_nearest(t, where, AVL_AFTER);
1162 if ((rs->rs_end - rs->rs_start) >= size) {
1163 *cursor = rs->rs_start + size;
1164 return (rs->rs_start);
1169 static metaslab_ops_t metaslab_ndf_ops = {
1173 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1174 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1178 * ==========================================================================
1179 * Metaslabs
1180 * ==========================================================================
1184 * Wait for any in-progress metaslab loads to complete.
1187 metaslab_load_wait(metaslab_t *msp)
1189 ASSERT(MUTEX_HELD(&msp->ms_lock));
1191 while (msp->ms_loading) {
1192 ASSERT(!msp->ms_loaded);
1193 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1198 metaslab_load(metaslab_t *msp)
1203 ASSERT(MUTEX_HELD(&msp->ms_lock));
1204 ASSERT(!msp->ms_loaded);
1205 ASSERT(!msp->ms_loading);
1207 msp->ms_loading = B_TRUE;
1210 * If the space map has not been allocated yet, then treat
1211 * all the space in the metaslab as free and add it to the ms_tree.
1214 if (msp->ms_sm != NULL)
1215 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1217 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1219 msp->ms_loaded = (error == 0);
1220 msp->ms_loading = B_FALSE;
1222 if (msp->ms_loaded) {
1223 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1224 range_tree_walk(msp->ms_defertree[t],
1225 range_tree_remove, msp->ms_tree);
1228 cv_broadcast(&msp->ms_load_cv);
1233 metaslab_unload(metaslab_t *msp)
1235 ASSERT(MUTEX_HELD(&msp->ms_lock));
1236 range_tree_vacate(msp->ms_tree, NULL, NULL);
1237 msp->ms_loaded = B_FALSE;
1238 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1242 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1245 vdev_t *vd = mg->mg_vd;
1246 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1250 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1251 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1252 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1254 ms->ms_start = id << vd->vdev_ms_shift;
1255 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1258 * We only open space map objects that already exist. All others
1259 * will be opened when we finally allocate an object for them.
1262 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1263 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1266 kmem_free(ms, sizeof (metaslab_t));
1270 ASSERT(ms->ms_sm != NULL);
1274 * We create the main range tree here, but we don't create the
1275 * alloctree and freetree until metaslab_sync_done(). This serves
1276 * two purposes: it allows metaslab_sync_done() to detect the
1277 * addition of new space; and for debugging, it ensures that we'd
1278 * data fault on any attempt to use this metaslab before it's ready.
1280 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1281 metaslab_group_add(mg, ms);
1283 ms->ms_fragmentation = metaslab_fragmentation(ms);
1284 ms->ms_ops = mg->mg_class->mc_ops;
1287 * If we're opening an existing pool (txg == 0) or creating
1288 * a new one (txg == TXG_INITIAL), all space is available now.
1289 * If we're adding space to an existing pool, the new space
1290 * does not become available until after this txg has synced.
1292 if (txg <= TXG_INITIAL)
1293 metaslab_sync_done(ms, 0);
1296 * If metaslab_debug_load is set and we're initializing a metaslab
1297 * that has an allocated space_map object then load its space
1298 * map so that we can verify frees.
1300 if (metaslab_debug_load && ms->ms_sm != NULL) {
1301 mutex_enter(&ms->ms_lock);
1302 VERIFY0(metaslab_load(ms));
1303 mutex_exit(&ms->ms_lock);
1307 vdev_dirty(vd, 0, NULL, txg);
1308 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1317 metaslab_fini(metaslab_t *msp)
1321 metaslab_group_t *mg = msp->ms_group;
1323 metaslab_group_remove(mg, msp);
1325 mutex_enter(&msp->ms_lock);
1327 VERIFY(msp->ms_group == NULL);
1328 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1330 space_map_close(msp->ms_sm);
1332 metaslab_unload(msp);
1333 range_tree_destroy(msp->ms_tree);
1335 for (t = 0; t < TXG_SIZE; t++) {
1336 range_tree_destroy(msp->ms_alloctree[t]);
1337 range_tree_destroy(msp->ms_freetree[t]);
1340 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1341 range_tree_destroy(msp->ms_defertree[t]);
1344 ASSERT0(msp->ms_deferspace);
1346 mutex_exit(&msp->ms_lock);
1347 cv_destroy(&msp->ms_load_cv);
1348 mutex_destroy(&msp->ms_lock);
1350 kmem_free(msp, sizeof (metaslab_t));
1353 #define FRAGMENTATION_TABLE_SIZE 17
1356 * This table defines a segment size based fragmentation metric that will
1357 * allow each metaslab to derive its own fragmentation value. This is done
1358 * by calculating the space in each bucket of the spacemap histogram and
1359 * multiplying that by the fragmentation metric in this table. Doing
1360 * this for all buckets and dividing it by the total amount of free
1361 * space in this metaslab (i.e. the total free space in all buckets) gives
1362 * us the fragmentation metric. This means that a high fragmentation metric
1363 * equates to most of the free space being comprised of small segments.
1364 * Conversely, if the metric is low, then most of the free space is in
1365 * large segments. A 10% change in fragmentation equates to approximately
1366 * double the number of segments.
1368 * This table defines 0% fragmented space using 16MB segments. Testing has
1369 * shown that segments that are greater than or equal to 16MB do not suffer
1370 * from drastic performance problems. Using this value, we derive the rest
1371 * of the table. Since the fragmentation value is never stored on disk, it
1372 * is possible to change these calculations in the future.
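 *
 * As a rough illustration: if 90% of a metaslab's free space sits in segments
 * of 16MB or larger (the 0% bucket) and the remaining 10% sits in a bucket
 * weighted at 100, the metaslab's overall fragmentation works out to about
 * (0.9 * 0) + (0.1 * 100) = 10%.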
1374 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1394 * Calculate the metaslab's fragmentation metric. A return value
1395 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1396 * not support this metric. Otherwise, the return value should be in the range [0, 100].
1400 metaslab_fragmentation(metaslab_t *msp)
1402 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1403 uint64_t fragmentation = 0;
1405 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1406 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1409 if (!feature_enabled)
1410 return (ZFS_FRAG_INVALID);
1413 * A null space map means that the entire metaslab is free
1414 * and thus is not fragmented.
1416 if (msp->ms_sm == NULL)
1420 * If this metaslab's space_map has not been upgraded, flag it
1421 * so that we upgrade next time we encounter it.
1423 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1424 vdev_t *vd = msp->ms_group->mg_vd;
1426 if (spa_writeable(vd->vdev_spa)) {
1427 uint64_t txg = spa_syncing_txg(spa);
1429 msp->ms_condense_wanted = B_TRUE;
1430 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1431 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1432 "msp %p, vd %p", txg, msp, vd);
1434 return (ZFS_FRAG_INVALID);
1437 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1439 uint8_t shift = msp->ms_sm->sm_shift;
1440 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1441 FRAGMENTATION_TABLE_SIZE - 1);
1443 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1446 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1449 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1450 fragmentation += space * zfs_frag_table[idx];
1454 fragmentation /= total;
1455 ASSERT3U(fragmentation, <=, 100);
1456 return (fragmentation);
1460 * Compute a weight -- a selection preference value -- for the given metaslab.
1461 * This is based on the amount of free space, the level of fragmentation,
1462 * the LBA range, and whether the metaslab is loaded.
1465 metaslab_weight(metaslab_t *msp)
1467 metaslab_group_t *mg = msp->ms_group;
1468 vdev_t *vd = mg->mg_vd;
1469 uint64_t weight, space;
1471 ASSERT(MUTEX_HELD(&msp->ms_lock));
1474 * This vdev is in the process of being removed so there is nothing
1475 * for us to do here.
1477 if (vd->vdev_removing) {
1478 ASSERT0(space_map_allocated(msp->ms_sm));
1479 ASSERT0(vd->vdev_ms_shift);
1484 * The baseline weight is the metaslab's free space.
1486 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1488 msp->ms_fragmentation = metaslab_fragmentation(msp);
1489 if (metaslab_fragmentation_factor_enabled &&
1490 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1492 * Use the fragmentation information to inversely scale
1493 * down the baseline weight. We need to ensure that we
1494 * don't exclude this metaslab completely when it's 100%
1495 * fragmented. To avoid this we reduce the fragmented value by 1.
1498 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
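/*
 * e.g. at 100% fragmentation the scale factor is (100 - 99) / 100, so 1%
 * of the free space still contributes to the weight rather than being
 * scaled all the way down to zero.
 */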
1501 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1502 * this metaslab again. The fragmentation metric may have
1503 * decreased the space to something smaller than
1504 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1505 * so that we can consume any remaining space.
1507 if (space > 0 && space < SPA_MINBLOCKSIZE)
1508 space = SPA_MINBLOCKSIZE;
1513 * Modern disks have uniform bit density and constant angular velocity.
1514 * Therefore, the outer recording zones are faster (higher bandwidth)
1515 * than the inner zones by the ratio of outer to inner track diameter,
1516 * which is typically around 2:1. We account for this by assigning
1517 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1518 * In effect, this means that we'll select the metaslab with the most
1519 * free bandwidth rather than simply the one with the most free space.
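 *
 * With this multiplier the first metaslab (ms_id 0) is weighted at twice its
 * free space while the last metaslab on the vdev is weighted at just over
 * its free space; the factor decays linearly with ms_id.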
1521 if (metaslab_lba_weighting_enabled) {
1522 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1523 ASSERT(weight >= space && weight <= 2 * space);
1527 * If this metaslab is one we're actively using, adjust its
1528 * weight to make it preferable to any inactive metaslab so
1529 * we'll polish it off. If the fragmentation on this metaslab
1530 * has exceeded our threshold, then don't mark it active.
1532 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1533 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1534 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1541 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1543 ASSERT(MUTEX_HELD(&msp->ms_lock));
1545 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1546 metaslab_load_wait(msp);
1547 if (!msp->ms_loaded) {
1548 int error = metaslab_load(msp);
1550 metaslab_group_sort(msp->ms_group, msp, 0);
1555 metaslab_group_sort(msp->ms_group, msp,
1556 msp->ms_weight | activation_weight);
1558 ASSERT(msp->ms_loaded);
1559 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1565 metaslab_passivate(metaslab_t *msp, uint64_t size)
1568 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1569 * this metaslab again. In that case, it had better be empty,
1570 * or we would be leaving space on the table.
1572 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1573 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1574 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1578 metaslab_preload(void *arg)
1580 metaslab_t *msp = arg;
1581 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1583 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1585 mutex_enter(&msp->ms_lock);
1586 metaslab_load_wait(msp);
1587 if (!msp->ms_loaded)
1588 (void) metaslab_load(msp);
1591 * Set the ms_access_txg value so that we don't unload it right away.
1593 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1594 mutex_exit(&msp->ms_lock);
1598 metaslab_group_preload(metaslab_group_t *mg)
1600 spa_t *spa = mg->mg_vd->vdev_spa;
1602 avl_tree_t *t = &mg->mg_metaslab_tree;
1605 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1606 taskq_wait_outstanding(mg->mg_taskq, 0);
1610 mutex_enter(&mg->mg_lock);
1612 * Load the next potential metaslabs
1615 while (msp != NULL) {
1616 metaslab_t *msp_next = AVL_NEXT(t, msp);
1619 * We preload only the maximum number of metaslabs specified
1620 * by metaslab_preload_limit. If a metaslab is being forced
1621 * to condense then we preload it too. This will ensure
1622 * that force condensing happens in the next txg.
1624 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
1630 * We must drop the metaslab group lock here to preserve
1631 * lock ordering with the ms_lock (when grabbing both
1632 * the mg_lock and the ms_lock, the ms_lock must be taken
1633 * first). As a result, it is possible that the ordering
1634 * of the metaslabs within the avl tree may change before
1635 * we reacquire the lock. The metaslab cannot be removed from
1636 * the tree while we're in syncing context so it is safe to
1637 * drop the mg_lock here. If the metaslabs are reordered
1638 * nothing will break -- we just may end up loading a
1639 * less than optimal one.
1641 mutex_exit(&mg->mg_lock);
1642 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1643 msp, TQ_SLEEP) != 0);
1644 mutex_enter(&mg->mg_lock);
1647 mutex_exit(&mg->mg_lock);
1651 * Determine if the space map's on-disk footprint is past our tolerance
1652 * for inefficiency. We would like to use the following criteria to make our decision:
1655 * 1. The size of the space map object should not dramatically increase as a
1656 * result of writing out the free space range tree.
1658 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1659 * times the size of the free space range tree representation
1660 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1662 * 3. The on-disk size of the space map should actually decrease.
1664 * Checking the first condition is tricky since we don't want to walk
1665 * the entire AVL tree calculating the estimated on-disk size. Instead we
1666 * use the size-ordered range tree in the metaslab and calculate the
1667 * size required to write out the largest segment in our free tree. If the
1668 * size required to represent that segment on disk is larger than the space
1669 * map object then we avoid condensing this map.
1671 * To determine the second criterion we use a best-case estimate and assume
1672 * each segment can be represented on-disk as a single 64-bit entry. We refer
1673 * to this best-case estimate as the space map's minimal form.
1675 * Unfortunately, we cannot compute the on-disk size of the space map in this
1676 * context because we cannot accurately compute the effects of compression, etc.
1677 * Instead, we apply the heuristic described in the block comment for
1678 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1679 * is greater than a threshold number of blocks.
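 *
 * Putting the pieces together (numbers purely illustrative): a loaded
 * metaslab with 10,000 free segments has a minimal form of roughly
 * 80,000 bytes (one 64-bit entry per segment); with zfs_condense_pct = 200
 * we condense only if the current on-disk space map exceeds roughly
 * 160,000 bytes, spans more than zfs_metaslab_condense_block_threshold
 * blocks, and is at least as large as the on-disk representation of the
 * single largest free segment.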
1682 metaslab_should_condense(metaslab_t *msp)
1684 space_map_t *sm = msp->ms_sm;
1686 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
1687 dmu_object_info_t doi;
1688 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
1690 ASSERT(MUTEX_HELD(&msp->ms_lock));
1691 ASSERT(msp->ms_loaded);
1694 * Use the ms_size_tree range tree, which is ordered by size, to
1695 * obtain the largest segment in the free tree. We always condense
1696 * metaslabs that are empty and metaslabs for which a condense
1697 * request has been made.
1699 rs = avl_last(&msp->ms_size_tree);
1700 if (rs == NULL || msp->ms_condense_wanted)
1704 * Calculate the number of 64-bit entries this segment would
1705 * require when written to disk. If this single segment would be
1706 * larger on-disk than the entire current on-disk structure, then
1707 * clearly condensing will increase the on-disk structure size.
1709 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1710 entries = size / (MIN(size, SM_RUN_MAX));
1711 segsz = entries * sizeof (uint64_t);
1713 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
1714 object_size = space_map_length(msp->ms_sm);
1716 dmu_object_info_from_db(sm->sm_dbuf, &doi);
1717 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
1719 return (segsz <= object_size &&
1720 object_size >= (optimal_size * zfs_condense_pct / 100) &&
1721 object_size > zfs_metaslab_condense_block_threshold * record_size);
1725 * Condense the on-disk space map representation to its minimized form.
1726 * The minimized form consists of a small number of allocations followed by
1727 * the entries of the free range tree.
1730 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1732 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1733 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1734 range_tree_t *condense_tree;
1735 space_map_t *sm = msp->ms_sm;
1738 ASSERT(MUTEX_HELD(&msp->ms_lock));
1739 ASSERT3U(spa_sync_pass(spa), ==, 1);
1740 ASSERT(msp->ms_loaded);
1743 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1744 "smp size %llu, segments %lu, forcing condense=%s", txg,
1745 msp->ms_id, msp, space_map_length(msp->ms_sm),
1746 avl_numnodes(&msp->ms_tree->rt_root),
1747 msp->ms_condense_wanted ? "TRUE" : "FALSE");
1749 msp->ms_condense_wanted = B_FALSE;
1752 * Create a range tree that is 100% allocated. We remove segments
1753 * that have been freed in this txg, any deferred frees that exist,
1754 * and any allocation in the future. Removing segments should be
1755 * a relatively inexpensive operation since we expect these trees to
1756 * have a small number of nodes.
1758 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1759 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1762 * Remove what's been freed in this txg from the condense_tree.
1763 * Since we're in sync_pass 1, we know that all the frees from
1764 * this txg are in the freetree.
1766 range_tree_walk(freetree, range_tree_remove, condense_tree);
1768 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1769 range_tree_walk(msp->ms_defertree[t],
1770 range_tree_remove, condense_tree);
1773 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
1774 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1775 range_tree_remove, condense_tree);
1779 * We're about to drop the metaslab's lock thus allowing
1780 * other consumers to change its content. Set the
1781 * metaslab's ms_condensing flag to ensure that
1782 * allocations on this metaslab do not occur while we're
1783 * in the middle of committing it to disk. This is only critical
1784 * for the ms_tree as all other range trees use per txg
1785 * views of their content.
1787 msp->ms_condensing = B_TRUE;
1789 mutex_exit(&msp->ms_lock);
1790 space_map_truncate(sm, tx);
1791 mutex_enter(&msp->ms_lock);
1794 * While we would ideally like to create a space_map representation
1795 * that consists only of allocation records, doing so can be
1796 * prohibitively expensive because the in-core free tree can be
1797 * large, and therefore computationally expensive to subtract
1798 * from the condense_tree. Instead we sync out two trees, a cheap
1799 * allocation only tree followed by the in-core free tree. While not
1800 * optimal, this is typically close to optimal, and much cheaper to compute.
1803 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1804 range_tree_vacate(condense_tree, NULL, NULL);
1805 range_tree_destroy(condense_tree);
1807 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1808 msp->ms_condensing = B_FALSE;
1812 * Write a metaslab to disk in the context of the specified transaction group.
1815 metaslab_sync(metaslab_t *msp, uint64_t txg)
1817 metaslab_group_t *mg = msp->ms_group;
1818 vdev_t *vd = mg->mg_vd;
1819 spa_t *spa = vd->vdev_spa;
1820 objset_t *mos = spa_meta_objset(spa);
1821 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1822 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1823 range_tree_t **freed_tree =
1824 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1826 uint64_t object = space_map_object(msp->ms_sm);
1828 ASSERT(!vd->vdev_ishole);
1831 * This metaslab has just been added so there's no work to do now.
1833 if (*freetree == NULL) {
1834 ASSERT3P(alloctree, ==, NULL);
1838 ASSERT3P(alloctree, !=, NULL);
1839 ASSERT3P(*freetree, !=, NULL);
1840 ASSERT3P(*freed_tree, !=, NULL);
1843 * Normally, we don't want to process a metaslab if there
1844 * are no allocations or frees to perform. However, if the metaslab
1845 * is being forced to condense we need to let it through.
1847 if (range_tree_space(alloctree) == 0 &&
1848 range_tree_space(*freetree) == 0 &&
1849 !msp->ms_condense_wanted)
1853 * The only state that can actually be changing concurrently with
1854 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1855 * be modifying this txg's alloctree, freetree, freed_tree, or
1856 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1857 * space_map ASSERTs. We drop it whenever we call into the DMU,
1858 * because the DMU can call down to us (e.g. via zio_free()) at any time.
1862 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1864 if (msp->ms_sm == NULL) {
1865 uint64_t new_object;
1867 new_object = space_map_alloc(mos, tx);
1868 VERIFY3U(new_object, !=, 0);
1870 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1871 msp->ms_start, msp->ms_size, vd->vdev_ashift,
1873 ASSERT(msp->ms_sm != NULL);
1876 mutex_enter(&msp->ms_lock);
1879 * Note: metaslab_condense() clears the space_map's histogram.
1880 * Therefore we must verify and remove this histogram before condensing.
1883 metaslab_group_histogram_verify(mg);
1884 metaslab_class_histogram_verify(mg->mg_class);
1885 metaslab_group_histogram_remove(mg, msp);
1887 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1888 metaslab_should_condense(msp)) {
1889 metaslab_condense(msp, txg, tx);
1891 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1892 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1895 if (msp->ms_loaded) {
1897 * When the space map is loaded, we have an accurate
1898 * histogram in the range tree. This gives us an opportunity
1899 * to bring the space map's histogram up-to-date so we clear
1900 * it first before updating it.
1902 space_map_histogram_clear(msp->ms_sm);
1903 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1906 * Since the space map is not loaded we simply update the
1907 * existing histogram with what was freed in this txg. This
1908 * means that the on-disk histogram may not have an accurate
1909 * view of the free space but it's close enough to allow
1910 * us to make allocation decisions.
1912 space_map_histogram_add(msp->ms_sm, *freetree, tx);
1914 metaslab_group_histogram_add(mg, msp);
1915 metaslab_group_histogram_verify(mg);
1916 metaslab_class_histogram_verify(mg->mg_class);
1919 * For sync pass 1, we avoid traversing this txg's free range tree
1920 * and instead will just swap the pointers for freetree and
1921 * freed_tree. We can safely do this since the freed_tree is
1922 * guaranteed to be empty on the initial pass.
1924 if (spa_sync_pass(spa) == 1) {
1925 range_tree_swap(freetree, freed_tree);
1927 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1929 range_tree_vacate(alloctree, NULL, NULL);
1931 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1932 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1934 mutex_exit(&msp->ms_lock);
1936 if (object != space_map_object(msp->ms_sm)) {
1937 object = space_map_object(msp->ms_sm);
1938 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1939 msp->ms_id, sizeof (uint64_t), &object, tx);
1945 * Called after a transaction group has completely synced to mark
1946 * all of the metaslab's free space as usable.
1949 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1951 metaslab_group_t *mg = msp->ms_group;
1952 vdev_t *vd = mg->mg_vd;
1953 range_tree_t **freed_tree;
1954 range_tree_t **defer_tree;
1955 int64_t alloc_delta, defer_delta;
1958 ASSERT(!vd->vdev_ishole);
1960 mutex_enter(&msp->ms_lock);
1963 * If this metaslab is just becoming available, initialize its
1964 * alloctrees, freetrees, and defertree and add its capacity to the vdev.
1967 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1968 for (t = 0; t < TXG_SIZE; t++) {
1969 ASSERT(msp->ms_alloctree[t] == NULL);
1970 ASSERT(msp->ms_freetree[t] == NULL);
1972 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1974 msp->ms_freetree[t] = range_tree_create(NULL, msp,
1978 for (t = 0; t < TXG_DEFER_SIZE; t++) {
1979 ASSERT(msp->ms_defertree[t] == NULL);
1981 msp->ms_defertree[t] = range_tree_create(NULL, msp,
1985 vdev_space_update(vd, 0, 0, msp->ms_size);
1988 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1989 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1991 alloc_delta = space_map_alloc_delta(msp->ms_sm);
1992 defer_delta = range_tree_space(*freed_tree) -
1993 range_tree_space(*defer_tree);
1995 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1997 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1998 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
2001 * If there's a metaslab_load() in progress, wait for it to complete
2002 * so that we have a consistent view of the in-core space map.
2004 metaslab_load_wait(msp);
2007 * Move the frees from the defer_tree back to the free
2008 * range tree (if it's loaded). Swap the freed_tree and the
2009 * defer_tree -- this is safe to do because we've just emptied out the defer_tree.
2012 range_tree_vacate(*defer_tree,
2013 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2014 range_tree_swap(freed_tree, defer_tree);
2016 space_map_update(msp->ms_sm);
2018 msp->ms_deferspace += defer_delta;
2019 ASSERT3S(msp->ms_deferspace, >=, 0);
2020 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2021 if (msp->ms_deferspace != 0) {
2023 * Keep syncing this metaslab until all deferred frees
2024 * are back in circulation.
2026 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2029 if (msp->ms_loaded && msp->ms_access_txg < txg) {
2030 for (t = 1; t < TXG_CONCURRENT_STATES; t++) {
2031 VERIFY0(range_tree_space(
2032 msp->ms_alloctree[(txg + t) & TXG_MASK]));
2035 if (!metaslab_debug_unload)
2036 metaslab_unload(msp);
2039 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2040 mutex_exit(&msp->ms_lock);
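/*
 * Illustrative sketch (not part of the allocator): a segment freed in txg N
 * lands in that txg's freetree, is swapped into freed_tree during sync, and
 * then sits in defer_tree[N % TXG_DEFER_SIZE] until that slot is vacated
 * back into ms_tree by metaslab_sync_done() above.  The hypothetical helper
 * below just states when the space becomes allocatable again; it is not a
 * ZFS API.
 */
static uint64_t
example_free_reusable_txg(uint64_t free_txg)
{
	/* the defer slot filled in free_txg is emptied TXG_DEFER_SIZE txgs later */
	return (free_txg + TXG_DEFER_SIZE);
}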
2044 metaslab_sync_reassess(metaslab_group_t *mg)
2046 metaslab_group_alloc_update(mg);
2047 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2050 * Preload the next potential metaslabs
2052 metaslab_group_preload(mg);
2056 metaslab_distance(metaslab_t *msp, dva_t *dva)
2058 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2059 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2060 uint64_t start = msp->ms_id;
2062 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2063 return (1ULL << 63);
2065 if (offset < start)
2066 return ((start - offset) << ms_shift);
2067 else
2068 return ((offset - start) << ms_shift);
2069 }
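/*
 * Illustrative sketch (not part of the allocator): metaslab_distance()
 * above measures distance in whole metaslabs and then scales back to bytes.
 * For example, with a hypothetical vdev_ms_shift of 34 (16 GiB metaslabs),
 * a candidate metaslab with ms_id 10 and a previous DVA that landed in
 * metaslab 7 of the same vdev are (10 - 7) << 34 = 48 GiB apart, while a
 * DVA on any other vdev is treated as infinitely far away (1ULL << 63).
 * The helper below redoes that arithmetic on plain integers; it is not a
 * ZFS API.
 */
static uint64_t
example_metaslab_distance(uint64_t ms_id, uint64_t dva_ms_id,
    uint64_t ms_shift, boolean_t same_vdev)
{
	if (!same_vdev)
		return (1ULL << 63);
	if (dva_ms_id < ms_id)
		return ((ms_id - dva_ms_id) << ms_shift);
	return ((dva_ms_id - ms_id) << ms_shift);
}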
2073 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
2074 uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2076 spa_t *spa = mg->mg_vd->vdev_spa;
2077 metaslab_t *msp = NULL;
2078 uint64_t offset = -1ULL;
2079 avl_tree_t *t = &mg->mg_metaslab_tree;
2080 uint64_t activation_weight;
2081 uint64_t target_distance;
2084 activation_weight = METASLAB_WEIGHT_PRIMARY;
2085 for (i = 0; i < d; i++) {
2086 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2087 activation_weight = METASLAB_WEIGHT_SECONDARY;
2093 boolean_t was_active;
2095 mutex_enter(&mg->mg_lock);
2096 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
2097 if (msp->ms_weight < asize) {
2098 spa_dbgmsg(spa, "%s: failed to meet weight "
2099 "requirement: vdev %llu, txg %llu, mg %p, "
2100 "msp %p, psize %llu, asize %llu, "
2101 "weight %llu", spa_name(spa),
2102 mg->mg_vd->vdev_id, txg,
2103 mg, msp, psize, asize, msp->ms_weight);
2104 mutex_exit(&mg->mg_lock);
2109 * If the selected metaslab is condensing, skip it.
2111 if (msp->ms_condensing)
2112 continue;
2114 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2115 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2118 target_distance = min_distance +
2119 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2122 for (i = 0; i < d; i++)
2123 if (metaslab_distance(msp, &dva[i]) <
2129 mutex_exit(&mg->mg_lock);
2133 mutex_enter(&msp->ms_lock);
2136 * Ensure that the metaslab we have selected is still
2137 * capable of handling our request. It's possible that
2138 * another thread may have changed the weight while we
2139 * were blocked on the metaslab lock.
2141 if (msp->ms_weight < asize || (was_active &&
2142 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
2143 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
2144 mutex_exit(&msp->ms_lock);
2148 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2149 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2150 metaslab_passivate(msp,
2151 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2152 mutex_exit(&msp->ms_lock);
2156 if (metaslab_activate(msp, activation_weight) != 0) {
2157 mutex_exit(&msp->ms_lock);
2162 * If this metaslab is currently condensing then pick again as
2163 * we can't manipulate this metaslab until it's committed
2164 * to disk.
2166 if (msp->ms_condensing) {
2167 mutex_exit(&msp->ms_lock);
2171 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
2174 metaslab_passivate(msp, metaslab_block_maxsize(msp));
2175 mutex_exit(&msp->ms_lock);
2178 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2179 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2181 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
2182 msp->ms_access_txg = txg + metaslab_unload_delay;
2184 mutex_exit(&msp->ms_lock);
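/*
 * Illustrative sketch (not part of the allocator): the activation weight
 * chosen at the top of metaslab_group_alloc() only depends on whether an
 * earlier DVA of this block already lives on this vdev.  The hypothetical
 * helper below expresses that rule over plain vdev ids; it is not a ZFS
 * API.
 */
static uint64_t
example_activation_weight(const uint64_t *prev_dva_vdevs, int ndvas,
    uint64_t this_vdev)
{
	int i;

	for (i = 0; i < ndvas; i++) {
		/* a ditto copy is already on this vdev; settle for secondary */
		if (prev_dva_vdevs[i] == this_vdev)
			return (METASLAB_WEIGHT_SECONDARY);
	}
	return (METASLAB_WEIGHT_PRIMARY);
}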
2190 * Allocate a block for the specified i/o.
2193 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2194 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
2196 metaslab_group_t *mg, *fast_mg, *rotor;
2200 int zio_lock = B_FALSE;
2201 boolean_t allocatable;
2202 uint64_t offset = -1ULL;
2206 ASSERT(!DVA_IS_VALID(&dva[d]));
2209 * For testing, make some blocks above a certain size be gang blocks.
2211 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
2212 return (SET_ERROR(ENOSPC));
2214 if (flags & METASLAB_FASTWRITE)
2215 mutex_enter(&mc->mc_fastwrite_lock);
2218 * Start at the rotor and loop through all mgs until we find something.
2219 * Note that there's no locking on mc_rotor or mc_aliquot because
2220 * nothing actually breaks if we miss a few updates -- we just won't
2221 * allocate quite as evenly. It all balances out over time.
2223 * If we are doing ditto or log blocks, try to spread them across
2224 * consecutive vdevs. If we're forced to reuse a vdev before we've
2225 * allocated all of our ditto blocks, then try and spread them out on
2226 * that vdev as much as possible. If that turns out not to be possible,
2227 * gradually lower our standards until anything becomes acceptable.
2228 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2229 * gives us hope of containing our fault domains to something we're
2230 * able to reason about. Otherwise, any two top-level vdev failures
2231 * will guarantee the loss of data. With consecutive allocation,
2232 * only two adjacent top-level vdev failures will result in data loss.
2234 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2235 * ourselves on the same vdev as our gang block header. That
2236 * way, we can hope for locality in vdev_cache, plus it makes our
2237 * fault domains something tractable.
2240 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2243 * It's possible the vdev we're using as the hint no
2244 * longer exists (i.e. removed). Consult the rotor when
2245 * that happens.
2250 if (flags & METASLAB_HINTBP_AVOID &&
2251 mg->mg_next != NULL)
2256 } else if (d != 0) {
2257 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2258 mg = vd->vdev_mg->mg_next;
2259 } else if (flags & METASLAB_FASTWRITE) {
2260 mg = fast_mg = mc->mc_rotor;
2262 do {
2263 if (fast_mg->mg_vd->vdev_pending_fastwrite <
2264 mg->mg_vd->vdev_pending_fastwrite)
2265 mg = fast_mg;
2266 } while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
2273 * If the hint put us into the wrong metaslab class, or into a
2274 * metaslab group that has been passivated, just follow the rotor.
2276 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2283 ASSERT(mg->mg_activation_count == 1);
2288 * Don't allocate from faulted devices.
2291 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2292 allocatable = vdev_allocatable(vd);
2293 spa_config_exit(spa, SCL_ZIO, FTAG);
2295 allocatable = vdev_allocatable(vd);
2299 * Determine if the selected metaslab group is eligible
2300 * for allocations. If we're ganging or have requested
2301 * an allocation for the smallest gang block size
2302 * then we don't want to avoid allocating to this
2303 * metaslab group. If we're in this condition we should
2304 * try to allocate from any device possible so that we
2305 * don't inadvertently return ENOSPC and suspend the pool
2306 * even though space is still available.
2308 if (allocatable && CAN_FASTGANG(flags) &&
2309 psize > SPA_GANGBLOCKSIZE)
2310 allocatable = metaslab_group_allocatable(mg);
2316 * Avoid writing single-copy data to a failing vdev
2317 * unless the user instructs us that it is okay.
2319 if ((vd->vdev_stat.vs_write_errors > 0 ||
2320 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2321 d == 0 && dshift == 3 && vd->vdev_children == 0) {
2326 ASSERT(mg->mg_class == mc);
2328 distance = vd->vdev_asize >> dshift;
2329 if (distance <= (1ULL << vd->vdev_ms_shift))
2334 asize = vdev_psize_to_asize(vd, psize);
2335 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2337 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
2338 dva, d);
2339 if (offset != -1ULL) {
2341 * If we've just selected this metaslab group,
2342 * figure out whether the corresponding vdev is
2343 * over- or under-used relative to the pool,
2344 * and set an allocation bias to even it out.
2346 * Bias is also used to compensate for unequally
2347 * sized vdevs so that space is allocated fairly.
2349 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
2350 vdev_stat_t *vs = &vd->vdev_stat;
2351 int64_t vs_free = vs->vs_space - vs->vs_alloc;
2352 int64_t mc_free = mc->mc_space - mc->mc_alloc;
2356 * Calculate how much more or less we should
2357 * try to allocate from this device during
2358 * this iteration around the rotor.
2360 * This basically introduces a zero-centered
2361 * bias towards the devices with the most
2362 * free space, while compensating for vdev
2363 * size differences.
2366 * vdev V1 = 16M/128M
2367 * vdev V2 = 16M/128M
2368 * ratio(V1) = 100% ratio(V2) = 100%
2370 * vdev V1 = 16M/128M
2371 * vdev V2 = 64M/128M
2372 * ratio(V1) = 127% ratio(V2) = 72%
2374 * vdev V1 = 16M/128M
2375 * vdev V2 = 64M/512M
2376 * ratio(V1) = 40% ratio(V2) = 160%
2378 ratio = (vs_free * mc->mc_alloc_groups * 100) /
2379 (mc_free + 1);
2380 mg->mg_bias = ((ratio - 100) *
2381 (int64_t)mg->mg_aliquot) / 100;
2382 } else if (!metaslab_bias_enabled) {
2386 if ((flags & METASLAB_FASTWRITE) ||
2387 atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2388 mg->mg_aliquot + mg->mg_bias) {
2389 mc->mc_rotor = mg->mg_next;
2393 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2394 DVA_SET_OFFSET(&dva[d], offset);
2395 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2396 DVA_SET_ASIZE(&dva[d], asize);
2398 if (flags & METASLAB_FASTWRITE) {
2399 atomic_add_64(&vd->vdev_pending_fastwrite,
2400 psize);
2401 mutex_exit(&mc->mc_fastwrite_lock);
2407 mc->mc_rotor = mg->mg_next;
2409 } while ((mg = mg->mg_next) != rotor);
2413 ASSERT(dshift < 64);
2417 if (!allocatable && !zio_lock) {
2423 bzero(&dva[d], sizeof (dva_t));
2425 if (flags & METASLAB_FASTWRITE)
2426 mutex_exit(&mc->mc_fastwrite_lock);
2428 return (SET_ERROR(ENOSPC));
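/*
 * Illustrative sketch (not part of the allocator): the bias computed above
 * can be checked against the middle example in the comment.  With two
 * allocatable metaslab groups, V1 = 16M used of 128M (112M free) and
 * V2 = 64M used of 128M (64M free), the class has 176M free, so
 * ratio(V1) = 112 * 2 * 100 / 176 = 127% and ratio(V2) = 64 * 2 * 100 / 176
 * = 72%; mg_bias then shifts each group's share of the rotor aliquot by
 * (ratio - 100) percent.  The hypothetical helper below restates the ratio
 * arithmetic and is not a ZFS API.
 */
static int64_t
example_alloc_ratio(int64_t vs_free, int64_t mc_free, int64_t alloc_groups)
{
	/* this vdev's free space as a percentage of an even split */
	return ((vs_free * alloc_groups * 100) / (mc_free + 1));
}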
2432 * Free the block represented by DVA in the context of the specified
2433 * transaction group.
2436 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2438 uint64_t vdev = DVA_GET_VDEV(dva);
2439 uint64_t offset = DVA_GET_OFFSET(dva);
2440 uint64_t size = DVA_GET_ASIZE(dva);
2444 if (txg > spa_freeze_txg(spa))
2447 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
2448 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2449 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
2450 (u_longlong_t)vdev, (u_longlong_t)offset,
2451 (u_longlong_t)size);
2455 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2457 if (DVA_GET_GANG(dva))
2458 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2460 mutex_enter(&msp->ms_lock);
2462 if (now) {
2463 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2464 offset, size);
2466 VERIFY(!msp->ms_condensing);
2467 VERIFY3U(offset, >=, msp->ms_start);
2468 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2469 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2470 msp->ms_size);
2471 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2472 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2473 range_tree_add(msp->ms_tree, offset, size);
2474 } else {
2475 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2476 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2477 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2478 offset, size);
2479 }
2481 mutex_exit(&msp->ms_lock);
2485 * Intent log support: upon opening the pool after a crash, notify the SPA
2486 * of blocks that the intent log has allocated for immediate write, but
2487 * which are still considered free by the SPA because the last transaction
2488 * group didn't commit yet.
2491 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2493 uint64_t vdev = DVA_GET_VDEV(dva);
2494 uint64_t offset = DVA_GET_OFFSET(dva);
2495 uint64_t size = DVA_GET_ASIZE(dva);
2500 ASSERT(DVA_IS_VALID(dva));
2502 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2503 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2504 return (SET_ERROR(ENXIO));
2506 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2508 if (DVA_GET_GANG(dva))
2509 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2511 mutex_enter(&msp->ms_lock);
2513 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2514 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2516 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2517 error = SET_ERROR(ENOENT);
2519 if (error || txg == 0) { /* txg == 0 indicates dry run */
2520 mutex_exit(&msp->ms_lock);
2524 VERIFY(!msp->ms_condensing);
2525 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2526 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2527 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2528 range_tree_remove(msp->ms_tree, offset, size);
2530 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2531 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2532 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2533 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2536 mutex_exit(&msp->ms_lock);
2542 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2543 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2545 dva_t *dva = bp->blk_dva;
2546 dva_t *hintdva = hintbp->blk_dva;
2549 ASSERT(bp->blk_birth == 0);
2550 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2552 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2554 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2555 spa_config_exit(spa, SCL_ALLOC, FTAG);
2556 return (SET_ERROR(ENOSPC));
2559 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2560 ASSERT(BP_GET_NDVAS(bp) == 0);
2561 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2563 for (d = 0; d < ndvas; d++) {
2564 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2565 txg, flags);
2566 if (error != 0) {
2567 for (d--; d >= 0; d--) {
2568 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2569 bzero(&dva[d], sizeof (dva_t));
2571 spa_config_exit(spa, SCL_ALLOC, FTAG);
2576 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2578 spa_config_exit(spa, SCL_ALLOC, FTAG);
2580 BP_SET_BIRTH(bp, txg, txg);
2586 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2588 const dva_t *dva = bp->blk_dva;
2589 int d, ndvas = BP_GET_NDVAS(bp);
2591 ASSERT(!BP_IS_HOLE(bp));
2592 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2594 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2596 for (d = 0; d < ndvas; d++)
2597 metaslab_free_dva(spa, &dva[d], txg, now);
2599 spa_config_exit(spa, SCL_FREE, FTAG);
2603 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2605 const dva_t *dva = bp->blk_dva;
2606 int ndvas = BP_GET_NDVAS(bp);
2609 ASSERT(!BP_IS_HOLE(bp));
2613 * First do a dry run to make sure all DVAs are claimable,
2614 * so we don't have to unwind from partial failures below.
2616 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2617 return (error);
2620 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2622 for (d = 0; d < ndvas; d++)
2623 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2626 spa_config_exit(spa, SCL_ALLOC, FTAG);
2628 ASSERT(error == 0 || txg == 0);
2634 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
2636 const dva_t *dva = bp->blk_dva;
2637 int ndvas = BP_GET_NDVAS(bp);
2638 uint64_t psize = BP_GET_PSIZE(bp);
2642 ASSERT(!BP_IS_HOLE(bp));
2643 ASSERT(!BP_IS_EMBEDDED(bp));
2646 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2648 for (d = 0; d < ndvas; d++) {
2649 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
2650 continue;
2651 atomic_add_64(&vd->vdev_pending_fastwrite, psize);
2654 spa_config_exit(spa, SCL_VDEV, FTAG);
2658 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
2660 const dva_t *dva = bp->blk_dva;
2661 int ndvas = BP_GET_NDVAS(bp);
2662 uint64_t psize = BP_GET_PSIZE(bp);
2666 ASSERT(!BP_IS_HOLE(bp));
2667 ASSERT(!BP_IS_EMBEDDED(bp));
2670 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2672 for (d = 0; d < ndvas; d++) {
2673 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
2674 continue;
2675 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
2676 atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
2679 spa_config_exit(spa, SCL_VDEV, FTAG);
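/*
 * Illustrative sketch (not part of the allocator): vdev_pending_fastwrite
 * is a plain outstanding-bytes counter.  METASLAB_FASTWRITE allocations and
 * metaslab_fastwrite_mark() raise it, metaslab_fastwrite_unmark() lowers it
 * when the caller is finished with the block (the intent log is assumed to
 * be that caller), and the FASTWRITE branch of metaslab_alloc_dva() starts
 * its search at whichever top-level vdev currently has the smallest count.
 * The hypothetical helper below shows that selection over a plain array of
 * counters; it is not a ZFS API.
 */
static int
example_least_pending_fastwrite(const uint64_t *pending, int nvdevs)
{
	int i, best = 0;

	for (i = 1; i < nvdevs; i++) {
		if (pending[i] < pending[best])
			best = i;
	}
	return (best);
}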
2683 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2687 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2688 return;
2690 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2691 for (i = 0; i < BP_GET_NDVAS(bp); i++) {
2692 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2693 vdev_t *vd = vdev_lookup_top(spa, vdev);
2694 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2695 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2696 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2699 range_tree_verify(msp->ms_tree, offset, size);
2701 for (j = 0; j < TXG_SIZE; j++)
2702 range_tree_verify(msp->ms_freetree[j], offset, size);
2703 for (j = 0; j < TXG_DEFER_SIZE; j++)
2704 range_tree_verify(msp->ms_defertree[j], offset, size);
2706 spa_config_exit(spa, SCL_VDEV, FTAG);
2709 #if defined(_KERNEL) && defined(HAVE_SPL)
2710 module_param(metaslab_debug_load, int, 0644);
2711 module_param(metaslab_debug_unload, int, 0644);
2712 module_param(metaslab_preload_enabled, int, 0644);
2713 module_param(zfs_mg_noalloc_threshold, int, 0644);
2714 module_param(zfs_mg_fragmentation_threshold, int, 0644);
2715 module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
2716 module_param(metaslab_fragmentation_factor_enabled, int, 0644);
2717 module_param(metaslab_lba_weighting_enabled, int, 0644);
2718 module_param(metaslab_bias_enabled, int, 0644);
2720 MODULE_PARM_DESC(metaslab_debug_load,
2721 "load all metaslabs when pool is first opened");
2722 MODULE_PARM_DESC(metaslab_debug_unload,
2723 "prevent metaslabs from being unloaded");
2724 MODULE_PARM_DESC(metaslab_preload_enabled,
2725 "preload potential metaslabs during reassessment");
2727 MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
2728 "percentage of free space for metaslab group to allow allocation");
2729 MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
2730 "fragmentation for metaslab group to allow allocation");
2732 MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
2733 "fragmentation for metaslab to allow allocation");
2734 MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
2735 "use the fragmentation metric to prefer less fragmented metaslabs");
2736 MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
2737 "prefer metaslabs with lower LBAs");
2738 MODULE_PARM_DESC(metaslab_bias_enabled,
2739 "enable metaslab group biasing");
2740 #endif /* _KERNEL && HAVE_SPL */