4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
34 #include <sys/spa_impl.h>
36 SYSCTL_DECL(_vfs_zfs);
37 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
40 * Allow allocations to switch to gang blocks quickly. We do this to
41 * avoid having to load lots of space_maps in a given txg. There are,
42 * however, some cases where we want to avoid "fast" ganging and instead
43 * we want to do an exhaustive search of all metaslabs on this device.
44 * Currently we don't allow any gang, zil, or dump device related allocations
 * to "fast" gang.
47 #define CAN_FASTGANG(flags) \
48 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
49 METASLAB_GANG_AVOID)))
51 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
52 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
53 #define METASLAB_ACTIVE_MASK \
54 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
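/*
 * The two high-order bits of ms_weight flag a metaslab that is actively in
 * use as a primary or secondary allocation target; the remaining bits hold
 * the weight computed by metaslab_weight() below.
 */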
56 uint64_t metaslab_aliquot = 512ULL << 10;
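/*
 * metaslab_aliquot (512K by default) is roughly how much is allocated from
 * one metaslab group before the rotor moves on to the next; see
 * metaslab_group_activate(), which scales it by the vdev's child count.
 */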
57 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
58 TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
59 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
60 &metaslab_gang_bang, 0,
61 "Force gang block allocation for blocks larger than or equal to this value");
64 * The in-core space map representation is more compact than its on-disk form.
65 * The zfs_condense_pct determines how much more compact the in-core
66 * space_map representation must be before we compact it on-disk.
67 * Values should be greater than or equal to 100.
69 int zfs_condense_pct = 200;
70 TUNABLE_INT("vfs.zfs.condense_pct", &zfs_condense_pct);
71 SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN,
73 "Condense on-disk spacemap when it is more than this many percents"
74 " of in-memory counterpart");
77 * This value defines the number of allowed allocation failures per vdev.
78 * If a device reaches this threshold in a given txg then we consider skipping
79 * allocations on that device. The value of zfs_mg_alloc_failures is computed
80 * in zio_init() unless it has been overridden in /etc/system.
82 int zfs_mg_alloc_failures = 0;
83 TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
84 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RWTUN,
85 &zfs_mg_alloc_failures, 0,
86 "Number of allowed allocation failures per vdev");
89 * The zfs_mg_noalloc_threshold defines which metaslab groups should
90 * be eligible for allocation. The value is defined as a percentage of
91 * a free space. Metaslab groups that have more free space than
92 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
93 * a metaslab group's free space is less than or equal to the
94 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
95 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
96 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
97 * groups are allowed to accept allocations. Gang blocks are always
98 * eligible to allocate on any metaslab group. The default value of 0 means
99 * no metaslab group will be excluded based on this criterion.
101 int zfs_mg_noalloc_threshold = 0;
102 TUNABLE_INT("vfs.zfs.mg_noalloc_threshold", &zfs_mg_noalloc_threshold);
103 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_noalloc_threshold, CTLFLAG_RWTUN,
104 &zfs_mg_noalloc_threshold, 0,
105 "Percentage of metaslab group size that should be free"
106 " to make it eligible for allocation");
109 * When set, we load all metaslabs when the pool is first opened.
111 int metaslab_debug_load = 0;
112 TUNABLE_INT("vfs.zfs.metaslab.debug_load", &metaslab_debug_load);
113 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_load, CTLFLAG_RWTUN,
114 &metaslab_debug_load, 0,
115 "Load all metaslabs when pool is first opened");
118 * When set, we prevent metaslabs from being unloaded.
120 int metaslab_debug_unload = 0;
121 TUNABLE_INT("vfs.zfs.metaslab.debug_unload", &metaslab_debug_unload);
122 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug_unload, CTLFLAG_RWTUN,
123 &metaslab_debug_unload, 0,
124 "Prevent metaslabs from being unloaded");
127 * Minimum size which forces the dynamic allocator to change
128 * its allocation strategy. Once the space map cannot satisfy
129 * an allocation of this size then it switches to using a more
130 * aggressive strategy (i.e. search by size rather than offset).
132 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
133 TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
134 &metaslab_df_alloc_threshold);
135 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
136 &metaslab_df_alloc_threshold, 0,
137 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
140 * The minimum free space, in percent, which must be available
141 * in a space map to continue allocations in a first-fit fashion.
142 * Once the space_map's free space drops below this level we dynamically
143 * switch to using best-fit allocations.
145 int metaslab_df_free_pct = 4;
146 TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
147 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
148 &metaslab_df_free_pct, 0,
149 "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
152 * A metaslab is considered "free" if it contains a contiguous
153 * segment which is greater than metaslab_min_alloc_size.
155 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
156 TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
157 &metaslab_min_alloc_size);
158 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
159 &metaslab_min_alloc_size, 0,
160 "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
163 * Percentage of all cpus that can be used by the metaslab taskq.
165 int metaslab_load_pct = 50;
166 TUNABLE_INT("vfs.zfs.metaslab.load_pct", &metaslab_load_pct);
167 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, CTLFLAG_RWTUN,
168 &metaslab_load_pct, 0,
169 "Percentage of cpus that can be used by the metaslab taskq");
172 * Determines how many txgs a metaslab may remain loaded without having any
173 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
176 int metaslab_unload_delay = TXG_SIZE * 2;
177 TUNABLE_INT("vfs.zfs.metaslab.unload_delay", &metaslab_unload_delay);
178 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, unload_delay, CTLFLAG_RWTUN,
179 &metaslab_unload_delay, 0,
180 "Number of TXGs that an unused metaslab can be kept in memory");
183 * Should we be willing to write data to degraded vdevs?
185 boolean_t zfs_write_to_degraded = B_FALSE;
186 SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RW,
187 &zfs_write_to_degraded, 0,
188 "Allow writing data to degraded vdevs");
189 TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
192 * Max number of metaslabs per group to preload.
194 int metaslab_preload_limit = SPA_DVAS_PER_BP;
195 TUNABLE_INT("vfs.zfs.metaslab.preload_limit", &metaslab_preload_limit);
196 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN,
197 &metaslab_preload_limit, 0,
198 "Max number of metaslabs per group to preload");
201 * Enable/disable preloading of metaslabs.
203 boolean_t metaslab_preload_enabled = B_TRUE;
204 TUNABLE_INT("vfs.zfs.metaslab.preload_enabled", &metaslab_preload_enabled);
205 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_enabled, CTLFLAG_RWTUN,
206 &metaslab_preload_enabled, 0,
207 "Max number of metaslabs per group to preload");
210 * Enable/disable additional weight factor for each metaslab.
212 boolean_t metaslab_weight_factor_enable = B_FALSE;
213 TUNABLE_INT("vfs.zfs.metaslab.weight_factor_enable",
214 &metaslab_weight_factor_enable);
215 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, weight_factor_enable, CTLFLAG_RWTUN,
216 &metaslab_weight_factor_enable, 0,
217 "Enable additional weight factor for each metaslab");
221 * ==========================================================================
223 * ==========================================================================
226 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
228 metaslab_class_t *mc;
230 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
240 metaslab_class_destroy(metaslab_class_t *mc)
242 ASSERT(mc->mc_rotor == NULL);
243 ASSERT(mc->mc_alloc == 0);
244 ASSERT(mc->mc_deferred == 0);
245 ASSERT(mc->mc_space == 0);
246 ASSERT(mc->mc_dspace == 0);
248 kmem_free(mc, sizeof (metaslab_class_t));
252 metaslab_class_validate(metaslab_class_t *mc)
254 metaslab_group_t *mg;
258 * Must hold one of the spa_config locks.
260 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
261 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
263 if ((mg = mc->mc_rotor) == NULL)
268 ASSERT(vd->vdev_mg != NULL);
269 ASSERT3P(vd->vdev_top, ==, vd);
270 ASSERT3P(mg->mg_class, ==, mc);
271 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
272 } while ((mg = mg->mg_next) != mc->mc_rotor);
278 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
279 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
281 atomic_add_64(&mc->mc_alloc, alloc_delta);
282 atomic_add_64(&mc->mc_deferred, defer_delta);
283 atomic_add_64(&mc->mc_space, space_delta);
284 atomic_add_64(&mc->mc_dspace, dspace_delta);
288 metaslab_class_minblocksize_update(metaslab_class_t *mc)
290 metaslab_group_t *mg;
292 uint64_t minashift = UINT64_MAX;
294 if ((mg = mc->mc_rotor) == NULL) {
295 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
301 if (vd->vdev_ashift < minashift)
302 minashift = vd->vdev_ashift;
303 } while ((mg = mg->mg_next) != mc->mc_rotor);
305 mc->mc_minblocksize = 1ULL << minashift;
309 metaslab_class_get_alloc(metaslab_class_t *mc)
311 return (mc->mc_alloc);
315 metaslab_class_get_deferred(metaslab_class_t *mc)
317 return (mc->mc_deferred);
321 metaslab_class_get_space(metaslab_class_t *mc)
323 return (mc->mc_space);
327 metaslab_class_get_dspace(metaslab_class_t *mc)
329 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
333 metaslab_class_get_minblocksize(metaslab_class_t *mc)
335 return (mc->mc_minblocksize);
339 * ==========================================================================
341 * ==========================================================================
344 metaslab_compare(const void *x1, const void *x2)
346 const metaslab_t *m1 = x1;
347 const metaslab_t *m2 = x2;
349 if (m1->ms_weight < m2->ms_weight)
351 if (m1->ms_weight > m2->ms_weight)
355 * If the weights are identical, use the offset to force uniqueness.
357 if (m1->ms_start < m2->ms_start)
359 if (m1->ms_start > m2->ms_start)
362 ASSERT3P(m1, ==, m2);
368 * Update the allocatable flag and the metaslab group's capacity.
369 * The allocatable flag is set to true if the capacity is below
370 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
371 * from allocatable to non-allocatable or vice versa then the metaslab
372 * group's class is updated to reflect the transition.
375 metaslab_group_alloc_update(metaslab_group_t *mg)
377 vdev_t *vd = mg->mg_vd;
378 metaslab_class_t *mc = mg->mg_class;
379 vdev_stat_t *vs = &vd->vdev_stat;
380 boolean_t was_allocatable;
382 ASSERT(vd == vd->vdev_top);
384 mutex_enter(&mg->mg_lock);
385 was_allocatable = mg->mg_allocatable;
387 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
390 mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold);
393 * The mc_alloc_groups maintains a count of the number of
394 * groups in this metaslab class that are still above the
395 * zfs_mg_noalloc_threshold. This is used by the allocating
396 * threads to determine if they should avoid allocations to
397 * a given group. The allocator will avoid allocations to a group
398 * if that group has reached or is below the zfs_mg_noalloc_threshold
399 * and there are still other groups that are above the threshold.
400 * When a group transitions from allocatable to non-allocatable or
401 * vice versa we update the metaslab class to reflect that change.
402 * When the mc_alloc_groups value drops to 0 that means that all
403 * groups have reached the zfs_mg_noalloc_threshold making all groups
404 * eligible for allocations. This effectively means that all devices
405 * are balanced again.
407 if (was_allocatable && !mg->mg_allocatable)
408 mc->mc_alloc_groups--;
409 else if (!was_allocatable && mg->mg_allocatable)
410 mc->mc_alloc_groups++;
411 mutex_exit(&mg->mg_lock);
415 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
417 metaslab_group_t *mg;
419 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
420 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
421 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
422 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
425 mg->mg_activation_count = 0;
427 mg->mg_taskq = taskq_create("metaslab_group_tasksq", metaslab_load_pct,
428 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
434 metaslab_group_destroy(metaslab_group_t *mg)
436 ASSERT(mg->mg_prev == NULL);
437 ASSERT(mg->mg_next == NULL);
439 * We may have gone below zero with the activation count
440 * either because we never activated in the first place or
441 * because we're done, and possibly removing the vdev.
443 ASSERT(mg->mg_activation_count <= 0);
445 avl_destroy(&mg->mg_metaslab_tree);
446 mutex_destroy(&mg->mg_lock);
447 kmem_free(mg, sizeof (metaslab_group_t));
451 metaslab_group_activate(metaslab_group_t *mg)
453 metaslab_class_t *mc = mg->mg_class;
454 metaslab_group_t *mgprev, *mgnext;
456 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
458 ASSERT(mc->mc_rotor != mg);
459 ASSERT(mg->mg_prev == NULL);
460 ASSERT(mg->mg_next == NULL);
461 ASSERT(mg->mg_activation_count <= 0);
463 if (++mg->mg_activation_count <= 0)
466 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
467 metaslab_group_alloc_update(mg);
469 if ((mgprev = mc->mc_rotor) == NULL) {
473 mgnext = mgprev->mg_next;
474 mg->mg_prev = mgprev;
475 mg->mg_next = mgnext;
476 mgprev->mg_next = mg;
477 mgnext->mg_prev = mg;
480 metaslab_class_minblocksize_update(mc);
484 metaslab_group_passivate(metaslab_group_t *mg)
486 metaslab_class_t *mc = mg->mg_class;
487 metaslab_group_t *mgprev, *mgnext;
489 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
491 if (--mg->mg_activation_count != 0) {
492 ASSERT(mc->mc_rotor != mg);
493 ASSERT(mg->mg_prev == NULL);
494 ASSERT(mg->mg_next == NULL);
495 ASSERT(mg->mg_activation_count < 0);
499 taskq_wait(mg->mg_taskq);
501 mgprev = mg->mg_prev;
502 mgnext = mg->mg_next;
507 mc->mc_rotor = mgnext;
508 mgprev->mg_next = mgnext;
509 mgnext->mg_prev = mgprev;
514 metaslab_class_minblocksize_update(mc);
518 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
520 mutex_enter(&mg->mg_lock);
521 ASSERT(msp->ms_group == NULL);
524 avl_add(&mg->mg_metaslab_tree, msp);
525 mutex_exit(&mg->mg_lock);
529 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
531 mutex_enter(&mg->mg_lock);
532 ASSERT(msp->ms_group == mg);
533 avl_remove(&mg->mg_metaslab_tree, msp);
534 msp->ms_group = NULL;
535 mutex_exit(&mg->mg_lock);
539 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
542 * Although in principle the weight can be any value, in
543 * practice we do not use values in the range [1, 510].
545 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
546 ASSERT(MUTEX_HELD(&msp->ms_lock));
548 mutex_enter(&mg->mg_lock);
549 ASSERT(msp->ms_group == mg);
550 avl_remove(&mg->mg_metaslab_tree, msp);
551 msp->ms_weight = weight;
552 avl_add(&mg->mg_metaslab_tree, msp);
553 mutex_exit(&mg->mg_lock);
557 * Determine if a given metaslab group should skip allocations. A metaslab
558 * group should avoid allocations if its used capacity has crossed the
559 * zfs_mg_noalloc_threshold and there is at least one metaslab group
560 * that can still handle allocations.
563 metaslab_group_allocatable(metaslab_group_t *mg)
565 vdev_t *vd = mg->mg_vd;
566 spa_t *spa = vd->vdev_spa;
567 metaslab_class_t *mc = mg->mg_class;
570 * A metaslab group is considered allocatable if its free capacity
571 * is greater than the set value of zfs_mg_noalloc_threshold, it's
572 * associated with a slog, or there are no other metaslab groups
573 * with free capacity greater than zfs_mg_noalloc_threshold.
575 return (mg->mg_free_capacity > zfs_mg_noalloc_threshold ||
576 mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
580 * ==========================================================================
581 * Range tree callbacks
582 * ==========================================================================
586 * Comparison function for the private size-ordered tree. Tree is sorted
587 * by size, larger sizes at the end of the tree.
590 metaslab_rangesize_compare(const void *x1, const void *x2)
592 const range_seg_t *r1 = x1;
593 const range_seg_t *r2 = x2;
594 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
595 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
597 if (rs_size1 < rs_size2)
599 if (rs_size1 > rs_size2)
602 if (r1->rs_start < r2->rs_start)
605 if (r1->rs_start > r2->rs_start)
612 * Create any block allocator specific components. The current allocators
613 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
616 metaslab_rt_create(range_tree_t *rt, void *arg)
618 metaslab_t *msp = arg;
620 ASSERT3P(rt->rt_arg, ==, msp);
621 ASSERT(msp->ms_tree == NULL);
623 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
624 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
628 * Destroy the block allocator specific components.
631 metaslab_rt_destroy(range_tree_t *rt, void *arg)
633 metaslab_t *msp = arg;
635 ASSERT3P(rt->rt_arg, ==, msp);
636 ASSERT3P(msp->ms_tree, ==, rt);
637 ASSERT0(avl_numnodes(&msp->ms_size_tree));
639 avl_destroy(&msp->ms_size_tree);
643 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
645 metaslab_t *msp = arg;
647 ASSERT3P(rt->rt_arg, ==, msp);
648 ASSERT3P(msp->ms_tree, ==, rt);
649 VERIFY(!msp->ms_condensing);
650 avl_add(&msp->ms_size_tree, rs);
654 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
656 metaslab_t *msp = arg;
658 ASSERT3P(rt->rt_arg, ==, msp);
659 ASSERT3P(msp->ms_tree, ==, rt);
660 VERIFY(!msp->ms_condensing);
661 avl_remove(&msp->ms_size_tree, rs);
665 metaslab_rt_vacate(range_tree_t *rt, void *arg)
667 metaslab_t *msp = arg;
669 ASSERT3P(rt->rt_arg, ==, msp);
670 ASSERT3P(msp->ms_tree, ==, rt);
673 * Normally one would walk the tree freeing nodes along the way.
674 * Since the nodes are shared with the range trees we can avoid
675 * walking all nodes and just reinitialize the avl tree. The nodes
676 * will be freed by the range tree, so we don't want to free them here.
678 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
679 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
682 static range_tree_ops_t metaslab_rt_ops = {
691 * ==========================================================================
692 * Metaslab block operations
693 * ==========================================================================
697 * Return the maximum contiguous segment within the metaslab.
700 metaslab_block_maxsize(metaslab_t *msp)
702 avl_tree_t *t = &msp->ms_size_tree;
705 if (t == NULL || (rs = avl_last(t)) == NULL)
708 return (rs->rs_end - rs->rs_start);
712 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
715 range_tree_t *rt = msp->ms_tree;
717 VERIFY(!msp->ms_condensing);
719 start = msp->ms_ops->msop_alloc(msp, size);
720 if (start != -1ULL) {
721 vdev_t *vd = msp->ms_group->mg_vd;
723 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
724 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
725 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
726 range_tree_remove(rt, start, size);
732 * ==========================================================================
733 * Common allocator routines
734 * ==========================================================================
738 * This is a helper function that can be used by the allocator to find
739 * a suitable block to allocate. This will search the specified AVL
740 * tree looking for a block that matches the specified criteria.
743 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
746 range_seg_t *rs, rsearch;
749 rsearch.rs_start = *cursor;
750 rsearch.rs_end = *cursor + size;
752 rs = avl_find(t, &rsearch, &where);
754 rs = avl_nearest(t, where, AVL_AFTER);
757 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
759 if (offset + size <= rs->rs_end) {
760 *cursor = offset + size;
763 rs = AVL_NEXT(t, rs);
767 * If we know we've searched the whole map (*cursor == 0), give up.
768 * Otherwise, reset the cursor to the beginning and try again.
774 return (metaslab_block_picker(t, cursor, size, align));
778 * ==========================================================================
779 * The first-fit block allocator
780 * ==========================================================================
783 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
786 * Find the largest power of 2 block size that evenly divides the
787 * requested size. This is used to try to allocate blocks with similar
788 * alignment from the same area of the metaslab (i.e. same cursor
789 * bucket), but it does not guarantee that other allocation sizes
790 * will not exist in the same region.
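 * For example (illustrative sizes), a 24K (0x6000) request yields
 * align = 0x2000, so it shares the 8K cursor bucket with other 8K-aligned
 * request sizes.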
792 uint64_t align = size & -size;
793 uint64_t *cursor = &msp->ms_lbas[highbit(align) - 1];
794 avl_tree_t *t = &msp->ms_tree->rt_root;
796 return (metaslab_block_picker(t, cursor, size, align));
801 metaslab_ff_fragmented(metaslab_t *msp)
806 static metaslab_ops_t metaslab_ff_ops = {
808 metaslab_ff_fragmented
812 * ==========================================================================
813 * Dynamic block allocator -
814 * Uses the first fit allocation scheme until space gets low and then
815 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
816 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
817 * ==========================================================================
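/*
 * For example, with the default metaslab_df_free_pct of 4, a 10GB metaslab
 * switches to the size-ordered (best-fit) search once less than roughly
 * 410MB of it remains free, or sooner if no free segment of at least
 * metaslab_df_alloc_threshold bytes is left (illustrative numbers).
 */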
820 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
823 * Find the largest power of 2 block size that evenly divides the
824 * requested size. This is used to try to allocate blocks with similar
825 * alignment from the same area of the metaslab (i.e. same cursor
826 * bucket), but it does not guarantee that other allocation sizes
827 * will not exist in the same region.
829 uint64_t align = size & -size;
830 uint64_t *cursor = &msp->ms_lbas[highbit(align) - 1];
831 range_tree_t *rt = msp->ms_tree;
832 avl_tree_t *t = &rt->rt_root;
833 uint64_t max_size = metaslab_block_maxsize(msp);
834 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
836 ASSERT(MUTEX_HELD(&msp->ms_lock));
837 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
843 * If we're running low on space switch to using the size
844 * sorted AVL tree (best-fit).
846 if (max_size < metaslab_df_alloc_threshold ||
847 free_pct < metaslab_df_free_pct) {
848 t = &msp->ms_size_tree;
852 return (metaslab_block_picker(t, cursor, size, 1ULL));
856 metaslab_df_fragmented(metaslab_t *msp)
858 range_tree_t *rt = msp->ms_tree;
859 uint64_t max_size = metaslab_block_maxsize(msp);
860 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
862 if (max_size >= metaslab_df_alloc_threshold &&
863 free_pct >= metaslab_df_free_pct)
869 static metaslab_ops_t metaslab_df_ops = {
871 metaslab_df_fragmented
875 * ==========================================================================
876 * Cursor fit block allocator -
877 * Select the largest region in the metaslab, set the cursor to the beginning
878 * of the range and the cursor_end to the end of the range. As allocations
879 * are made advance the cursor. Continue allocating from the cursor until
880 * the range is exhausted and then find a new range.
881 * ==========================================================================
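/*
 * For example (illustrative sizes), once avl_last() selects a 1MB region,
 * successive 128K allocations simply advance the cursor through it; only
 * when fewer than 128K remain between cursor and cursor_end do we go back
 * to the size tree for a new region.
 */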
884 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
886 range_tree_t *rt = msp->ms_tree;
887 avl_tree_t *t = &msp->ms_size_tree;
888 uint64_t *cursor = &msp->ms_lbas[0];
889 uint64_t *cursor_end = &msp->ms_lbas[1];
892 ASSERT(MUTEX_HELD(&msp->ms_lock));
893 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
895 ASSERT3U(*cursor_end, >=, *cursor);
897 if ((*cursor + size) > *cursor_end) {
900 rs = avl_last(&msp->ms_size_tree);
901 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
904 *cursor = rs->rs_start;
905 *cursor_end = rs->rs_end;
915 metaslab_cf_fragmented(metaslab_t *msp)
917 return (metaslab_block_maxsize(msp) < metaslab_min_alloc_size);
920 static metaslab_ops_t metaslab_cf_ops = {
922 metaslab_cf_fragmented
926 * ==========================================================================
927 * New dynamic fit allocator -
928 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
929 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
931 * ==========================================================================
935 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
936 * to request from the allocator.
938 uint64_t metaslab_ndf_clump_shift = 4;
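/* With the default shift of 4, we look for room for roughly 16 such blocks. */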
941 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
943 avl_tree_t *t = &msp->ms_tree->rt_root;
945 range_seg_t *rs, rsearch;
946 uint64_t hbit = highbit(size);
947 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
948 uint64_t max_size = metaslab_block_maxsize(msp);
950 ASSERT(MUTEX_HELD(&msp->ms_lock));
951 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
956 rsearch.rs_start = *cursor;
957 rsearch.rs_end = *cursor + size;
959 rs = avl_find(t, &rsearch, &where);
960 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
961 t = &msp->ms_size_tree;
963 rsearch.rs_start = 0;
964 rsearch.rs_end = MIN(max_size,
965 1ULL << (hbit + metaslab_ndf_clump_shift));
966 rs = avl_find(t, &rsearch, &where);
968 rs = avl_nearest(t, where, AVL_AFTER);
972 if ((rs->rs_end - rs->rs_start) >= size) {
973 *cursor = rs->rs_start + size;
974 return (rs->rs_start);
980 metaslab_ndf_fragmented(metaslab_t *msp)
982 return (metaslab_block_maxsize(msp) <=
983 (metaslab_min_alloc_size << metaslab_ndf_clump_shift));
986 static metaslab_ops_t metaslab_ndf_ops = {
988 metaslab_ndf_fragmented
991 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
994 * ==========================================================================
996 * ==========================================================================
1000 * Wait for any in-progress metaslab loads to complete.
1003 metaslab_load_wait(metaslab_t *msp)
1005 ASSERT(MUTEX_HELD(&msp->ms_lock));
1007 while (msp->ms_loading) {
1008 ASSERT(!msp->ms_loaded);
1009 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1014 metaslab_load(metaslab_t *msp)
1018 ASSERT(MUTEX_HELD(&msp->ms_lock));
1019 ASSERT(!msp->ms_loaded);
1020 ASSERT(!msp->ms_loading);
1022 msp->ms_loading = B_TRUE;
1025 * If the space map has not been allocated yet, then treat
1026 * all the space in the metaslab as free and add it to the ms_tree.
1029 if (msp->ms_sm != NULL)
1030 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1032 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1034 msp->ms_loaded = (error == 0);
1035 msp->ms_loading = B_FALSE;
1037 if (msp->ms_loaded) {
1038 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1039 range_tree_walk(msp->ms_defertree[t],
1040 range_tree_remove, msp->ms_tree);
1043 cv_broadcast(&msp->ms_load_cv);
1048 metaslab_unload(metaslab_t *msp)
1050 ASSERT(MUTEX_HELD(&msp->ms_lock));
1051 range_tree_vacate(msp->ms_tree, NULL, NULL);
1052 msp->ms_loaded = B_FALSE;
1053 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1057 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
1059 vdev_t *vd = mg->mg_vd;
1060 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1063 msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1064 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1065 cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
1067 msp->ms_start = id << vd->vdev_ms_shift;
1068 msp->ms_size = 1ULL << vd->vdev_ms_shift;
1071 * We only open space map objects that already exist. All others
1072 * will be opened when we finally allocate an object for them.
1075 VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
1076 msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
1077 ASSERT(msp->ms_sm != NULL);
1081 * We create the main range tree here, but we don't create the
1082 * alloctree and freetree until metaslab_sync_done(). This serves
1083 * two purposes: it allows metaslab_sync_done() to detect the
1084 * addition of new space; and for debugging, it ensures that we'd
1085 * data fault on any attempt to use this metaslab before it's ready.
1087 msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
1088 metaslab_group_add(mg, msp);
1090 msp->ms_ops = mg->mg_class->mc_ops;
1093 * If we're opening an existing pool (txg == 0) or creating
1094 * a new one (txg == TXG_INITIAL), all space is available now.
1095 * If we're adding space to an existing pool, the new space
1096 * does not become available until after this txg has synced.
1098 if (txg <= TXG_INITIAL)
1099 metaslab_sync_done(msp, 0);
1102 * If metaslab_debug_load is set and we're initializing a metaslab
1103 * that has an allocated space_map object then load its space
1104 * map so that we can verify frees.
1106 if (metaslab_debug_load && msp->ms_sm != NULL) {
1107 mutex_enter(&msp->ms_lock);
1108 VERIFY0(metaslab_load(msp));
1109 mutex_exit(&msp->ms_lock);
1113 vdev_dirty(vd, 0, NULL, txg);
1114 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1121 metaslab_fini(metaslab_t *msp)
1123 metaslab_group_t *mg = msp->ms_group;
1125 metaslab_group_remove(mg, msp);
1127 mutex_enter(&msp->ms_lock);
1129 VERIFY(msp->ms_group == NULL);
1130 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1132 space_map_close(msp->ms_sm);
1134 metaslab_unload(msp);
1135 range_tree_destroy(msp->ms_tree);
1137 for (int t = 0; t < TXG_SIZE; t++) {
1138 range_tree_destroy(msp->ms_alloctree[t]);
1139 range_tree_destroy(msp->ms_freetree[t]);
1142 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1143 range_tree_destroy(msp->ms_defertree[t]);
1146 ASSERT0(msp->ms_deferspace);
1148 mutex_exit(&msp->ms_lock);
1149 cv_destroy(&msp->ms_load_cv);
1150 mutex_destroy(&msp->ms_lock);
1152 kmem_free(msp, sizeof (metaslab_t));
1156 * Apply a weighting factor based on the histogram information for this
1157 * metaslab. The current weighting factor is somewhat arbitrary and requires
1158 * additional investigation. The implementation provides a measure of
1159 * "weighted" free space and gives a higher weighting for larger contiguous
1160 * regions. The weighting factor is determined by counting the number of
1161 * sm_shift sectors that exist in each region represented by the histogram.
1162 * That value is then multiplied by the power of 2 exponent and the sm_shift
 * value.
1165 * For example, assume the 2^21 histogram bucket has 4 2MB regions and the
1166 * metaslab has an sm_shift value of 9 (512B):
1168 * 1) calculate the number of sm_shift sectors in the region:
1169 * 2^21 / 2^9 = 2^12 = 4096 * 4 (number of regions) = 16384
1170 * 2) multiply by the power of 2 exponent and the sm_shift value:
1171 * 16384 * 21 * 9 = 3096576
1172 * This value will be added to the weighting of the metaslab.
1175 metaslab_weight_factor(metaslab_t *msp)
1177 uint64_t factor = 0;
1182 * A null space map means that the entire metaslab is free,
1183 * calculate a weight factor that spans the entire size of the metaslab.
1186 if (msp->ms_sm == NULL) {
1187 vdev_t *vd = msp->ms_group->mg_vd;
1189 i = highbit(msp->ms_size) - 1;
1190 sectors = msp->ms_size >> vd->vdev_ashift;
1191 return (sectors * i * vd->vdev_ashift);
1194 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
1197 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE(msp->ms_sm); i++) {
1198 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1202 * Determine the number of sm_shift sectors in the region
1203 * indicated by the histogram. For example, given an
1204 * sm_shift value of 9 (512 bytes) and i = 4 then we know
1205 * that we're looking at an 8K region in the histogram
1206 * (i.e. 9 + 4 = 13, 2^13 = 8192). To figure out the
1207 * number of sm_shift sectors (512 bytes in this example),
1208 * we would take 8192 / 512 = 16. Since the histogram
1209 * is offset by sm_shift we can simply use the value of
1210 * i to calculate this (i.e. 2^i = 16 where i = 4).
1212 sectors = msp->ms_sm->sm_phys->smp_histogram[i] << i;
1213 factor += (i + msp->ms_sm->sm_shift) * sectors;
1215 return (factor * msp->ms_sm->sm_shift);
1219 metaslab_weight(metaslab_t *msp)
1221 metaslab_group_t *mg = msp->ms_group;
1222 vdev_t *vd = mg->mg_vd;
1223 uint64_t weight, space;
1225 ASSERT(MUTEX_HELD(&msp->ms_lock));
1228 * This vdev is in the process of being removed so there is nothing
1229 * for us to do here.
1231 if (vd->vdev_removing) {
1232 ASSERT0(space_map_allocated(msp->ms_sm));
1233 ASSERT0(vd->vdev_ms_shift);
1238 * The baseline weight is the metaslab's free space.
1240 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1244 * Modern disks have uniform bit density and constant angular velocity.
1245 * Therefore, the outer recording zones are faster (higher bandwidth)
1246 * than the inner zones by the ratio of outer to inner track diameter,
1247 * which is typically around 2:1. We account for this by assigning
1248 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1249 * In effect, this means that we'll select the metaslab with the most
1250 * free bandwidth rather than simply the one with the most free space.
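 * For example, on a vdev with 200 metaslabs, metaslab 0 is weighted at 2x
 * its free space, metaslab 100 at roughly 1.5x, and metaslab 199 at just
 * over 1x (illustrative counts).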
1252 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1253 ASSERT(weight >= space && weight <= 2 * space);
1255 msp->ms_factor = metaslab_weight_factor(msp);
1256 if (metaslab_weight_factor_enable)
1257 weight += msp->ms_factor;
1259 if (msp->ms_loaded && !msp->ms_ops->msop_fragmented(msp)) {
1261 * If this metaslab is one we're actively using, adjust its
1262 * weight to make it preferable to any inactive metaslab so
1263 * we'll polish it off.
1265 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1272 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1274 ASSERT(MUTEX_HELD(&msp->ms_lock));
1276 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1277 metaslab_load_wait(msp);
1278 if (!msp->ms_loaded) {
1279 int error = metaslab_load(msp);
1281 metaslab_group_sort(msp->ms_group, msp, 0);
1286 metaslab_group_sort(msp->ms_group, msp,
1287 msp->ms_weight | activation_weight);
1289 ASSERT(msp->ms_loaded);
1290 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1296 metaslab_passivate(metaslab_t *msp, uint64_t size)
1299 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1300 * this metaslab again. In that case, it had better be empty,
1301 * or we would be leaving space on the table.
1303 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1304 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1305 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1309 metaslab_preload(void *arg)
1311 metaslab_t *msp = arg;
1312 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1314 mutex_enter(&msp->ms_lock);
1315 metaslab_load_wait(msp);
1316 if (!msp->ms_loaded)
1317 (void) metaslab_load(msp);
1320 * Set the ms_access_txg value so that we don't unload it right away.
1322 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1323 mutex_exit(&msp->ms_lock);
1327 metaslab_group_preload(metaslab_group_t *mg)
1329 spa_t *spa = mg->mg_vd->vdev_spa;
1331 avl_tree_t *t = &mg->mg_metaslab_tree;
1334 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1335 taskq_wait(mg->mg_taskq);
1338 mutex_enter(&mg->mg_lock);
1341 * Prefetch the next potential metaslabs
1343 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
1345 /* If we have reached our preload limit then we're done */
1346 if (++m > metaslab_preload_limit)
1349 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1350 msp, TQ_SLEEP) != 0);
1352 mutex_exit(&mg->mg_lock);
1356 * Determine if the space map's on-disk footprint is past our tolerance
1357 * for inefficiency. We would like to use the following criteria to make
 * our decision:
1360 * 1. The size of the space map object should not dramatically increase as a
1361 * result of writing out the free space range tree.
1363 * 2. Condense if the on-disk space map representation is at least
1364 * zfs_condense_pct/100 times the size of the free space range tree
1365 * representation (i.e. zfs_condense_pct = 110 and in-core = 1MB,
 * minimal = 1.1MB).
1367 * Checking the first condition is tricky since we don't want to walk
1368 * the entire AVL tree calculating the estimated on-disk size. Instead we
1369 * use the size-ordered range tree in the metaslab and calculate the
1370 * size required to write out the largest segment in our free tree. If the
1371 * size required to represent that segment on disk is larger than the space
1372 * map object then we avoid condensing this map.
1374 * To determine the second criterion we use a best-case estimate and assume
1375 * each segment can be represented on-disk as a single 64-bit entry. We refer
1376 * to this best-case estimate as the space map's minimal form.
1379 metaslab_should_condense(metaslab_t *msp)
1381 space_map_t *sm = msp->ms_sm;
1383 uint64_t size, entries, segsz;
1385 ASSERT(MUTEX_HELD(&msp->ms_lock));
1386 ASSERT(msp->ms_loaded);
1389 * Use the ms_size_tree range tree, which is ordered by size, to
1390 * obtain the largest segment in the free tree. If the tree is empty
1391 * then we should condense the map.
1393 rs = avl_last(&msp->ms_size_tree);
1398 * Calculate the number of 64-bit entries this segment would
1399 * require when written to disk. If this single segment would be
1400 * larger on-disk than the entire current on-disk structure, then
1401 * clearly condensing will increase the on-disk structure size.
1403 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1404 entries = size / (MIN(size, SM_RUN_MAX));
1405 segsz = entries * sizeof (uint64_t);
1407 return (segsz <= space_map_length(msp->ms_sm) &&
1408 space_map_length(msp->ms_sm) >= (zfs_condense_pct *
1409 sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root)) / 100);
1413 * Condense the on-disk space map representation to its minimized form.
1414 * The minimized form consists of a small number of allocations followed by
1415 * the entries of the free range tree.
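/*
 * Concretely, after space_map_truncate() the new on-disk map is written as
 * one ALLOC pass over condense_tree (the metaslab minus this txg's frees,
 * the deferred frees, and allocations belonging to future txgs), followed
 * by a FREE pass over ms_tree; see the two space_map_write() calls below.
 */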
1418 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1420 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1421 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1422 range_tree_t *condense_tree;
1423 space_map_t *sm = msp->ms_sm;
1425 ASSERT(MUTEX_HELD(&msp->ms_lock));
1426 ASSERT3U(spa_sync_pass(spa), ==, 1);
1427 ASSERT(msp->ms_loaded);
1429 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1430 "smp size %llu, segments %lu", txg, msp->ms_id, msp,
1431 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root));
1434 * Create a range tree that is 100% allocated. We remove segments
1435 * that have been freed in this txg, any deferred frees that exist,
1436 * and any allocation in the future. Removing segments should be
1437 * a relatively inexpensive operation since we expect these trees to
1438 * have a small number of nodes.
1440 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1441 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1444 * Remove what's been freed in this txg from the condense_tree.
1445 * Since we're in sync_pass 1, we know that all the frees from
1446 * this txg are in the freetree.
1448 range_tree_walk(freetree, range_tree_remove, condense_tree);
1450 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1451 range_tree_walk(msp->ms_defertree[t],
1452 range_tree_remove, condense_tree);
1455 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1456 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1457 range_tree_remove, condense_tree);
1461 * We're about to drop the metaslab's lock thus allowing
1462 * other consumers to change its content. Set the
1463 * metaslab's ms_condensing flag to ensure that
1464 * allocations on this metaslab do not occur while we're
1465 * in the middle of committing it to disk. This is only critical
1466 * for the ms_tree as all other range trees use per txg
1467 * views of their content.
1469 msp->ms_condensing = B_TRUE;
1471 mutex_exit(&msp->ms_lock);
1472 space_map_truncate(sm, tx);
1473 mutex_enter(&msp->ms_lock);
1476 * While we would ideally like to create a space_map representation
1477 * that consists only of allocation records, doing so can be
1478 * prohibitively expensive because the in-core free tree can be
1479 * large, and therefore computationally expensive to subtract
1480 * from the condense_tree. Instead we sync out two trees, a cheap
1481 * allocation only tree followed by the in-core free tree. While not
1482 * optimal, this is typically close to optimal, and much cheaper to
 * compute.
1485 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1486 range_tree_vacate(condense_tree, NULL, NULL);
1487 range_tree_destroy(condense_tree);
1489 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1490 msp->ms_condensing = B_FALSE;
1494 * Write a metaslab to disk in the context of the specified transaction group.
1497 metaslab_sync(metaslab_t *msp, uint64_t txg)
1499 metaslab_group_t *mg = msp->ms_group;
1500 vdev_t *vd = mg->mg_vd;
1501 spa_t *spa = vd->vdev_spa;
1502 objset_t *mos = spa_meta_objset(spa);
1503 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1504 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1505 range_tree_t **freed_tree =
1506 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1508 uint64_t object = space_map_object(msp->ms_sm);
1510 ASSERT(!vd->vdev_ishole);
1513 * This metaslab has just been added so there's no work to do now.
1515 if (*freetree == NULL) {
1516 ASSERT3P(alloctree, ==, NULL);
1520 ASSERT3P(alloctree, !=, NULL);
1521 ASSERT3P(*freetree, !=, NULL);
1522 ASSERT3P(*freed_tree, !=, NULL);
1524 if (range_tree_space(alloctree) == 0 &&
1525 range_tree_space(*freetree) == 0)
1529 * The only state that can actually be changing concurrently with
1530 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1531 * be modifying this txg's alloctree, freetree, freed_tree, or
1532 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1533 * space_map ASSERTs. We drop it whenever we call into the DMU,
1534 * because the DMU can call down to us (e.g. via zio_free()) at
 * any time.
1538 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1540 if (msp->ms_sm == NULL) {
1541 uint64_t new_object;
1543 new_object = space_map_alloc(mos, tx);
1544 VERIFY3U(new_object, !=, 0);
1546 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1547 msp->ms_start, msp->ms_size, vd->vdev_ashift,
1549 ASSERT(msp->ms_sm != NULL);
1552 mutex_enter(&msp->ms_lock);
1554 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1555 metaslab_should_condense(msp)) {
1556 metaslab_condense(msp, txg, tx);
1558 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1559 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1562 range_tree_vacate(alloctree, NULL, NULL);
1564 if (msp->ms_loaded) {
1566 * When the space map is loaded, we have an accurate
1567 * histogram in the range tree. This gives us an opportunity
1568 * to bring the space map's histogram up-to-date so we clear
1569 * it first before updating it.
1571 space_map_histogram_clear(msp->ms_sm);
1572 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1575 * Since the space map is not loaded we simply update the
1576 * existing histogram with what was freed in this txg. This
1577 * means that the on-disk histogram may not have an accurate
1578 * view of the free space but it's close enough to allow
1579 * us to make allocation decisions.
1581 space_map_histogram_add(msp->ms_sm, *freetree, tx);
1585 * For sync pass 1, we avoid traversing this txg's free range tree
1586 * and instead will just swap the pointers for freetree and
1587 * freed_tree. We can safely do this since the freed_tree is
1588 * guaranteed to be empty on the initial pass.
1590 if (spa_sync_pass(spa) == 1) {
1591 range_tree_swap(freetree, freed_tree);
1593 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1596 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1597 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1599 mutex_exit(&msp->ms_lock);
1601 if (object != space_map_object(msp->ms_sm)) {
1602 object = space_map_object(msp->ms_sm);
1603 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1604 msp->ms_id, sizeof (uint64_t), &object, tx);
1610 * Called after a transaction group has completely synced to mark
1611 * all of the metaslab's free space as usable.
1614 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1616 metaslab_group_t *mg = msp->ms_group;
1617 vdev_t *vd = mg->mg_vd;
1618 range_tree_t **freed_tree;
1619 range_tree_t **defer_tree;
1620 int64_t alloc_delta, defer_delta;
1622 ASSERT(!vd->vdev_ishole);
1624 mutex_enter(&msp->ms_lock);
1627 * If this metaslab is just becoming available, initialize its
1628 * alloctrees, freetrees, and defertree and add its capacity to
 * the vdev.
1631 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1632 for (int t = 0; t < TXG_SIZE; t++) {
1633 ASSERT(msp->ms_alloctree[t] == NULL);
1634 ASSERT(msp->ms_freetree[t] == NULL);
1636 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1638 msp->ms_freetree[t] = range_tree_create(NULL, msp,
1642 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1643 ASSERT(msp->ms_defertree[t] == NULL);
1645 msp->ms_defertree[t] = range_tree_create(NULL, msp,
1649 vdev_space_update(vd, 0, 0, msp->ms_size);
1652 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1653 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1655 alloc_delta = space_map_alloc_delta(msp->ms_sm);
1656 defer_delta = range_tree_space(*freed_tree) -
1657 range_tree_space(*defer_tree);
1659 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1661 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1662 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1665 * If there's a metaslab_load() in progress, wait for it to complete
1666 * so that we have a consistent view of the in-core space map.
1668 metaslab_load_wait(msp);
1671 * Move the frees from the defer_tree back to the free
1672 * range tree (if it's loaded). Swap the freed_tree and the
1673 * defer_tree -- this is safe to do because we've just emptied out
 * the defer_tree.
1676 range_tree_vacate(*defer_tree,
1677 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1678 range_tree_swap(freed_tree, defer_tree);
1680 space_map_update(msp->ms_sm);
1682 msp->ms_deferspace += defer_delta;
1683 ASSERT3S(msp->ms_deferspace, >=, 0);
1684 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1685 if (msp->ms_deferspace != 0) {
1687 * Keep syncing this metaslab until all deferred frees
1688 * are back in circulation.
1690 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1693 if (msp->ms_loaded && msp->ms_access_txg < txg) {
1694 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1695 VERIFY0(range_tree_space(
1696 msp->ms_alloctree[(txg + t) & TXG_MASK]));
1699 if (!metaslab_debug_unload)
1700 metaslab_unload(msp);
1703 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1704 mutex_exit(&msp->ms_lock);
1709 metaslab_sync_reassess(metaslab_group_t *mg)
1711 int64_t failures = mg->mg_alloc_failures;
1713 metaslab_group_alloc_update(mg);
1714 atomic_add_64(&mg->mg_alloc_failures, -failures);
1717 * Preload the next potential metaslabs
1719 metaslab_group_preload(mg);
1723 metaslab_distance(metaslab_t *msp, dva_t *dva)
1725 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1726 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1727 uint64_t start = msp->ms_id;
1729 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1730 return (1ULL << 63);
1733 return ((start - offset) << ms_shift);
1735 return ((offset - start) << ms_shift);
1740 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1741 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1743 spa_t *spa = mg->mg_vd->vdev_spa;
1744 metaslab_t *msp = NULL;
1745 uint64_t offset = -1ULL;
1746 avl_tree_t *t = &mg->mg_metaslab_tree;
1747 uint64_t activation_weight;
1748 uint64_t target_distance;
1751 activation_weight = METASLAB_WEIGHT_PRIMARY;
1752 for (i = 0; i < d; i++) {
1753 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1754 activation_weight = METASLAB_WEIGHT_SECONDARY;
1760 boolean_t was_active;
1762 mutex_enter(&mg->mg_lock);
1763 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1764 if (msp->ms_weight < asize) {
1765 spa_dbgmsg(spa, "%s: failed to meet weight "
1766 "requirement: vdev %llu, txg %llu, mg %p, "
1767 "msp %p, psize %llu, asize %llu, "
1768 "failures %llu, weight %llu",
1769 spa_name(spa), mg->mg_vd->vdev_id, txg,
1770 mg, msp, psize, asize,
1771 mg->mg_alloc_failures, msp->ms_weight);
1772 mutex_exit(&mg->mg_lock);
1777 * If the selected metaslab is condensing, skip it.
1779 if (msp->ms_condensing)
1782 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1783 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1786 target_distance = min_distance +
1787 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
1790 for (i = 0; i < d; i++)
1791 if (metaslab_distance(msp, &dva[i]) <
1797 mutex_exit(&mg->mg_lock);
1801 mutex_enter(&msp->ms_lock);
1804 * If we've already reached the allowable number of failed
1805 * allocation attempts on this metaslab group then we
1806 * consider skipping it. We skip it only if we're allowed
1807 * to "fast" gang, the physical size is larger than
1808 * a gang block, and we're attempting to allocate from
1809 * the primary metaslab.
1811 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1812 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1813 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1814 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1815 "vdev %llu, txg %llu, mg %p, msp[%llu] %p, "
1816 "psize %llu, asize %llu, failures %llu",
1817 spa_name(spa), mg->mg_vd->vdev_id, txg, mg,
1818 msp->ms_id, msp, psize, asize,
1819 mg->mg_alloc_failures);
1820 mutex_exit(&msp->ms_lock);
1825 * Ensure that the metaslab we have selected is still
1826 * capable of handling our request. It's possible that
1827 * another thread may have changed the weight while we
1828 * were blocked on the metaslab lock.
1830 if (msp->ms_weight < asize || (was_active &&
1831 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1832 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1833 mutex_exit(&msp->ms_lock);
1837 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1838 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1839 metaslab_passivate(msp,
1840 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1841 mutex_exit(&msp->ms_lock);
1845 if (metaslab_activate(msp, activation_weight) != 0) {
1846 mutex_exit(&msp->ms_lock);
1851 * If this metaslab is currently condensing then pick again as
1852 * we can't manipulate this metaslab until it's committed
 * to disk.
1855 if (msp->ms_condensing) {
1856 mutex_exit(&msp->ms_lock);
1860 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
1863 atomic_inc_64(&mg->mg_alloc_failures);
1865 metaslab_passivate(msp, metaslab_block_maxsize(msp));
1866 mutex_exit(&msp->ms_lock);
1869 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
1870 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1872 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
1873 msp->ms_access_txg = txg + metaslab_unload_delay;
1875 mutex_exit(&msp->ms_lock);
1881 * Allocate a block for the specified i/o.
1884 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1885 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1887 metaslab_group_t *mg, *rotor;
1891 int zio_lock = B_FALSE;
1892 boolean_t allocatable;
1893 uint64_t offset = -1ULL;
1897 ASSERT(!DVA_IS_VALID(&dva[d]));
1900 * For testing, make some blocks above a certain size be gang blocks.
1902 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1903 return (SET_ERROR(ENOSPC));
1906 * Start at the rotor and loop through all mgs until we find something.
1907 * Note that there's no locking on mc_rotor or mc_aliquot because
1908 * nothing actually breaks if we miss a few updates -- we just won't
1909 * allocate quite as evenly. It all balances out over time.
1911 * If we are doing ditto or log blocks, try to spread them across
1912 * consecutive vdevs. If we're forced to reuse a vdev before we've
1913 * allocated all of our ditto blocks, then try and spread them out on
1914 * that vdev as much as possible. If it turns out to not be possible,
1915 * gradually lower our standards until anything becomes acceptable.
1916 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1917 * gives us hope of containing our fault domains to something we're
1918 * able to reason about. Otherwise, any two top-level vdev failures
1919 * will guarantee the loss of data. With consecutive allocation,
1920 * only two adjacent top-level vdev failures will result in data loss.
1922 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1923 * ourselves on the same vdev as our gang block header. That
1924 * way, we can hope for locality in vdev_cache, plus it makes our
1925 * fault domains something tractable.
1928 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1931 * It's possible the vdev we're using as the hint no
1932 * longer exists (i.e. removed). Consult the rotor when
 * this happens.
1938 if (flags & METASLAB_HINTBP_AVOID &&
1939 mg->mg_next != NULL)
1944 } else if (d != 0) {
1945 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1946 mg = vd->vdev_mg->mg_next;
1952 * If the hint put us into the wrong metaslab class, or into a
1953 * metaslab group that has been passivated, just follow the rotor.
1955 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1962 ASSERT(mg->mg_activation_count == 1);
1967 * Don't allocate from faulted devices.
1970 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1971 allocatable = vdev_allocatable(vd);
1972 spa_config_exit(spa, SCL_ZIO, FTAG);
1974 allocatable = vdev_allocatable(vd);
1978 * Determine if the selected metaslab group is eligible
1979 * for allocations. If we're ganging or have requested
1980 * an allocation for the smallest gang block size
1981 * then we don't want to avoid allocating to this
1982 * metaslab group. If we're in this condition we should
1983 * try to allocate from any device possible so that we
1984 * don't inadvertently return ENOSPC and suspend the pool
1985 * even though space is still available.
1987 if (allocatable && CAN_FASTGANG(flags) &&
1988 psize > SPA_GANGBLOCKSIZE)
1989 allocatable = metaslab_group_allocatable(mg);
1995 * Avoid writing single-copy data to a failing vdev
1996 * unless the user instructs us that it is okay.
1998 if ((vd->vdev_stat.vs_write_errors > 0 ||
1999 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2000 d == 0 && dshift == 3 &&
2001 !(zfs_write_to_degraded && vd->vdev_state ==
2002 VDEV_STATE_DEGRADED)) {
2007 ASSERT(mg->mg_class == mc);
2009 distance = vd->vdev_asize >> dshift;
2010 if (distance <= (1ULL << vd->vdev_ms_shift))
2015 asize = vdev_psize_to_asize(vd, psize);
2016 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2018 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
2020 if (offset != -1ULL) {
2022 * If we've just selected this metaslab group,
2023 * figure out whether the corresponding vdev is
2024 * over- or under-used relative to the pool,
2025 * and set an allocation bias to even it out.
2027 if (mc->mc_aliquot == 0) {
2028 vdev_stat_t *vs = &vd->vdev_stat;
2031 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2032 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2035 * Calculate how much more or less we should
2036 * try to allocate from this device during
2037 * this iteration around the rotor.
2038 * For example, if a device is 80% full
2039 * and the pool is 20% full then we should
2040 * reduce allocations by 60% on this device.
2042 * mg_bias = (20 - 80) * 512K / 100 = -307K
2044 * This reduces allocations by 307K for this
2047 mg->mg_bias = ((cu - vu) *
2048 (int64_t)mg->mg_aliquot) / 100;
2051 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2052 mg->mg_aliquot + mg->mg_bias) {
2053 mc->mc_rotor = mg->mg_next;
2057 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2058 DVA_SET_OFFSET(&dva[d], offset);
2059 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2060 DVA_SET_ASIZE(&dva[d], asize);
2065 mc->mc_rotor = mg->mg_next;
2067 } while ((mg = mg->mg_next) != rotor);
2071 ASSERT(dshift < 64);
2075 if (!allocatable && !zio_lock) {
2081 bzero(&dva[d], sizeof (dva_t));
2083 return (SET_ERROR(ENOSPC));
2087 * Free the block represented by DVA in the context of the specified
2088 * transaction group.
2091 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2093 uint64_t vdev = DVA_GET_VDEV(dva);
2094 uint64_t offset = DVA_GET_OFFSET(dva);
2095 uint64_t size = DVA_GET_ASIZE(dva);
2099 ASSERT(DVA_IS_VALID(dva));
2101 if (txg > spa_freeze_txg(spa))
2104 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2105 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2106 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2107 (u_longlong_t)vdev, (u_longlong_t)offset);
2112 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2114 if (DVA_GET_GANG(dva))
2115 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2117 mutex_enter(&msp->ms_lock);
2120 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2123 VERIFY(!msp->ms_condensing);
2124 VERIFY3U(offset, >=, msp->ms_start);
2125 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2126 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2128 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2129 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2130 range_tree_add(msp->ms_tree, offset, size);
2132 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2133 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2134 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2138 mutex_exit(&msp->ms_lock);
2142 * Intent log support: upon opening the pool after a crash, notify the SPA
2143 * of blocks that the intent log has allocated for immediate write, but
2144 * which are still considered free by the SPA because the last transaction
2145 * group didn't commit yet.
2148 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2150 uint64_t vdev = DVA_GET_VDEV(dva);
2151 uint64_t offset = DVA_GET_OFFSET(dva);
2152 uint64_t size = DVA_GET_ASIZE(dva);
2157 ASSERT(DVA_IS_VALID(dva));
2159 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2160 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2161 return (SET_ERROR(ENXIO));
2163 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2165 if (DVA_GET_GANG(dva))
2166 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2168 mutex_enter(&msp->ms_lock);
2170 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2171 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2173 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2174 error = SET_ERROR(ENOENT);
2176 if (error || txg == 0) { /* txg == 0 indicates dry run */
2177 mutex_exit(&msp->ms_lock);
2181 VERIFY(!msp->ms_condensing);
2182 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2183 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2184 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2185 range_tree_remove(msp->ms_tree, offset, size);
2187 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2188 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2189 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2190 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2193 mutex_exit(&msp->ms_lock);
2199 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2200 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2202 dva_t *dva = bp->blk_dva;
2203 dva_t *hintdva = hintbp->blk_dva;
2206 ASSERT(bp->blk_birth == 0);
2207 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2209 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2211 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2212 spa_config_exit(spa, SCL_ALLOC, FTAG);
2213 return (SET_ERROR(ENOSPC));
2216 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2217 ASSERT(BP_GET_NDVAS(bp) == 0);
2218 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2220 for (int d = 0; d < ndvas; d++) {
2221 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2224 for (d--; d >= 0; d--) {
2225 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2226 bzero(&dva[d], sizeof (dva_t));
2228 spa_config_exit(spa, SCL_ALLOC, FTAG);
2233 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2235 spa_config_exit(spa, SCL_ALLOC, FTAG);
2237 BP_SET_BIRTH(bp, txg, txg);
2243 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2245 const dva_t *dva = bp->blk_dva;
2246 int ndvas = BP_GET_NDVAS(bp);
2248 ASSERT(!BP_IS_HOLE(bp));
2249 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2251 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2253 for (int d = 0; d < ndvas; d++)
2254 metaslab_free_dva(spa, &dva[d], txg, now);
2256 spa_config_exit(spa, SCL_FREE, FTAG);
2260 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2262 const dva_t *dva = bp->blk_dva;
2263 int ndvas = BP_GET_NDVAS(bp);
2266 ASSERT(!BP_IS_HOLE(bp));
2270 * First do a dry run to make sure all DVAs are claimable,
2271 * so we don't have to unwind from partial failures below.
2273 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2277 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2279 for (int d = 0; d < ndvas; d++)
2280 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2283 spa_config_exit(spa, SCL_ALLOC, FTAG);
2285 ASSERT(error == 0 || txg == 0);
2291 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2293 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2296 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2297 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2298 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2299 vdev_t *vd = vdev_lookup_top(spa, vdev);
2300 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2301 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2302 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2305 range_tree_verify(msp->ms_tree, offset, size);
2307 for (int j = 0; j < TXG_SIZE; j++)
2308 range_tree_verify(msp->ms_freetree[j], offset, size);
2309 for (int j = 0; j < TXG_DEFER_SIZE; j++)
2310 range_tree_verify(msp->ms_defertree[j], offset, size);
2312 spa_config_exit(spa, SCL_VDEV, FTAG);