/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))

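/*
 * For illustration: CAN_FASTGANG() is nonzero only when none of the
 * gang-related flags are set, e.g. CAN_FASTGANG(0) is true while
 * CAN_FASTGANG(METASLAB_GANG_CHILD) is false, so allocations for gang
 * members always take the exhaustive-search path.
 */
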
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

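/*
 * Note that with the default of SPA_MAXBLOCKSIZE + 1, no request can
 * ever reach metaslab_gang_bang (psize never exceeds SPA_MAXBLOCKSIZE),
 * so forced ganging stays disabled until the tunable is lowered.
 */
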
/*
 * This value defines the number of allowed allocation failures per vdev.
 * If a device reaches this threshold in a given txg then we consider skipping
 * allocations on that device.
 */
int zfs_mg_alloc_failures = 0;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RDTUN,
    &zfs_mg_alloc_failures, 0,
    "Number of allowed allocation failures per vdev");
TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);

/*
 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
 */
static int metaslab_debug = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

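/*
 * For example, with metaslab_df_free_pct set to 4, a metaslab whose
 * space map covers 1GB switches from first-fit to best-fit once less
 * than ~41MB (4%) of it remains free; independently, the same switch
 * happens whenever the largest contiguous segment falls below
 * metaslab_df_alloc_threshold.
 */
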
/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Max number of space_maps to prefetch.
 */
int metaslab_prefetch_limit = SPA_DVAS_PER_BP;

/*
 * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
int metaslab_smo_bonus_pct = 150;

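/*
 * With the default of 150, metaslabs at or below the bonus area are
 * intended to receive a 1.5x weight bonus in metaslab_weight() below.
 */
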
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

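/*
 * Note that the comparator above sorts by descending weight, so
 * avl_first() on mg_metaslab_tree yields the heaviest (most desirable)
 * metaslab; metaslab_group_alloc() relies on this order.
 */
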
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */
static int
metaslab_segsize_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}

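/*
 * Callers maintain one cursor per power-of-two alignment class (see
 * metaslab_ff_alloc() below), so each alignment class resumes its scan
 * where the previous allocation of that class left off, turning
 * repeated first-fit searches into a cheap rotor over the
 * offset-sorted tree.
 */
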
static void
metaslab_pp_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
	avl_create(sm->sm_pp_root, metaslab_segsize_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_pp_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}

/* ARGSUSED */
static void
metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_pp_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}

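/*
 * Illustration of the cursor indexing above: "size & -size" isolates
 * the lowest set bit of the size, so a 24K (0x6000) request is aligned
 * to 8K (0x2000) and shares the cursor at
 * sm_ppd[highbit(0x2000) - 1] == sm_ppd[13] with every other
 * 8K-aligned request.
 */
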
/* ARGSUSED */
static boolean_t
metaslab_ff_fragmented(space_map_t *sm)
{
	return (B_FALSE);
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ff_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ff_fragmented
};

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static boolean_t
metaslab_df_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	if (max_size >= metaslab_df_alloc_threshold &&
	    free_pct >= metaslab_df_free_pct)
		return (B_FALSE);

	return (B_TRUE);
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_df_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_df_fragmented
};

/*
 * ==========================================================================
 * Other experimental allocators
 * ==========================================================================
 */
static uint64_t
metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd;
	uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	uint64_t rsize = size;
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ASSERT3U(*extent_end, >=, *cursor);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if ((*cursor + size) > *extent_end) {
		t = sm->sm_pp_root;
		*cursor = *extent_end = 0;

		if (max_size > 2 * SPA_MAXBLOCKSIZE)
			rsize = MIN(metaslab_min_alloc_size, max_size);
		offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
		if (offset != -1)
			*cursor = offset + size;
	} else {
		offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
	}
	ASSERT3U(*cursor, <=, *extent_end);

	return (offset);
}

static boolean_t
metaslab_cdf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size * 10))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_cdf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_cdf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_cdf_fragmented
};

uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, ssearch;
	uint64_t hbit = highbit(size);
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
		t = sm->sm_pp_root;

		ssearch.ss_start = 0;
		ssearch.ss_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		ss = avl_find(t, &ssearch, &where);
		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);
		ASSERT(ss != NULL);
	}

	if (ss != NULL) {
		if (ss->ss_start + size <= ss->ss_end) {
			*cursor = ss->ss_start + size;
			return (ss->ss_start);
		}
	}
	return (-1ULL);
}

static boolean_t
metaslab_ndf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_ndf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ndf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ndf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * take a data fault on any attempt to use this metaslab before
	 * it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	if (metaslab_debug && smo->smo_object != 0) {
		mutex_enter(&msp->ms_lock);
		VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops,
		    SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
		mutex_exit(&msp->ms_lock);
	}

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	vdev_space_update(mg->mg_vd,
	    -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (int t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		space_map_destroy(&msp->ms_defermap[t]);

	ASSERT3S(msp->ms_deferspace, ==, 0);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
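
	/*
	 * Worked example of the interpolation above: on a vdev with 200
	 * metaslabs, metaslab 0 keeps weight = 2 * space, metaslab 100
	 * gets 1.5 * space, and metaslab 199 gets just over 1 * space,
	 * mirroring the roughly 2:1 outer-to-inner bandwidth ratio.
	 */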

	/*
	 * For locality, assign higher weight to metaslabs which have
	 * a lower offset than what we've already activated.
	 * (Scale in a single expression so that integer division does
	 * not truncate the default 150% bonus down to 1x.)
	 */
	if (sm->sm_start <= mg->mg_bonus_area)
		weight = (weight * metaslab_smo_bonus_pct) / 100;
	ASSERT(weight >= space &&
	    weight <= (2 * metaslab_smo_bonus_pct * space) / 100);

	if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
		/*
		 * If this metaslab is one we're actively using, adjust its
		 * weight to make it preferable to any inactive metaslab so
		 * we'll polish it off.
		 */
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}
	return (weight);
}

static void
metaslab_prefetch(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m;

	mutex_enter(&mg->mg_lock);

	/*
	 * Prefetch the next potential metaslabs
	 */
	for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
		space_map_t *sm = &msp->ms_map;
		space_map_obj_t *smo = &msp->ms_smo;

		/* If we have reached our prefetch limit then we're done */
		if (m >= metaslab_prefetch_limit)
			break;

		if (!sm->sm_loaded && smo->smo_object != 0) {
			mutex_exit(&mg->mg_lock);
			dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
			    0ULL, smo->smo_objsize);
			mutex_enter(&mg->mg_lock);
		}
	}
	mutex_exit(&mg->mg_lock);
}

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		space_map_load_wait(sm);
		if (!sm->sm_loaded) {
			int error = space_map_load(sm, sm_ops, SM_FREE,
			    &msp->ms_smo,
			    spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
			for (int t = 0; t < TXG_DEFER_SIZE; t++)
				space_map_walk(&msp->ms_defermap[t],
				    space_map_claim, sm);
		}

		/*
		 * Track the bonus area as we activate new metaslabs.
		 */
		if (sm->sm_start > mg->mg_bonus_area) {
			mutex_enter(&mg->mg_lock);
			mg->mg_bonus_area = sm->sm_start;
			mutex_exit(&mg->mg_lock);
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (allocmap->sm_space == 0 && freemap->sm_space == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
	}

	mutex_enter(&msp->ms_lock);

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus deferred frees (ms_defermap[]),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_walk(&msp->ms_defermap[t],
			    space_map_remove, allocmap);

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int64_t alloc_delta, defer_delta;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (int t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_create(&msp->ms_defermap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);

		vdev_space_update(vd, 0, 0, sm->sm_size);
	}

	alloc_delta = smosync->smo_alloc - smo->smo_alloc;
	defer_delta = freed_map->sm_space - defer_map->sm_space;

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add defer_map (oldest deferred frees) to this map and
	 * transfer freed_map (this txg's frees) to defer_map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
	space_map_vacate(freed_map, space_map_add, defer_map);

	*smo = *smosync;

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable && !metaslab_debug)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	int64_t failures = mg->mg_alloc_failures;

	/*
	 * Re-evaluate all metaslabs which have lower offsets than the
	 * bonus area.
	 */
	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_map.sm_start > mg->mg_bonus_area)
			break;

		mutex_enter(&msp->ms_lock);
		metaslab_group_sort(mg, msp, metaslab_weight(msp));
		mutex_exit(&msp->ms_lock);
	}

	atomic_add_64(&mg->mg_alloc_failures, -failures);

	/*
	 * Prefetch the next potential metaslabs
	 */
	metaslab_prefetch(mg);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}

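/*
 * For example, with 1GB metaslabs (vdev_ms_shift == 30), a DVA in
 * metaslab 12 is 2GB away from metaslab 14 of the same vdev, and
 * 1ULL << 63 (effectively infinite) away from any other vdev.
 */
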
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "failures %llu, weight %llu",
				    spa_name(spa), mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize,
				    mg->mg_alloc_failures, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}
			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
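
			/*
			 * A metaslab that has never been allocated from
			 * (smo_alloc == 0) must clear a 1.5x distance bar
			 * below, making untouched metaslabs slightly
			 * harder to select for ditto copies.
			 */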
			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		/*
		 * If we've already reached the allowable number of failed
		 * allocation attempts on this metaslab group then we
		 * consider skipping it. We skip it only if we're allowed
		 * to "fast" gang, the physical size is larger than
		 * a gang block, and we're attempting to allocate from
		 * the primary metaslab.
		 */
		if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
		    CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			spa_dbgmsg(spa, "%s: skipping metaslab group: "
			    "vdev %llu, txg %llu, mg %p, psize %llu, "
			    "asize %llu, failures %llu", spa_name(spa),
			    mg->mg_vd->vdev_id, txg, mg, psize, asize,
			    mg->mg_alloc_failures);
			return (-1ULL);
		}

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, asize)) != -1ULL)
			break;

		atomic_inc_64(&mg->mg_alloc_failures);

		metaslab_passivate(msp, space_map_maxsize(&msp->ms_map));

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, asize);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (ENOSPC);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}
		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d, flags);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			}

			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !space_map_contains(&msp->ms_map, offset, size))
		error = ENOENT;

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(&msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (ENOSPC);
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}