4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 #include <sys/zfs_context.h>
26 #include <sys/spa_impl.h>
28 #include <sys/dmu_tx.h>
29 #include <sys/space_map.h>
30 #include <sys/metaslab_impl.h>
31 #include <sys/vdev_impl.h>
34 uint64_t metaslab_aliquot = 512ULL << 10;
35 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
38 * Minimum size which forces the dynamic allocator to change
39 * its allocation strategy. Once the space map cannot satisfy
40 * an allocation of this size, it switches to using a more
41 * aggressive strategy (i.e. search by size rather than offset).
43 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
46 * The minimum free space, in percent, which must be available
47 * in a space map to continue allocations in a first-fit fashion.
48 * Once the space_map's free space drops below this level we dynamically
49 * switch to using best-fit allocations.
51 int metaslab_df_free_pct = 4;
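/*
 * For illustration, with the defaults above: a metaslab keeps taking
 * first-fit allocations until it can no longer satisfy a 128K
 * (SPA_MAXBLOCKSIZE) request, or until less than 4% of its space remains
 * free; past either point the dynamic allocator switches to the
 * size-sorted AVL tree and allocates best-fit.
 */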
54 * A metaslab is considered "free" if it contains a contiguous
55 * segment which is greater than metaslab_min_alloc_size.
57 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
60 * Max number of space_maps to prefetch.
62 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
65 * Percentage bonus multiplier for metaslabs that are in the bonus area.
67 int metaslab_smo_bonus_pct = 150;
70 * ==========================================================================
72 * ==========================================================================
75 metaslab_class_create(space_map_ops_t *ops)
79 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
88 metaslab_class_destroy(metaslab_class_t *mc)
92 while ((mg = mc->mc_rotor) != NULL) {
93 metaslab_class_remove(mc, mg);
94 metaslab_group_destroy(mg);
97 kmem_free(mc, sizeof (metaslab_class_t));
101 metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
103 metaslab_group_t *mgprev, *mgnext;
105 ASSERT(mg->mg_class == NULL);
107 if ((mgprev = mc->mc_rotor) == NULL) {
111 mgnext = mgprev->mg_next;
112 mg->mg_prev = mgprev;
113 mg->mg_next = mgnext;
114 mgprev->mg_next = mg;
115 mgnext->mg_prev = mg;
122 metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
124 metaslab_group_t *mgprev, *mgnext;
126 ASSERT(mg->mg_class == mc);
128 mgprev = mg->mg_prev;
129 mgnext = mg->mg_next;
134 mc->mc_rotor = mgnext;
135 mgprev->mg_next = mgnext;
136 mgnext->mg_prev = mgprev;
145 * ==========================================================================
147 * ==========================================================================
150 metaslab_compare(const void *x1, const void *x2)
152 const metaslab_t *m1 = x1;
153 const metaslab_t *m2 = x2;
155 if (m1->ms_weight < m2->ms_weight)
157 if (m1->ms_weight > m2->ms_weight)
161 * If the weights are identical, use the offset to force uniqueness.
163 if (m1->ms_map.sm_start < m2->ms_map.sm_start)
165 if (m1->ms_map.sm_start > m2->ms_map.sm_start)
168 ASSERT3P(m1, ==, m2);
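/*
 * Note: this comparator orders a group's AVL tree by decreasing weight,
 * so avl_first() yields the most desirable metaslab; the start-offset
 * comparison only breaks ties so that no two tree entries compare equal.
 */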
174 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
176 metaslab_group_t *mg;
178 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
179 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
180 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
181 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
182 mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
184 metaslab_class_add(mc, mg);
190 metaslab_group_destroy(metaslab_group_t *mg)
192 avl_destroy(&mg->mg_metaslab_tree);
193 mutex_destroy(&mg->mg_lock);
194 kmem_free(mg, sizeof (metaslab_group_t));
198 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
200 mutex_enter(&mg->mg_lock);
201 ASSERT(msp->ms_group == NULL);
204 avl_add(&mg->mg_metaslab_tree, msp);
205 mutex_exit(&mg->mg_lock);
209 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
211 mutex_enter(&mg->mg_lock);
212 ASSERT(msp->ms_group == mg);
213 avl_remove(&mg->mg_metaslab_tree, msp);
214 msp->ms_group = NULL;
215 mutex_exit(&mg->mg_lock);
219 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
222 * Although in principle the weight can be any value, in
223 * practice we do not use values in the range [1, 510].
225 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
226 ASSERT(MUTEX_HELD(&msp->ms_lock));
228 mutex_enter(&mg->mg_lock);
229 ASSERT(msp->ms_group == mg);
230 avl_remove(&mg->mg_metaslab_tree, msp);
231 msp->ms_weight = weight;
232 avl_add(&mg->mg_metaslab_tree, msp);
233 mutex_exit(&mg->mg_lock);
237 * ==========================================================================
238 * Common allocator routines
239 * ==========================================================================
242 metaslab_segsize_compare(const void *x1, const void *x2)
244 const space_seg_t *s1 = x1;
245 const space_seg_t *s2 = x2;
246 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
247 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
249 if (ss_size1 < ss_size2)
251 if (ss_size1 > ss_size2)
254 if (s1->ss_start < s2->ss_start)
256 if (s1->ss_start > s2->ss_start)
263 * This is a helper function that can be used by the allocator to find
264 * a suitable block to allocate. This will search the specified AVL
265 * tree looking for a block that matches the specified criteria.
268 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
271 space_seg_t *ss, ssearch;
274 ssearch.ss_start = *cursor;
275 ssearch.ss_end = *cursor + size;
277 ss = avl_find(t, &ssearch, &where);
279 ss = avl_nearest(t, where, AVL_AFTER);
282 uint64_t offset = P2ROUNDUP(ss->ss_start, align);
284 if (offset + size <= ss->ss_end) {
285 *cursor = offset + size;
288 ss = AVL_NEXT(t, ss);
292 * If we know we've searched the whole map (*cursor == 0), give up.
293 * Otherwise, reset the cursor to the beginning and try again.
299 return (metaslab_block_picker(t, cursor, size, align));
303 metaslab_pp_load(space_map_t *sm)
307 ASSERT(sm->sm_ppd == NULL);
308 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
310 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
311 avl_create(sm->sm_pp_root, metaslab_segsize_compare,
312 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
314 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
315 avl_add(sm->sm_pp_root, ss);
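/*
 * Note: sm_ppd provides 64 uint64_t slots of private scratch space that
 * the allocators below use for their cursors, while sm_pp_root mirrors
 * the offset-ordered sm_root as a second AVL tree ordered by segment
 * size, for best-fit lookups.
 */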
319 metaslab_pp_unload(space_map_t *sm)
323 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
326 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
327 /* tear down the tree */
330 avl_destroy(sm->sm_pp_root);
331 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
332 sm->sm_pp_root = NULL;
337 metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
339 /* No need to update cursor */
344 metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
346 /* No need to update cursor */
350 * Return the maximum contiguous segment within the metaslab.
353 metaslab_pp_maxsize(space_map_t *sm)
355 avl_tree_t *t = sm->sm_pp_root;
358 if (t == NULL || (ss = avl_last(t)) == NULL)
361 return (ss->ss_end - ss->ss_start);
365 * ==========================================================================
366 * The first-fit block allocator
367 * ==========================================================================
370 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
372 avl_tree_t *t = &sm->sm_root;
373 uint64_t align = size & -size;
374 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
376 return (metaslab_block_picker(t, cursor, size, align));
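/*
 * For illustration: "size & -size" is the largest power of two dividing
 * size, so a 12K request aligns to 4K and uses the cursor at
 * sm_ppd[highbit(4096) - 1] == sm_ppd[12]; each alignment class thus
 * resumes its first-fit scan where its previous allocation left off.
 */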
381 metaslab_ff_fragmented(space_map_t *sm)
386 static space_map_ops_t metaslab_ff_ops = {
393 metaslab_ff_fragmented
397 * ==========================================================================
398 * Dynamic block allocator -
399 * Uses the first-fit allocation scheme until space gets low and then
400 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
401 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
402 * ==========================================================================
405 metaslab_df_alloc(space_map_t *sm, uint64_t size)
407 avl_tree_t *t = &sm->sm_root;
408 uint64_t align = size & -size;
409 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
410 uint64_t max_size = metaslab_pp_maxsize(sm);
411 int free_pct = sm->sm_space * 100 / sm->sm_size;
413 ASSERT(MUTEX_HELD(sm->sm_lock));
414 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
420 * If we're running low on space, switch to using the
421 * size-sorted AVL tree (best-fit).
423 if (max_size < metaslab_df_alloc_threshold ||
424 free_pct < metaslab_df_free_pct) {
429 return (metaslab_block_picker(t, cursor, size, 1ULL));
433 metaslab_df_fragmented(space_map_t *sm)
435 uint64_t max_size = metaslab_pp_maxsize(sm);
436 int free_pct = sm->sm_space * 100 / sm->sm_size;
438 if (max_size >= metaslab_df_alloc_threshold &&
439 free_pct >= metaslab_df_free_pct)
445 static space_map_ops_t metaslab_df_ops = {
452 metaslab_df_fragmented
456 * ==========================================================================
457 * Other experimental allocators
458 * ==========================================================================
461 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
463 avl_tree_t *t = &sm->sm_root;
464 uint64_t *cursor = (uint64_t *)sm->sm_ppd;
465 uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
466 uint64_t max_size = metaslab_pp_maxsize(sm);
467 uint64_t rsize = size;
470 ASSERT(MUTEX_HELD(sm->sm_lock));
471 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
476 ASSERT3U(*extent_end, >=, *cursor);
479 * If we're running low on space, switch to using the
480 * size-sorted AVL tree (best-fit).
482 if ((*cursor + size) > *extent_end) {
485 *cursor = *extent_end = 0;
487 if (max_size > 2 * SPA_MAXBLOCKSIZE)
488 rsize = MIN(metaslab_min_alloc_size, max_size);
489 offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
491 *cursor = offset + size;
493 offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
495 ASSERT3U(*cursor, <=, *extent_end);
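/*
 * In short, the cdf allocator carves successive allocations out of a
 * single current extent tracked by *cursor and *extent_end; only when
 * that extent is exhausted does it pick a fresh one (sized up toward
 * metaslab_min_alloc_size while very large segments remain), which tends
 * to keep related blocks clustered together.
 */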
500 metaslab_cdf_fragmented(space_map_t *sm)
502 uint64_t max_size = metaslab_pp_maxsize(sm);
504 if (max_size > (metaslab_min_alloc_size * 10))
509 static space_map_ops_t metaslab_cdf_ops = {
516 metaslab_cdf_fragmented
519 uint64_t metaslab_ndf_clump_shift = 4;
522 metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
524 avl_tree_t *t = &sm->sm_root;
526 space_seg_t *ss, ssearch;
527 uint64_t hbit = highbit(size);
528 uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
529 uint64_t max_size = metaslab_pp_maxsize(sm);
531 ASSERT(MUTEX_HELD(sm->sm_lock));
532 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
537 ssearch.ss_start = *cursor;
538 ssearch.ss_end = *cursor + size;
540 ss = avl_find(t, &ssearch, &where);
541 if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
544 ssearch.ss_start = 0;
545 ssearch.ss_end = MIN(max_size,
546 1ULL << (hbit + metaslab_ndf_clump_shift));
547 ss = avl_find(t, &ssearch, &where);
549 ss = avl_nearest(t, where, AVL_AFTER);
554 if (ss->ss_start + size <= ss->ss_end) {
555 *cursor = ss->ss_start + size;
556 return (ss->ss_start);
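/*
 * Note: when the per-size-class cursor no longer points at a usable
 * segment, the search above is retried with a segment no larger than
 * MIN(max_size, 1ULL << (hbit + metaslab_ndf_clump_shift)) and the cursor
 * is restarted there, so blocks of similar size tend to be clumped.
 */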
563 metaslab_ndf_fragmented(space_map_t *sm)
565 uint64_t max_size = metaslab_pp_maxsize(sm);
567 if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
573 static space_map_ops_t metaslab_ndf_ops = {
580 metaslab_ndf_fragmented
583 space_map_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
586 * ==========================================================================
588 * ==========================================================================
591 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
592 uint64_t start, uint64_t size, uint64_t txg)
594 vdev_t *vd = mg->mg_vd;
597 msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
598 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
600 msp->ms_smo_syncing = *smo;
603 * We create the main space map here, but we don't create the
604 * allocmaps and freemaps until metaslab_sync_done(). This serves
605 * two purposes: it allows metaslab_sync_done() to detect the
606 * addition of new space; and for debugging, it ensures that we'd
607 * take a data fault on any attempt to use this metaslab before it's ready.
609 space_map_create(&msp->ms_map, start, size,
610 vd->vdev_ashift, &msp->ms_lock);
612 metaslab_group_add(mg, msp);
615 * If we're opening an existing pool (txg == 0) or creating
616 * a new one (txg == TXG_INITIAL), all space is available now.
617 * If we're adding space to an existing pool, the new space
618 * does not become available until after this txg has synced.
620 if (txg <= TXG_INITIAL)
621 metaslab_sync_done(msp, 0);
625 * The vdev is dirty, but the metaslab isn't -- it just needs
626 * to have metaslab_sync_done() invoked from vdev_sync_done().
627 * [We could just dirty the metaslab, but that would cause us
628 * to allocate a space map object for it, which is wasteful
629 * and would mess up the locality logic in metaslab_weight().]
631 ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
632 vdev_dirty(vd, 0, NULL, txg);
633 vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
640 metaslab_fini(metaslab_t *msp)
642 metaslab_group_t *mg = msp->ms_group;
645 vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
646 -msp->ms_smo.smo_alloc, B_TRUE);
648 metaslab_group_remove(mg, msp);
650 mutex_enter(&msp->ms_lock);
652 space_map_unload(&msp->ms_map);
653 space_map_destroy(&msp->ms_map);
655 for (t = 0; t < TXG_SIZE; t++) {
656 space_map_destroy(&msp->ms_allocmap[t]);
657 space_map_destroy(&msp->ms_freemap[t]);
660 mutex_exit(&msp->ms_lock);
661 mutex_destroy(&msp->ms_lock);
663 kmem_free(msp, sizeof (metaslab_t));
666 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
667 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
668 #define METASLAB_ACTIVE_MASK \
669 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
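/*
 * Note: these flags occupy the two most significant bits of the 64-bit
 * weight, so a metaslab that has been activated (primary or secondary)
 * automatically sorts ahead of every inactive metaslab in the group's
 * weight-ordered tree.
 */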
672 metaslab_weight(metaslab_t *msp)
674 metaslab_group_t *mg = msp->ms_group;
675 space_map_t *sm = &msp->ms_map;
676 space_map_obj_t *smo = &msp->ms_smo;
677 vdev_t *vd = mg->mg_vd;
678 uint64_t weight, space;
680 ASSERT(MUTEX_HELD(&msp->ms_lock));
683 * The baseline weight is the metaslab's free space.
685 space = sm->sm_size - smo->smo_alloc;
689 * Modern disks have uniform bit density and constant angular velocity.
690 * Therefore, the outer recording zones are faster (higher bandwidth)
691 * than the inner zones by the ratio of outer to inner track diameter,
692 * which is typically around 2:1. We account for this by assigning
693 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
694 * In effect, this means that we'll select the metaslab with the most
695 * free bandwidth rather than simply the one with the most free space.
697 weight = 2 * weight -
698 ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
699 ASSERT(weight >= space && weight <= 2 * space);
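/*
 * Worked example: with vdev_ms_count == 200 and free space "space", the
 * formula above gives the metaslab at index 0 a weight of 2 * space, the
 * one at index 100 a weight of 1.5 * space, and the last one roughly
 * 1 * space, so outer (lower-offset, faster) metaslabs win when free
 * space is comparable.
 */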
702 * For locality, assign higher weight to metaslabs which have
703 * a lower offset than what we've already activated.
705 if (sm->sm_start <= mg->mg_bonus_area)
706 weight = (weight * metaslab_smo_bonus_pct) / 100;
707 ASSERT(weight >= space &&
708 weight <= (2 * metaslab_smo_bonus_pct * space) / 100);
710 if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
712 * If this metaslab is one we're actively using, adjust its
713 * weight to make it preferable to any inactive metaslab so
714 * we'll polish it off.
716 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
722 metaslab_prefetch(metaslab_group_t *mg)
724 spa_t *spa = mg->mg_vd->vdev_spa;
726 avl_tree_t *t = &mg->mg_metaslab_tree;
729 mutex_enter(&mg->mg_lock);
732 * Prefetch the next potential metaslabs
734 for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
735 space_map_t *sm = &msp->ms_map;
736 space_map_obj_t *smo = &msp->ms_smo;
738 /* If we have reached our prefetch limit then we're done */
739 if (m >= metaslab_prefetch_limit)
742 if (!sm->sm_loaded && smo->smo_object != 0) {
743 mutex_exit(&mg->mg_lock);
744 dmu_prefetch(spa->spa_meta_objset, smo->smo_object,
745 0ULL, smo->smo_objsize);
746 mutex_enter(&mg->mg_lock);
749 mutex_exit(&mg->mg_lock);
753 metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
755 metaslab_group_t *mg = msp->ms_group;
756 space_map_t *sm = &msp->ms_map;
757 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
759 ASSERT(MUTEX_HELD(&msp->ms_lock));
761 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
762 int error = space_map_load(sm, sm_ops, SM_FREE, &msp->ms_smo,
763 msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
765 metaslab_group_sort(msp->ms_group, msp, 0);
770 * Track the bonus area as we activate new metaslabs.
772 if (sm->sm_start > mg->mg_bonus_area) {
773 mutex_enter(&mg->mg_lock);
774 mg->mg_bonus_area = sm->sm_start;
775 mutex_exit(&mg->mg_lock);
779 * If we were able to load the map then make sure
780 * that this map is still able to satisfy our request.
782 if (msp->ms_weight < size)
785 metaslab_group_sort(msp->ms_group, msp,
786 msp->ms_weight | activation_weight);
788 ASSERT(sm->sm_loaded);
789 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
795 metaslab_passivate(metaslab_t *msp, uint64_t size)
798 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
799 * this metaslab again. In that case, it had better be empty,
800 * or we would be leaving space on the table.
802 ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
803 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
804 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
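/*
 * Note: any real allocation size is far smaller than
 * METASLAB_WEIGHT_SECONDARY (1ULL << 62), so sorting with
 * MIN(msp->ms_weight, size) implicitly strips the activation bits, which
 * is what the ASSERT above verifies.
 */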
808 * Write a metaslab to disk in the context of the specified transaction group.
811 metaslab_sync(metaslab_t *msp, uint64_t txg)
813 vdev_t *vd = msp->ms_group->mg_vd;
814 spa_t *spa = vd->vdev_spa;
815 objset_t *mos = spa->spa_meta_objset;
816 space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
817 space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
818 space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
819 space_map_t *sm = &msp->ms_map;
820 space_map_obj_t *smo = &msp->ms_smo_syncing;
825 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
828 * The only state that can actually be changing concurrently with
829 * metaslab_sync() is the metaslab's ms_map. No other thread can
830 * be modifying this txg's allocmap, freemap, freed_map, or smo.
831 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
832 * We drop it whenever we call into the DMU, because the DMU
833 * can call down to us (e.g. via zio_free()) at any time.
835 mutex_enter(&msp->ms_lock);
837 if (smo->smo_object == 0) {
838 ASSERT(smo->smo_objsize == 0);
839 ASSERT(smo->smo_alloc == 0);
840 mutex_exit(&msp->ms_lock);
841 smo->smo_object = dmu_object_alloc(mos,
842 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
843 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
844 ASSERT(smo->smo_object != 0);
845 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
846 (sm->sm_start >> vd->vdev_ms_shift),
847 sizeof (uint64_t), &smo->smo_object, tx);
848 mutex_enter(&msp->ms_lock);
851 space_map_walk(freemap, space_map_add, freed_map);
853 if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
854 2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
856 * The in-core space map representation is twice as compact
857 * as the on-disk one, so it's time to condense the latter
858 * by generating a pure allocmap from first principles.
860 * This metaslab is 100% allocated,
861 * minus the content of the in-core map (sm),
862 * minus what's been freed this txg (freed_map),
863 * minus allocations from txgs in the future
864 * (because they haven't been committed yet).
866 space_map_vacate(allocmap, NULL, NULL);
867 space_map_vacate(freemap, NULL, NULL);
869 space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);
871 space_map_walk(sm, space_map_remove, allocmap);
872 space_map_walk(freed_map, space_map_remove, allocmap);
874 for (t = 1; t < TXG_CONCURRENT_STATES; t++)
875 space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
876 space_map_remove, allocmap);
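/*
 * At this point allocmap describes every byte of the metaslab that is
 * still allocated from the on-disk map's perspective, so truncating the
 * space map object and syncing allocmap as SM_ALLOC below rewrites
 * (condenses) the object into its minimal form.
 */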
878 mutex_exit(&msp->ms_lock);
879 space_map_truncate(smo, mos, tx);
880 mutex_enter(&msp->ms_lock);
883 space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
884 space_map_sync(freemap, SM_FREE, smo, mos, tx);
886 mutex_exit(&msp->ms_lock);
888 VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
889 dmu_buf_will_dirty(db, tx);
890 ASSERT3U(db->db_size, >=, sizeof (*smo));
891 bcopy(smo, db->db_data, sizeof (*smo));
892 dmu_buf_rele(db, FTAG);
898 * Called after a transaction group has completely synced to mark
899 * all of the metaslab's free space as usable.
902 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
904 space_map_obj_t *smo = &msp->ms_smo;
905 space_map_obj_t *smosync = &msp->ms_smo_syncing;
906 space_map_t *sm = &msp->ms_map;
907 space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
908 metaslab_group_t *mg = msp->ms_group;
909 vdev_t *vd = mg->mg_vd;
912 mutex_enter(&msp->ms_lock);
915 * If this metaslab is just becoming available, initialize its
916 * allocmaps and freemaps and add its capacity to the vdev.
918 if (freed_map->sm_size == 0) {
919 for (t = 0; t < TXG_SIZE; t++) {
920 space_map_create(&msp->ms_allocmap[t], sm->sm_start,
921 sm->sm_size, sm->sm_shift, sm->sm_lock);
922 space_map_create(&msp->ms_freemap[t], sm->sm_start,
923 sm->sm_size, sm->sm_shift, sm->sm_lock);
925 vdev_space_update(vd, sm->sm_size, 0, B_TRUE);
928 vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE);
930 ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
931 ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);
934 * If there's a space_map_load() in progress, wait for it to complete
935 * so that we have a consistent view of the in-core space map.
936 * Then, add everything we freed in this txg to the map.
938 space_map_load_wait(sm);
939 space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);
944 * If the map is loaded but no longer active, evict it as soon as all
945 * future allocations have synced. (If we unloaded it now and then
946 * loaded a moment later, the map wouldn't reflect those allocations.)
948 if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
951 for (t = 1; t < TXG_CONCURRENT_STATES; t++)
952 if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
956 space_map_unload(sm);
959 metaslab_group_sort(mg, msp, metaslab_weight(msp));
961 mutex_exit(&msp->ms_lock);
965 metaslab_sync_reassess(metaslab_group_t *mg)
967 vdev_t *vd = mg->mg_vd;
970 * Re-evaluate all metaslabs which have lower offsets than the
973 for (int m = 0; m < vd->vdev_ms_count; m++) {
974 metaslab_t *msp = vd->vdev_ms[m];
976 if (msp->ms_map.sm_start > mg->mg_bonus_area)
979 mutex_enter(&msp->ms_lock);
980 metaslab_group_sort(mg, msp, metaslab_weight(msp));
981 mutex_exit(&msp->ms_lock);
985 * Prefetch the next potential metaslabs
987 metaslab_prefetch(mg);
991 metaslab_distance(metaslab_t *msp, dva_t *dva)
993 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
994 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
995 uint64_t start = msp->ms_map.sm_start >> ms_shift;
997 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1001 return ((start - offset) << ms_shift);
1003 return ((offset - start) << ms_shift);
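/*
 * Note: distances are expressed in whole metaslabs (both offsets are
 * shifted down by vdev_ms_shift before subtracting, then shifted back
 * up), and a DVA on a different vdev is treated as maximally distant, so
 * the min_distance check in metaslab_group_alloc() only spreads copies
 * that land on the same top-level vdev.
 */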
1008 metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
1009 uint64_t min_distance, dva_t *dva, int d)
1011 metaslab_t *msp = NULL;
1012 uint64_t offset = -1ULL;
1013 avl_tree_t *t = &mg->mg_metaslab_tree;
1014 uint64_t activation_weight;
1015 uint64_t target_distance;
1018 activation_weight = METASLAB_WEIGHT_PRIMARY;
1019 for (i = 0; i < d; i++) {
1020 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1021 activation_weight = METASLAB_WEIGHT_SECONDARY;
1027 boolean_t was_active;
1029 mutex_enter(&mg->mg_lock);
1030 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1031 if (msp->ms_weight < size) {
1032 mutex_exit(&mg->mg_lock);
1036 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1037 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1040 target_distance = min_distance +
1041 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
1043 for (i = 0; i < d; i++)
1044 if (metaslab_distance(msp, &dva[i]) <
1050 mutex_exit(&mg->mg_lock);
1054 mutex_enter(&msp->ms_lock);
1057 * Ensure that the metaslab we have selected is still
1058 * capable of handling our request. It's possible that
1059 * another thread may have changed the weight while we
1060 * were blocked on the metaslab lock.
1062 if (msp->ms_weight < size || (was_active &&
1063 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1064 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1065 mutex_exit(&msp->ms_lock);
1069 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1070 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1071 metaslab_passivate(msp,
1072 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1073 mutex_exit(&msp->ms_lock);
1077 if (metaslab_activate(msp, activation_weight, size) != 0) {
1078 mutex_exit(&msp->ms_lock);
1082 if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
1085 metaslab_passivate(msp, space_map_maxsize(&msp->ms_map));
1087 mutex_exit(&msp->ms_lock);
1090 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
1091 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1093 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
1095 mutex_exit(&msp->ms_lock);
1101 * Allocate a block for the specified i/o.
1104 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1105 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1107 metaslab_group_t *mg, *rotor;
1111 int zio_lock = B_FALSE;
1112 boolean_t allocatable;
1113 uint64_t offset = -1ULL;
1117 ASSERT(!DVA_IS_VALID(&dva[d]));
1120 * For testing, make some blocks above a certain size be gang blocks.
1122 if (psize >= metaslab_gang_bang && (LBOLT & 3) == 0)
1126 * Start at the rotor and loop through all mgs until we find something.
1127 * Note that there's no locking on mc_rotor or mc_allocated because
1128 * nothing actually breaks if we miss a few updates -- we just won't
1129 * allocate quite as evenly. It all balances out over time.
1131 * If we are doing ditto or log blocks, try to spread them across
1132 * consecutive vdevs. If we're forced to reuse a vdev before we've
1133 * allocated all of our ditto blocks, then try and spread them out on
1134 * that vdev as much as possible. If it turns out to not be possible,
1135 * gradually lower our standards until anything becomes acceptable.
1136 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1137 * gives us hope of containing our fault domains to something we're
1138 * able to reason about. Otherwise, any two top-level vdev failures
1139 * will guarantee the loss of data. With consecutive allocation,
1140 * only two adjacent top-level vdev failures will result in data loss.
1142 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1143 * ourselves on the same vdev as our gang block header. That
1144 * way, we can hope for locality in vdev_cache, plus it makes our
1145 * fault domains something tractable.
1148 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1149 if (flags & METASLAB_HINTBP_AVOID)
1150 mg = vd->vdev_mg->mg_next;
1153 } else if (d != 0) {
1154 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1155 mg = vd->vdev_mg->mg_next;
1161 * If the hint put us into the wrong class, just follow the rotor.
1163 if (mg->mg_class != mc)
1173 * Don't allocate from faulted devices.
1176 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1177 allocatable = vdev_allocatable(vd);
1178 spa_config_exit(spa, SCL_ZIO, FTAG);
1180 allocatable = vdev_allocatable(vd);
1186 * Avoid writing single-copy data to a failing vdev
1188 if ((vd->vdev_stat.vs_write_errors > 0 ||
1189 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1190 d == 0 && dshift == 3) {
1195 ASSERT(mg->mg_class == mc);
1197 distance = vd->vdev_asize >> dshift;
1198 if (distance <= (1ULL << vd->vdev_ms_shift))
1203 asize = vdev_psize_to_asize(vd, psize);
1204 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1206 offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
1207 if (offset != -1ULL) {
1209 * If we've just selected this metaslab group,
1210 * figure out whether the corresponding vdev is
1211 * over- or under-used relative to the pool,
1212 * and set an allocation bias to even it out.
1214 if (mc->mc_allocated == 0) {
1215 vdev_stat_t *vs = &vd->vdev_stat;
1216 uint64_t alloc, space;
1219 alloc = spa_get_alloc(spa);
1220 space = spa_get_space(spa);
1223 * Determine percent used in units of 0..1024.
1224 * (This is just to avoid floating point.)
1226 vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
1227 su = (alloc << 10) / (space + 1);
1230 * Bias by at most +/- 25% of the aliquot.
1232 mg->mg_bias = ((su - vu) *
1233 (int64_t)mg->mg_aliquot) / (1024 * 4);
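/*
 * Worked example: if this vdev is about 30% full (vu ~= 307) while the
 * pool overall is 50% full (su ~= 512), then
 * mg_bias = (512 - 307) * mg_aliquot / 4096, roughly +5% of the aliquot,
 * so the under-used vdev holds the rotor a little longer; |su - vu| can
 * never exceed 1024, which is what caps the bias at +/- 25%.
 */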
1236 if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
1237 mg->mg_aliquot + mg->mg_bias) {
1238 mc->mc_rotor = mg->mg_next;
1239 mc->mc_allocated = 0;
1242 DVA_SET_VDEV(&dva[d], vd->vdev_id);
1243 DVA_SET_OFFSET(&dva[d], offset);
1244 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
1245 DVA_SET_ASIZE(&dva[d], asize);
1250 mc->mc_rotor = mg->mg_next;
1251 mc->mc_allocated = 0;
1252 } while ((mg = mg->mg_next) != rotor);
1256 ASSERT(dshift < 64);
1260 if (!allocatable && !zio_lock) {
1266 bzero(&dva[d], sizeof (dva_t));
1272 * Free the block represented by DVA in the context of the specified
1273 * transaction group.
1276 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
1278 uint64_t vdev = DVA_GET_VDEV(dva);
1279 uint64_t offset = DVA_GET_OFFSET(dva);
1280 uint64_t size = DVA_GET_ASIZE(dva);
1284 ASSERT(DVA_IS_VALID(dva));
1286 if (txg > spa_freeze_txg(spa))
1289 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1290 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
1291 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
1292 (u_longlong_t)vdev, (u_longlong_t)offset);
1297 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1299 if (DVA_GET_GANG(dva))
1300 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1302 mutex_enter(&msp->ms_lock);
1305 space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
1307 space_map_free(&msp->ms_map, offset, size);
1309 if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
1310 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1311 space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
1314 mutex_exit(&msp->ms_lock);
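/*
 * Note on the two paths above: with now == B_TRUE the range is pulled
 * straight back out of this txg's allocmap and returned to the in-core
 * map, whereas the normal path records it in this txg's freemap so the
 * space only becomes allocatable again once the txg has synced.
 */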
1318 * Intent log support: upon opening the pool after a crash, notify the SPA
1319 * of blocks that the intent log has allocated for immediate write, but
1320 * which are still considered free by the SPA because the last transaction
1321 * group didn't commit yet.
1324 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
1326 uint64_t vdev = DVA_GET_VDEV(dva);
1327 uint64_t offset = DVA_GET_OFFSET(dva);
1328 uint64_t size = DVA_GET_ASIZE(dva);
1333 ASSERT(DVA_IS_VALID(dva));
1335 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1336 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
1339 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1341 if (DVA_GET_GANG(dva))
1342 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1344 mutex_enter(&msp->ms_lock);
1346 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);
1347 if (error || txg == 0) { /* txg == 0 indicates dry run */
1348 mutex_exit(&msp->ms_lock);
1352 space_map_claim(&msp->ms_map, offset, size);
1354 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
1355 if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
1356 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1357 space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
1360 mutex_exit(&msp->ms_lock);
1366 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
1367 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
1369 dva_t *dva = bp->blk_dva;
1370 dva_t *hintdva = hintbp->blk_dva;
1373 ASSERT(bp->blk_birth == 0);
1375 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1377 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
1378 spa_config_exit(spa, SCL_ALLOC, FTAG);
1382 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
1383 ASSERT(BP_GET_NDVAS(bp) == 0);
1384 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
1386 for (int d = 0; d < ndvas; d++) {
1387 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
1390 for (d--; d >= 0; d--) {
1391 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
1392 bzero(&dva[d], sizeof (dva_t));
1394 spa_config_exit(spa, SCL_ALLOC, FTAG);
1399 ASSERT(BP_GET_NDVAS(bp) == ndvas);
1401 spa_config_exit(spa, SCL_ALLOC, FTAG);
1403 bp->blk_birth = txg;
1409 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
1411 const dva_t *dva = bp->blk_dva;
1412 int ndvas = BP_GET_NDVAS(bp);
1414 ASSERT(!BP_IS_HOLE(bp));
1415 ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg);
1417 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
1419 for (int d = 0; d < ndvas; d++)
1420 metaslab_free_dva(spa, &dva[d], txg, now);
1422 spa_config_exit(spa, SCL_FREE, FTAG);
1426 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
1428 const dva_t *dva = bp->blk_dva;
1429 int ndvas = BP_GET_NDVAS(bp);
1432 ASSERT(!BP_IS_HOLE(bp));
1436 * First do a dry run to make sure all DVAs are claimable,
1437 * so we don't have to unwind from partial failures below.
1439 if ((error = metaslab_claim(spa, bp, 0)) != 0)
1443 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1445 for (int d = 0; d < ndvas; d++)
1446 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
1449 spa_config_exit(spa, SCL_ALLOC, FTAG);
1451 ASSERT(error == 0 || txg == 0);