/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;     /* force gang blocks */
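/*
 * metaslab_aliquot is the per-child quantum from which each group's aliquot
 * is derived (mg_aliquot = metaslab_aliquot * vdev_children in
 * metaslab_group_create()); roughly that many bytes are allocated from one
 * metaslab group before the rotor advances to the next.  The default
 * metaslab_gang_bang of SPA_MAXBLOCKSIZE + 1 can never be matched by a real
 * allocation, so gang blocks are only forced when the tunable is lowered
 * for testing.
 */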
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size, it switches to a more aggressive
 * strategy (i.e., search by size rather than by offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 30;
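/*
 * For example, with the defaults above a 1GB metaslab keeps allocating
 * first-fit only while its largest free segment is at least
 * metaslab_df_alloc_threshold (SPA_MAXBLOCKSIZE) and at least ~307MB (30%)
 * of the metaslab is still free; once either condition fails,
 * metaslab_df_alloc() falls back to the size-sorted (best-fit) tree.
 */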
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(space_map_ops_t *ops)
{
        metaslab_class_t *mc;

        mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
        mc->mc_ops = ops;

        return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
        metaslab_group_t *mg;

        while ((mg = mc->mc_rotor) != NULL) {
                metaslab_class_remove(mc, mg);
                metaslab_group_destroy(mg);
        }

        kmem_free(mc, sizeof (metaslab_class_t));
}
void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
        metaslab_group_t *mgprev, *mgnext;

        ASSERT(mg->mg_class == NULL);

        if ((mgprev = mc->mc_rotor) == NULL) {
                mg->mg_prev = mg;
                mg->mg_next = mg;
        } else {
                mgnext = mgprev->mg_next;
                mg->mg_prev = mgprev;
                mg->mg_next = mgnext;
                mgprev->mg_next = mg;
                mgnext->mg_prev = mg;
        }

        mc->mc_rotor = mg;
        mg->mg_class = mc;
}
void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
        metaslab_group_t *mgprev, *mgnext;

        ASSERT(mg->mg_class == mc);

        mgprev = mg->mg_prev;
        mgnext = mg->mg_next;

        if (mg == mgnext) {
                mc->mc_rotor = NULL;
        } else {
                mc->mc_rotor = mgnext;
                mgprev->mg_next = mgnext;
                mgnext->mg_prev = mgprev;
        }

        mg->mg_prev = NULL;
        mg->mg_next = NULL;
        mg->mg_class = NULL;
}
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
        const metaslab_t *m1 = x1;
        const metaslab_t *m2 = x2;

        /* Sort by decreasing weight. */
        if (m1->ms_weight < m2->ms_weight)
                return (1);
        if (m1->ms_weight > m2->ms_weight)
                return (-1);

        /*
         * If the weights are identical, use the offset to force uniqueness.
         */
        if (m1->ms_map.sm_start < m2->ms_map.sm_start)
                return (-1);
        if (m1->ms_map.sm_start > m2->ms_map.sm_start)
                return (1);

        ASSERT3P(m1, ==, m2);

        return (0);
}
static metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
        metaslab_group_t *mg;

        mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
        mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
        avl_create(&mg->mg_metaslab_tree, metaslab_compare,
            sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
        mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
        mg->mg_vd = vd;
        metaslab_class_add(mc, mg);

        return (mg);
}
static void
metaslab_group_destroy(metaslab_group_t *mg)
{
        avl_destroy(&mg->mg_metaslab_tree);
        mutex_destroy(&mg->mg_lock);
        kmem_free(mg, sizeof (metaslab_group_t));
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == NULL);
        msp->ms_group = mg;
        avl_add(&mg->mg_metaslab_tree, msp);
        mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == mg);
        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_group = NULL;
        mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
        /*
         * Although in principle the weight can be any value, in
         * practice we do not use values in the range [1, 510].
         */
        ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
        ASSERT(MUTEX_HELD(&msp->ms_lock));

        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == mg);
        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_weight = weight;
        avl_add(&mg->mg_metaslab_tree, msp);
        mutex_exit(&mg->mg_lock);
}
/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate.  This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
        space_seg_t *ss, ssearch;
        avl_index_t where;

        ssearch.ss_start = *cursor;
        ssearch.ss_end = *cursor + size;

        ss = avl_find(t, &ssearch, &where);
        if (ss == NULL)
                ss = avl_nearest(t, where, AVL_AFTER);

        while (ss != NULL) {
                uint64_t offset = P2ROUNDUP(ss->ss_start, align);

                if (offset + size <= ss->ss_end) {
                        *cursor = offset + size;
                        return (offset);
                }
                ss = AVL_NEXT(t, ss);
        }

        /*
         * If we know we've searched the whole map (*cursor == 0), give up.
         * Otherwise, reset the cursor to the beginning and try again.
         */
        if (*cursor == 0)
                return (-1ULL);

        *cursor = 0;
        return (metaslab_block_picker(t, cursor, size, align));
}
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static void
metaslab_ff_load(space_map_t *sm)
{
        ASSERT(sm->sm_ppd == NULL);

        /* 64 cursors: one per power-of-two alignment class (see metaslab_ff_alloc()) */
        sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
        sm->sm_pp_root = NULL;
}

static void
metaslab_ff_unload(space_map_t *sm)
{
        kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
        sm->sm_ppd = NULL;
}
static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
        avl_tree_t *t = &sm->sm_root;
        uint64_t align = size & -size;
        uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
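        /*
         * align is the largest power of two that divides size, and each
         * power-of-two alignment class gets its own cursor in sm_ppd.
         * For example, size = 0x6000 gives align = 0x2000 (2^13), so this
         * allocation uses cursor slot highbit(0x2000) - 1 = 13.
         */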

        return (metaslab_block_picker(t, cursor, size, align));
}
/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}
static space_map_ops_t metaslab_ff_ops = {
        metaslab_ff_load,
        metaslab_ff_unload,
        metaslab_ff_alloc,
        metaslab_ff_claim,
        metaslab_ff_free,
        NULL    /* maxsize */
};
/*
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method.  Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 */
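/*
 * The DF allocator keeps two views of the free space: the offset-sorted
 * sm_root tree used for first-fit (cursor-based) allocation, and the
 * size-sorted sm_pp_root tree built in metaslab_df_load(), which provides
 * both the best-fit search order and a cheap answer (avl_last()) to
 * "what is the largest free segment?" in metaslab_df_maxsize().
 */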
static uint64_t
metaslab_df_maxsize(space_map_t *sm)
{
        avl_tree_t *t = sm->sm_pp_root;
        space_seg_t *ss;

        if (t == NULL || (ss = avl_last(t)) == NULL)
                return (0ULL);

        return (ss->ss_end - ss->ss_start);
}
static int
metaslab_df_seg_compare(const void *x1, const void *x2)
{
        const space_seg_t *s1 = x1;
        const space_seg_t *s2 = x2;
        uint64_t ss_size1 = s1->ss_end - s1->ss_start;
        uint64_t ss_size2 = s2->ss_end - s2->ss_start;

        /* Sort by increasing segment size, then by starting offset. */
        if (ss_size1 < ss_size2)
                return (-1);
        if (ss_size1 > ss_size2)
                return (1);

        if (s1->ss_start < s2->ss_start)
                return (-1);
        if (s1->ss_start > s2->ss_start)
                return (1);

        return (0);
}
static void
metaslab_df_load(space_map_t *sm)
{
        space_seg_t *ss;

        ASSERT(sm->sm_ppd == NULL);
        sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

        sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
        avl_create(sm->sm_pp_root, metaslab_df_seg_compare,
            sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

        for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
                avl_add(sm->sm_pp_root, ss);
}
static void
metaslab_df_unload(space_map_t *sm)
{
        void *cookie = NULL;

        kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
        sm->sm_ppd = NULL;

        while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
                /* tear down the tree */
        }

        avl_destroy(sm->sm_pp_root);
        kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
        sm->sm_pp_root = NULL;
}
static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
        avl_tree_t *t = &sm->sm_root;
        uint64_t align = size & -size;
        uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
        uint64_t max_size = metaslab_df_maxsize(sm);
        int free_pct = sm->sm_space * 100 / sm->sm_size;

        ASSERT(MUTEX_HELD(sm->sm_lock));
        ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

        if (max_size < size)
                return (-1ULL);

        /*
         * If we're running low on space, switch to using the size-sorted
         * AVL tree (best-fit).
         */
        if (max_size < metaslab_df_alloc_threshold ||
            free_pct < metaslab_df_free_pct) {
                t = sm->sm_pp_root;
                *cursor = 0;
        }

        return (metaslab_block_picker(t, cursor, size, 1ULL));
}
/* ARGSUSED */
static void
metaslab_df_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_df_free(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}
static space_map_ops_t metaslab_df_ops = {
        metaslab_df_load,
        metaslab_df_unload,
        metaslab_df_alloc,
        metaslab_df_claim,
        metaslab_df_free,
        metaslab_df_maxsize
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
        vdev_t *vd = mg->mg_vd;
        metaslab_t *msp;

        msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
        mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

        msp->ms_smo_syncing = *smo;

        /*
         * We create the main space map here, but we don't create the
         * allocmaps and freemaps until metaslab_sync_done().  This serves
         * two purposes: it allows metaslab_sync_done() to detect the
         * addition of new space; and for debugging, it ensures that we'd
         * get a data fault on any attempt to use this metaslab before
         * it's ready.
         */
        space_map_create(&msp->ms_map, start, size,
            vd->vdev_ashift, &msp->ms_lock);

        metaslab_group_add(mg, msp);

        /*
         * If we're opening an existing pool (txg == 0) or creating
         * a new one (txg == TXG_INITIAL), all space is available now.
         * If we're adding space to an existing pool, the new space
         * does not become available until after this txg has synced.
         */
        if (txg <= TXG_INITIAL)
                metaslab_sync_done(msp, 0);

        if (txg != 0) {
                /*
                 * The vdev is dirty, but the metaslab isn't -- it just needs
                 * to have metaslab_sync_done() invoked from vdev_sync_done().
                 * [We could just dirty the metaslab, but that would cause us
                 * to allocate a space map object for it, which is wasteful
                 * and would mess up the locality logic in metaslab_weight().]
                 */
                ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
                vdev_dirty(vd, 0, NULL, txg);
                vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
        }

        return (msp);
}
void
metaslab_fini(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;
        int t;

        vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
            -msp->ms_smo.smo_alloc, B_TRUE);

        metaslab_group_remove(mg, msp);

        mutex_enter(&msp->ms_lock);

        space_map_unload(&msp->ms_map);
        space_map_destroy(&msp->ms_map);

        for (t = 0; t < TXG_SIZE; t++) {
                space_map_destroy(&msp->ms_allocmap[t]);
                space_map_destroy(&msp->ms_freemap[t]);
        }

        mutex_exit(&msp->ms_lock);
        mutex_destroy(&msp->ms_lock);

        kmem_free(msp, sizeof (metaslab_t));
}
#define METASLAB_WEIGHT_PRIMARY         (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY       (1ULL << 62)
#define METASLAB_ACTIVE_MASK            \
        (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define METASLAB_SMO_BONUS_MULTIPLIER   2
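/*
 * The two activation bits are the highest bits of the weight, so any active
 * metaslab sorts ahead of every inactive one in the group's weight-ordered
 * AVL tree (metaslab_compare() sorts by decreasing weight).
 */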
static uint64_t
metaslab_weight(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;
        space_map_t *sm = &msp->ms_map;
        space_map_obj_t *smo = &msp->ms_smo;
        vdev_t *vd = mg->mg_vd;
        uint64_t weight, space;

        ASSERT(MUTEX_HELD(&msp->ms_lock));

        /*
         * The baseline weight is the metaslab's free space.
         */
        space = sm->sm_size - smo->smo_alloc;
        weight = space;

        /*
         * Modern disks have uniform bit density and constant angular velocity.
         * Therefore, the outer recording zones are faster (higher bandwidth)
         * than the inner zones by the ratio of outer to inner track diameter,
         * which is typically around 2:1.  We account for this by assigning
         * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
         * In effect, this means that we'll select the metaslab with the most
         * free bandwidth rather than simply the one with the most free space.
         */
        weight = 2 * weight -
            ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
        ASSERT(weight >= space && weight <= 2 * space);
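        /*
         * For example, on a vdev with 200 metaslabs the outermost metaslab
         * (index 0) is weighted at 2x its free space, the middle one at
         * 1.5x, and the innermost at just over 1x.
         */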
        /*
         * For locality, assign higher weight to metaslabs we've used before.
         */
        if (smo->smo_object != 0)
                weight *= METASLAB_SMO_BONUS_MULTIPLIER;
        ASSERT(weight >= space &&
            weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

        /*
         * If this metaslab is one we're actively using, adjust its weight to
         * make it preferable to any inactive metaslab so we'll polish it off.
         */
        weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

        return (weight);
}
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
{
        space_map_t *sm = &msp->ms_map;
        space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

        ASSERT(MUTEX_HELD(&msp->ms_lock));

        if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
                int error = space_map_load(sm, sm_ops, SM_FREE, &msp->ms_smo,
                    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
                if (error) {
                        metaslab_group_sort(msp->ms_group, msp, 0);
                        return (error);
                }

                /*
                 * If we were able to load the map then make sure
                 * that this map is still able to satisfy our request.
                 */
                if (msp->ms_weight < size)
                        return (ENOSPC);

                metaslab_group_sort(msp->ms_group, msp,
                    msp->ms_weight | activation_weight);
        }
        ASSERT(sm->sm_loaded);
        ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

        return (0);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
        /*
         * If size < SPA_MINBLOCKSIZE, then we will not allocate from
         * this metaslab again.  In that case, it had better be empty,
         * or we would be leaving space on the table.
         */
        ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
        metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
        ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
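/*
 * Callers passivate either with the weight's active bits cleared (when
 * giving up an active slot) or with size - 1 after an allocation of `size'
 * bytes failed, so the metaslab is not retried for requests that large
 * until its weight is recomputed; see metaslab_group_alloc().
 */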
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
        vdev_t *vd = msp->ms_group->mg_vd;
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa->spa_meta_objset;
        space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
        space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
        space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
        space_map_t *sm = &msp->ms_map;
        space_map_obj_t *smo = &msp->ms_smo_syncing;
        dmu_buf_t *db;
        dmu_tx_t *tx;
        int t;

        tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

        /*
         * The only state that can actually be changing concurrently with
         * metaslab_sync() is the metaslab's ms_map.  No other thread can
         * be modifying this txg's allocmap, freemap, freed_map, or smo.
         * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
         * We drop it whenever we call into the DMU, because the DMU
         * can call down to us (e.g. via zio_free()) at any time.
         */
        mutex_enter(&msp->ms_lock);

        if (smo->smo_object == 0) {
                ASSERT(smo->smo_objsize == 0);
                ASSERT(smo->smo_alloc == 0);
                mutex_exit(&msp->ms_lock);
                smo->smo_object = dmu_object_alloc(mos,
                    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
                    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
                ASSERT(smo->smo_object != 0);
                dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
                    (sm->sm_start >> vd->vdev_ms_shift),
                    sizeof (uint64_t), &smo->smo_object, tx);
                mutex_enter(&msp->ms_lock);
        }

        space_map_walk(freemap, space_map_add, freed_map);

        if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
            2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
                /*
                 * The in-core space map representation is twice as compact
                 * as the on-disk one, so it's time to condense the latter
                 * by generating a pure allocmap from first principles.
                 *
                 * This metaslab is 100% allocated,
                 * minus the content of the in-core map (sm),
                 * minus what's been freed this txg (freed_map),
                 * minus allocations from txgs in the future
                 * (because they haven't been committed yet).
                 */
                space_map_vacate(allocmap, NULL, NULL);
                space_map_vacate(freemap, NULL, NULL);

                space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

                space_map_walk(sm, space_map_remove, allocmap);
                space_map_walk(freed_map, space_map_remove, allocmap);

                for (t = 1; t < TXG_CONCURRENT_STATES; t++)
                        space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
                            space_map_remove, allocmap);
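
                /*
                 * At this point allocmap describes exactly the space that is
                 * allocated as of this txg, so the on-disk map can be
                 * truncated and rewritten from scratch below.
                 */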
                mutex_exit(&msp->ms_lock);
                space_map_truncate(smo, mos, tx);
                mutex_enter(&msp->ms_lock);
        }

        space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
        space_map_sync(freemap, SM_FREE, smo, mos, tx);

        mutex_exit(&msp->ms_lock);

        VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
        dmu_buf_will_dirty(db, tx);
        ASSERT3U(db->db_size, >=, sizeof (*smo));
        bcopy(smo, db->db_data, sizeof (*smo));
        dmu_buf_rele(db, FTAG);

        dmu_tx_commit(tx);
}
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
        space_map_obj_t *smo = &msp->ms_smo;
        space_map_obj_t *smosync = &msp->ms_smo_syncing;
        space_map_t *sm = &msp->ms_map;
        space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
        metaslab_group_t *mg = msp->ms_group;
        vdev_t *vd = mg->mg_vd;
        int t;

        mutex_enter(&msp->ms_lock);

        /*
         * If this metaslab is just becoming available, initialize its
         * allocmaps and freemaps and add its capacity to the vdev.
         */
        if (freed_map->sm_size == 0) {
                for (t = 0; t < TXG_SIZE; t++) {
                        space_map_create(&msp->ms_allocmap[t], sm->sm_start,
                            sm->sm_size, sm->sm_shift, sm->sm_lock);
                        space_map_create(&msp->ms_freemap[t], sm->sm_start,
                            sm->sm_size, sm->sm_shift, sm->sm_lock);
                }
                vdev_space_update(vd, sm->sm_size, 0, B_TRUE);
        }

        vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE);

        ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
        ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

        /*
         * If there's a space_map_load() in progress, wait for it to complete
         * so that we have a consistent view of the in-core space map.
         * Then, add everything we freed in this txg to the map.
         */
        space_map_load_wait(sm);
        space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

        *smo = *smosync;

        /*
         * If the map is loaded but no longer active, evict it as soon as all
         * future allocations have synced.  (If we unloaded it now and then
         * loaded a moment later, the map wouldn't reflect those allocations.)
         */
        if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
                int evictable = 1;

                for (t = 1; t < TXG_CONCURRENT_STATES; t++)
                        if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
                                evictable = 0;

                if (evictable)
                        space_map_unload(sm);
        }

        metaslab_group_sort(mg, msp, metaslab_weight(msp));

        mutex_exit(&msp->ms_lock);
}
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
        uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
        uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
        uint64_t start = msp->ms_map.sm_start >> ms_shift;

        if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
                return (1ULL << 63);

        if (offset < start)
                return ((start - offset) << ms_shift);
        if (offset > start)
                return ((offset - start) << ms_shift);
        return (0);
}
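/*
 * metaslab_group_alloc() compares this distance against a target derived
 * from min_distance so that multiple DVAs of the same block land on
 * metaslabs that are spread apart on a vdev.
 */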
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
        metaslab_t *msp = NULL;
        uint64_t offset = -1ULL;
        avl_tree_t *t = &mg->mg_metaslab_tree;
        uint64_t activation_weight;
        uint64_t target_distance;
        int i;

        activation_weight = METASLAB_WEIGHT_PRIMARY;
        for (i = 0; i < d; i++) {
                if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
                        activation_weight = METASLAB_WEIGHT_SECONDARY;
                        break;
                }
        }

        for (;;) {
                boolean_t was_active;

                mutex_enter(&mg->mg_lock);
                for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
                        if (msp->ms_weight < size) {
                                mutex_exit(&mg->mg_lock);
                                return (-1ULL);
                        }

                        was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
                        if (activation_weight == METASLAB_WEIGHT_PRIMARY)
                                break;

                        target_distance = min_distance +
                            (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

                        for (i = 0; i < d; i++)
                                if (metaslab_distance(msp, &dva[i]) <
                                    target_distance)
                                        break;
                        if (i == d)
                                break;
                }
                mutex_exit(&mg->mg_lock);
                if (msp == NULL)
                        return (-1ULL);

                mutex_enter(&msp->ms_lock);

                /*
                 * Ensure that the metaslab we have selected is still
                 * capable of handling our request.  It's possible that
                 * another thread may have changed the weight while we
                 * were blocked on the metaslab lock.
                 */
                if (msp->ms_weight < size || (was_active &&
                    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
                    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
                    activation_weight == METASLAB_WEIGHT_PRIMARY) {
                        metaslab_passivate(msp,
                            msp->ms_weight & ~METASLAB_ACTIVE_MASK);
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if (metaslab_activate(msp, activation_weight, size) != 0) {
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
                        break;

                metaslab_passivate(msp, size - 1);

                mutex_exit(&msp->ms_lock);
        }

        if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
                vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

        space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

        mutex_exit(&msp->ms_lock);

        return (offset);
}
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
        metaslab_group_t *mg, *rotor;
        vdev_t *vd;
        int dshift = 3;
        int all_zero;
        int zio_lock = B_FALSE;
        boolean_t allocatable;
        uint64_t offset = -1ULL;
        uint64_t asize;
        uint64_t distance;

        ASSERT(!DVA_IS_VALID(&dva[d]));

        /*
         * For testing, make some blocks above a certain size be gang blocks.
         */
        if (psize >= metaslab_gang_bang && (LBOLT & 3) == 0)
                return (ENOSPC);

        /*
         * Start at the rotor and loop through all mgs until we find something.
         * Note that there's no locking on mc_rotor or mc_allocated because
         * nothing actually breaks if we miss a few updates -- we just won't
         * allocate quite as evenly.  It all balances out over time.
         *
         * If we are doing ditto or log blocks, try to spread them across
         * consecutive vdevs.  If we're forced to reuse a vdev before we've
         * allocated all of our ditto blocks, then try to spread them out on
         * that vdev as much as possible.  If it turns out to not be possible,
         * gradually lower our standards until anything becomes acceptable.
         * Also, allocating on consecutive vdevs (as opposed to random vdevs)
         * gives us hope of containing our fault domains to something we're
         * able to reason about.  Otherwise, any two top-level vdev failures
         * will guarantee the loss of data.  With consecutive allocation,
         * only two adjacent top-level vdev failures will result in data loss.
         *
         * If we are doing gang blocks (hintdva is non-NULL), try to keep
         * ourselves on the same vdev as our gang block header.  That
         * way, we can hope for locality in vdev_cache, plus it makes our
         * fault domains something tractable.
         */
        if (hintdva) {
                vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
                if (flags & METASLAB_HINTBP_AVOID)
                        mg = vd->vdev_mg->mg_next;
                else
                        mg = vd->vdev_mg;
        } else if (d != 0) {
                vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
                mg = vd->vdev_mg->mg_next;
        } else {
                mg = mc->mc_rotor;
        }

        /*
         * If the hint put us into the wrong class, just follow the rotor.
         */
        if (mg->mg_class != mc)
                mg = mc->mc_rotor;

        rotor = mg;
top:
        all_zero = B_TRUE;
        do {
                vd = mg->mg_vd;

                /*
                 * Don't allocate from faulted devices.
                 */
                if (zio_lock) {
                        spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
                        allocatable = vdev_allocatable(vd);
                        spa_config_exit(spa, SCL_ZIO, FTAG);
                } else {
                        allocatable = vdev_allocatable(vd);
                }
                if (!allocatable)
                        goto next;

                /*
                 * Avoid writing single-copy data to a failing vdev
                 */
                if ((vd->vdev_stat.vs_write_errors > 0 ||
                    vd->vdev_state < VDEV_STATE_HEALTHY) &&
                    d == 0 && dshift == 3) {
                        all_zero = B_FALSE;
                        goto next;
                }

                ASSERT(mg->mg_class == mc);

                distance = vd->vdev_asize >> dshift;
                if (distance <= (1ULL << vd->vdev_ms_shift))
                        distance = 0;
                else
                        all_zero = B_FALSE;

                asize = vdev_psize_to_asize(vd, psize);
                ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

                offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
                if (offset != -1ULL) {
                        /*
                         * If we've just selected this metaslab group,
                         * figure out whether the corresponding vdev is
                         * over- or under-used relative to the pool,
                         * and set an allocation bias to even it out.
                         */
                        if (mc->mc_allocated == 0) {
                                vdev_stat_t *vs = &vd->vdev_stat;
                                uint64_t alloc, space;
                                int64_t vu, su;

                                alloc = spa_get_alloc(spa);
                                space = spa_get_space(spa);

                                /*
                                 * Determine percent used in units of 0..1024.
                                 * (This is just to avoid floating point.)
                                 */
                                vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
                                su = (alloc << 10) / (space + 1);

                                /*
                                 * Bias by at most +/- 25% of the aliquot.
                                 */
                                mg->mg_bias = ((su - vu) *
                                    (int64_t)mg->mg_aliquot) / (1024 * 4);
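
                                /*
                                 * For example, if this vdev is 60% full
                                 * (vu = 614) while the pool is 50% full
                                 * (su = 512), the bias is (512 - 614) *
                                 * mg_aliquot / 4096, i.e. about -2.5% of
                                 * the aliquot, steering a little less work
                                 * toward this vdev.
                                 */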
                        }

                        if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
                            mg->mg_aliquot + mg->mg_bias) {
                                mc->mc_rotor = mg->mg_next;
                                mc->mc_allocated = 0;
                        }

                        DVA_SET_VDEV(&dva[d], vd->vdev_id);
                        DVA_SET_OFFSET(&dva[d], offset);
                        DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
                        DVA_SET_ASIZE(&dva[d], asize);

                        return (0);
                }
next:
                mc->mc_rotor = mg->mg_next;
                mc->mc_allocated = 0;
        } while ((mg = mg->mg_next) != rotor);

        if (!all_zero) {
                dshift++;
                ASSERT(dshift < 64);
                goto top;
        }

        if (!allocatable && !zio_lock) {
                dshift = 3;
                zio_lock = B_TRUE;
                goto top;
        }

        bzero(&dva[d], sizeof (dva_t));

        return (ENOSPC);
}
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd;
        metaslab_t *msp;

        ASSERT(DVA_IS_VALID(dva));

        if (txg > spa_freeze_txg(spa))
                return;

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
            (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
                cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
                    (u_longlong_t)vdev, (u_longlong_t)offset);
                ASSERT(0);
                return;
        }

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        mutex_enter(&msp->ms_lock);

        if (now) {
                space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
                    offset, size);
                space_map_free(&msp->ms_map, offset, size);
        } else {
                if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
                        vdev_dirty(vd, VDD_METASLAB, msp, txg);
                space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
        }

        mutex_exit(&msp->ms_lock);
}
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd;
        metaslab_t *msp;
        int error = 0;

        ASSERT(DVA_IS_VALID(dva));

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
            (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
                return (ENXIO);

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        mutex_enter(&msp->ms_lock);

        error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);
        if (error || txg == 0) {        /* txg == 0 indicates dry run */
                mutex_exit(&msp->ms_lock);
                return (error);
        }

        space_map_claim(&msp->ms_map, offset, size);

        if (spa_writeable(spa)) {       /* don't dirty if we're zdb(1M) */
                if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
                        vdev_dirty(vd, VDD_METASLAB, msp, txg);
                space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
        }

        mutex_exit(&msp->ms_lock);

        return (0);
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
        dva_t *dva = bp->blk_dva;
        dva_t *hintdva = hintbp->blk_dva;
        int error = 0;

        ASSERT(bp->blk_birth == 0);

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        if (mc->mc_rotor == NULL) {     /* no vdevs in this class */
                spa_config_exit(spa, SCL_ALLOC, FTAG);
                return (ENOSPC);
        }

        ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
        ASSERT(BP_GET_NDVAS(bp) == 0);
        ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

        for (int d = 0; d < ndvas; d++) {
                error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
                    txg, flags);
                if (error) {
                        for (d--; d >= 0; d--) {
                                metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
                                bzero(&dva[d], sizeof (dva_t));
                        }
                        spa_config_exit(spa, SCL_ALLOC, FTAG);
                        return (error);
                }
        }

        ASSERT(BP_GET_NDVAS(bp) == ndvas);

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        bp->blk_birth = txg;

        return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg);

        spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++)
                metaslab_free_dva(spa, &dva[d], txg, now);

        spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);
        int error = 0;

        ASSERT(!BP_IS_HOLE(bp));

        if (txg != 0) {
                /*
                 * First do a dry run to make sure all DVAs are claimable,
                 * so we don't have to unwind from partial failures below.
                 */
                if ((error = metaslab_claim(spa, bp, 0)) != 0)
                        return (error);
        }

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++)
                if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
                        break;

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        ASSERT(error == 0 || txg == 0);

        return (error);
}