/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
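
/*
 * The aliquot is, roughly, the number of bytes we allocate from one
 * metaslab group before rotating to the next; each group scales it by
 * its number of child vdevs (see metaslab_group_create() below).
 */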
uint64_t metaslab_aliquot = 512ULL << 10;

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(void)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_rotor = NULL;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	metaslab_group_t *mg;

	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);
	}

	kmem_free(mc, sizeof (metaslab_class_t));
}
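
/*
 * The groups of a class form a circular doubly-linked list, with mc_rotor
 * pointing at the group we will try next; add and remove keep that list
 * consistent.
 */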

void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}

	mc->mc_rotor = mg;
	mg->mg_class = mc;
}

void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	mg->mg_class = NULL;
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
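
/*
 * The group's AVL tree is kept sorted by decreasing weight so that
 * allocation can simply walk it from the front; equal weights are
 * ordered by start offset to keep the comparison total.
 */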
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
	mg->mg_vd = vd;
	metaslab_class_add(mc, mg);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
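
/*
 * The first-fit allocator keeps one cursor per power-of-two alignment in
 * the 64-entry array hung off sm->sm_ppd.  An allocation of size S uses
 * the cursor for alignment (S & -S), so that, for example, successive 8K
 * allocations resume searching where the previous 8K allocation left off
 * instead of rescanning the space map from the beginning.
 */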

static void
metaslab_ff_load(space_map_t *sm)
{
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
}

static void
metaslab_ff_unload(space_map_t *sm)
{
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_ff_alloc(sm, size));
}

/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free
};

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
	uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		/*
		 * The vdev is dirty, but the metaslab isn't -- it just needs
		 * to have metaslab_sync_done() invoked from vdev_sync_done().
		 * [We could just dirty the metaslab, but that would cause us
		 * to allocate a space map object for it, which is wasteful
		 * and would mess up the locality logic in metaslab_weight().]
		 */
		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	int t;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2
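
/*
 * The top two bits of the weight mark a metaslab as actively in use for
 * primary or secondary (ditto) allocations.  Because they are the highest
 * bits, active metaslabs always sort ahead of inactive ones in the group's
 * AVL tree.
 */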

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
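
	/*
	 * For example, with vdev_ms_count == 4 and 100 units free, the
	 * metaslabs at indices 0 through 3 would weigh in at 200, 175,
	 * 150, and 125 respectively.
	 */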

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
	ASSERT(weight >= space &&
	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (weight);
}
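
/*
 * Activating a metaslab loads its space map into core (using the first-fit
 * ops above) and tags its weight with the requested activation bit, which
 * keeps it at the front of the group's AVL tree until it is passivated.
 */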

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	space_map_t *sm = &msp->ms_map;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int error = space_map_load(sm, &metaslab_ff_ops,
		    SM_FREE, &msp->ms_smo,
		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
		if (error) {
			metaslab_group_sort(msp->ms_group, msp, 0);
			return (error);
		}
		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	int t;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
	mutex_enter(&msp->ms_lock);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		mutex_exit(&msp->ms_lock);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
		mutex_enter(&msp->ms_lock);
	}
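
	/*
	 * Space freed in this txg doesn't become allocatable until the txg
	 * commits, so stage it in the freed map for TXG_CLEAN(txg);
	 * metaslab_sync_done() will vacate that map back into ms_map.
	 */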
	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, ==, sizeof (*smo));
	bcopy(smo, db->db_data, db->db_size);
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int t;

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}
		vdev_space_update(vd, sm->sm_size, 0);
	}

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	*smo = *smosync;

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}
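
/*
 * Distance, in bytes, between a metaslab and a DVA on the same vdev;
 * DVAs on other vdevs are treated as infinitely far away.  Used below
 * to spread ditto copies of a block across a vdev.
 */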

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
	uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++)
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
			activation_weight = METASLAB_WEIGHT_SECONDARY;
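
	/*
	 * If an earlier DVA of this block already lives on this vdev, we are
	 * placing a ditto copy here, so activate metaslabs as SECONDARY: the
	 * copy won't displace the metaslab currently holding the PRIMARY
	 * activation, and target_distance below keeps it away from the
	 * earlier copy.
	 */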
	for (;;) {
		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < size) {
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);

		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request.  It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < size) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, size - 1);

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, uint64_t psize, dva_t *dva, int d,
    dva_t *hintdva, uint64_t txg, boolean_t hintdva_avoid)
{
	metaslab_group_t *mg, *rotor;
	metaslab_class_t *mc;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	mc = spa_metaslab_class_select(spa);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
		if (hintdva_avoid)
			mg = vd->vdev_mg->mg_next;
		else
			mg = vd->vdev_mg;
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}
	rotor = mg;

top:
	all_zero = B_TRUE;
	do {
		vd = mg->mg_vd;

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_allocated == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				uint64_t alloc, space;
				int64_t vu, su;

				alloc = spa_get_alloc(spa);
				space = spa_get_space(spa);

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				su = (alloc << 10) / (space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((su - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}
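
			/*
			 * For example, if this vdev is 50% full (vu == 512)
			 * while the pool overall is 25% full (su == 256),
			 * the bias works out to -1/16 of the aliquot, so we
			 * allocate slightly less here until usage evens out.
			 */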

			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_allocated = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], 0);
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);

		/*
		 * Verify that this region is actually allocated in
		 * either a ms_allocmap or the ms_map.
		 */
		if (msp->ms_map.sm_loaded) {
			boolean_t allocd = B_FALSE;
			int i;

			if (!space_map_contains(&msp->ms_map, offset, size)) {
				allocd = B_TRUE;
			} else {
				for (i = 0; i < TXG_CONCURRENT_STATES; i++) {
					space_map_t *sm = &msp->ms_allocmap
					    [(txg - i) & TXG_MASK];
					if (space_map_contains(sm,
					    offset, size)) {
						allocd = B_TRUE;
						break;
					}
				}
			}

			if (!allocd) {
				zfs_panic_recover("freeing free segment "
				    "(vdev=%llu offset=%llx size=%llx)",
				    (longlong_t)vdev, (longlong_t)offset,
				    (longlong_t)size);
			}
		}
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
	if (error) {
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(vd, VDD_METASLAB, msp, txg);

	space_map_claim(&msp->ms_map, offset, size);
	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (0);
}
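
/*
 * Allocate ndvas copies of the block, unwinding any copies already placed
 * if a later one fails, so the caller never sees a partially-filled
 * block pointer.
 */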

int
metaslab_alloc(spa_t *spa, uint64_t psize, blkptr_t *bp, int ndvas,
    uint64_t txg, blkptr_t *hintbp, boolean_t hintbp_avoid)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d;
	int error = 0;

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, psize, dva, d, hintdva,
		    txg, hintbp_avoid);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			return (error);
		}
	}

	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d;

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;
	int d;

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	return (error);
}