4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
36 * Allow allocations to switch to gang blocks quickly. We do this to
37 * avoid having to load lots of space_maps in a given txg. There are,
38 * however, some cases where we want to avoid "fast" ganging and instead
39 * we want to do an exhaustive search of all metaslabs on this device.
40 * Currently we don't allow any gang, zil, or dump device related allocations
41 * to "fast" gang.
43 #define CAN_FASTGANG(flags) \
44 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
45 METASLAB_GANG_AVOID)))
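/*
 * Example (illustrative): CAN_FASTGANG(0) evaluates to 1, so an ordinary
 * allocation may give up early and gang; CAN_FASTGANG(METASLAB_GANG_CHILD)
 * evaluates to 0, forcing an exhaustive metaslab search for gang-related
 * allocations.
 */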
47 uint64_t metaslab_aliquot = 512ULL << 10;
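/*
 * Example (illustrative): with the default 512K aliquot, a top-level vdev
 * with, say, 6 children gets mg_aliquot = 3MB in metaslab_group_activate(),
 * so roughly 3MB is allocated from that group before the rotor advances to
 * the next one (see metaslab_alloc_dva()).
 */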
48 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
51 * The in-core space map representation is more compact than its on-disk form.
52 * The zfs_condense_pct determines how much more compact the in-core
53 * space_map representation must be before we compact it on-disk.
54 * Values should be greater than or equal to 100.
56 int zfs_condense_pct = 200;
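/*
 * Example (illustrative): with zfs_condense_pct = 200 and 1,000 segments in
 * the in-core free map, the minimal on-disk form is 1,000 * 8 = 8,000 bytes,
 * so metaslab_should_condense() will only condense once the space map object
 * has grown to at least 16,000 bytes.
 */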
59 * This value defines the number of allowed allocation failures per vdev.
60 * If a device reaches this threshold in a given txg then we consider skipping
61 * allocations on that device.
63 int zfs_mg_alloc_failures = 0;
65 SYSCTL_DECL(_vfs_zfs);
66 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RDTUN,
67 &zfs_mg_alloc_failures, 0,
68 "Number of allowed allocation failures per vdev");
69 TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
72 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
74 static int metaslab_debug = 0;
77 * Minimum size which forces the dynamic allocator to change
78 * its allocation strategy. Once the space map cannot satisfy
79 * an allocation of this size then it switches to using a more
80 * aggressive strategy (i.e., search by size rather than offset).
82 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
85 * The minimum free space, in percent, which must be available
86 * in a space map to continue allocations in a first-fit fashion.
87 * Once the space_map's free space drops below this level we dynamically
88 * switch to using best-fit allocations.
90 int metaslab_df_free_pct = 4;
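/*
 * Example (illustrative): a 1GB metaslab with 30MB free has
 * free_pct = 30 * 100 / 1024 = 2, which is below metaslab_df_free_pct, so
 * metaslab_df_alloc() switches from the offset-ordered (first-fit) tree to
 * the size-ordered (best-fit) tree. The same switch happens whenever the
 * largest free segment drops below metaslab_df_alloc_threshold.
 */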
93 * A metaslab is considered "free" if it contains a contiguous
94 * segment which is greater than metaslab_min_alloc_size.
96 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
99 * Max number of space_maps to prefetch.
101 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
104 * Percentage bonus multiplier for metaslabs that are in the bonus area.
106 int metaslab_smo_bonus_pct = 150;
109 * Should we be willing to write data to degraded vdevs?
111 boolean_t zfs_write_to_degraded = B_FALSE;
112 SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RW,
113 &zfs_write_to_degraded, 0,
114 "Allow writing data to degraded vdevs");
115 TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
118 * ==========================================================================
120 * ==========================================================================
123 metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
125 metaslab_class_t *mc;
127 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
137 metaslab_class_destroy(metaslab_class_t *mc)
139 ASSERT(mc->mc_rotor == NULL);
140 ASSERT(mc->mc_alloc == 0);
141 ASSERT(mc->mc_deferred == 0);
142 ASSERT(mc->mc_space == 0);
143 ASSERT(mc->mc_dspace == 0);
145 kmem_free(mc, sizeof (metaslab_class_t));
149 metaslab_class_validate(metaslab_class_t *mc)
151 metaslab_group_t *mg;
155 * Must hold one of the spa_config locks.
157 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
158 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
160 if ((mg = mc->mc_rotor) == NULL)
165 ASSERT(vd->vdev_mg != NULL);
166 ASSERT3P(vd->vdev_top, ==, vd);
167 ASSERT3P(mg->mg_class, ==, mc);
168 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
169 } while ((mg = mg->mg_next) != mc->mc_rotor);
175 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
176 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
178 atomic_add_64(&mc->mc_alloc, alloc_delta);
179 atomic_add_64(&mc->mc_deferred, defer_delta);
180 atomic_add_64(&mc->mc_space, space_delta);
181 atomic_add_64(&mc->mc_dspace, dspace_delta);
185 metaslab_class_get_alloc(metaslab_class_t *mc)
187 return (mc->mc_alloc);
191 metaslab_class_get_deferred(metaslab_class_t *mc)
193 return (mc->mc_deferred);
197 metaslab_class_get_space(metaslab_class_t *mc)
199 return (mc->mc_space);
203 metaslab_class_get_dspace(metaslab_class_t *mc)
205 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
209 * ==========================================================================
211 * ==========================================================================
214 metaslab_compare(const void *x1, const void *x2)
216 const metaslab_t *m1 = x1;
217 const metaslab_t *m2 = x2;
219 if (m1->ms_weight < m2->ms_weight)
221 if (m1->ms_weight > m2->ms_weight)
225 * If the weights are identical, use the offset to force uniqueness.
227 if (m1->ms_map->sm_start < m2->ms_map->sm_start)
229 if (m1->ms_map->sm_start > m2->ms_map->sm_start)
232 ASSERT3P(m1, ==, m2);
238 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
240 metaslab_group_t *mg;
242 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
243 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
244 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
245 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
248 mg->mg_activation_count = 0;
254 metaslab_group_destroy(metaslab_group_t *mg)
256 ASSERT(mg->mg_prev == NULL);
257 ASSERT(mg->mg_next == NULL);
259 * We may have gone below zero with the activation count
260 * either because we never activated in the first place or
261 * because we're done, and possibly removing the vdev.
263 ASSERT(mg->mg_activation_count <= 0);
265 avl_destroy(&mg->mg_metaslab_tree);
266 mutex_destroy(&mg->mg_lock);
267 kmem_free(mg, sizeof (metaslab_group_t));
271 metaslab_group_activate(metaslab_group_t *mg)
273 metaslab_class_t *mc = mg->mg_class;
274 metaslab_group_t *mgprev, *mgnext;
276 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
278 ASSERT(mc->mc_rotor != mg);
279 ASSERT(mg->mg_prev == NULL);
280 ASSERT(mg->mg_next == NULL);
281 ASSERT(mg->mg_activation_count <= 0);
283 if (++mg->mg_activation_count <= 0)
286 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
288 if ((mgprev = mc->mc_rotor) == NULL) {
292 mgnext = mgprev->mg_next;
293 mg->mg_prev = mgprev;
294 mg->mg_next = mgnext;
295 mgprev->mg_next = mg;
296 mgnext->mg_prev = mg;
302 metaslab_group_passivate(metaslab_group_t *mg)
304 metaslab_class_t *mc = mg->mg_class;
305 metaslab_group_t *mgprev, *mgnext;
307 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
309 if (--mg->mg_activation_count != 0) {
310 ASSERT(mc->mc_rotor != mg);
311 ASSERT(mg->mg_prev == NULL);
312 ASSERT(mg->mg_next == NULL);
313 ASSERT(mg->mg_activation_count < 0);
317 mgprev = mg->mg_prev;
318 mgnext = mg->mg_next;
323 mc->mc_rotor = mgnext;
324 mgprev->mg_next = mgnext;
325 mgnext->mg_prev = mgprev;
333 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
335 mutex_enter(&mg->mg_lock);
336 ASSERT(msp->ms_group == NULL);
339 avl_add(&mg->mg_metaslab_tree, msp);
340 mutex_exit(&mg->mg_lock);
344 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
346 mutex_enter(&mg->mg_lock);
347 ASSERT(msp->ms_group == mg);
348 avl_remove(&mg->mg_metaslab_tree, msp);
349 msp->ms_group = NULL;
350 mutex_exit(&mg->mg_lock);
354 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
357 * Although in principle the weight can be any value, in
358 * practice we do not use values in the range [1, 510].
360 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
361 ASSERT(MUTEX_HELD(&msp->ms_lock));
363 mutex_enter(&mg->mg_lock);
364 ASSERT(msp->ms_group == mg);
365 avl_remove(&mg->mg_metaslab_tree, msp);
366 msp->ms_weight = weight;
367 avl_add(&mg->mg_metaslab_tree, msp);
368 mutex_exit(&mg->mg_lock);
372 * ==========================================================================
373 * Common allocator routines
374 * ==========================================================================
377 metaslab_segsize_compare(const void *x1, const void *x2)
379 const space_seg_t *s1 = x1;
380 const space_seg_t *s2 = x2;
381 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
382 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
384 if (ss_size1 < ss_size2)
386 if (ss_size1 > ss_size2)
389 if (s1->ss_start < s2->ss_start)
391 if (s1->ss_start > s2->ss_start)
398 * This is a helper function that can be used by the allocator to find
399 * a suitable block to allocate. This will search the specified AVL
400 * tree looking for a block that matches the specified criteria.
403 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
406 space_seg_t *ss, ssearch;
409 ssearch.ss_start = *cursor;
410 ssearch.ss_end = *cursor + size;
412 ss = avl_find(t, &ssearch, &where);
414 ss = avl_nearest(t, where, AVL_AFTER);
417 uint64_t offset = P2ROUNDUP(ss->ss_start, align);
419 if (offset + size <= ss->ss_end) {
420 *cursor = offset + size;
423 ss = AVL_NEXT(t, ss);
427 * If we know we've searched the whole map (*cursor == 0), give up.
428 * Otherwise, reset the cursor to the beginning and try again.
434 return (metaslab_block_picker(t, cursor, size, align));
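/*
 * Common hooks shared by the allocators below. metaslab_pp_load() allocates
 * a 64-entry cursor array (sm_ppd), one cursor per power-of-two alignment
 * class, and builds a size-ordered AVL tree (sm_pp_root) that mirrors the
 * offset-ordered sm_root tree for best-fit lookups.
 */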
438 metaslab_pp_load(space_map_t *sm)
442 ASSERT(sm->sm_ppd == NULL);
443 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
445 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
446 avl_create(sm->sm_pp_root, metaslab_segsize_compare,
447 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
449 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
450 avl_add(sm->sm_pp_root, ss);
454 metaslab_pp_unload(space_map_t *sm)
458 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
461 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
462 /* tear down the tree */
465 avl_destroy(sm->sm_pp_root);
466 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
467 sm->sm_pp_root = NULL;
472 metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
474 /* No need to update cursor */
479 metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
481 /* No need to update cursor */
485 * Return the maximum contiguous segment within the metaslab.
488 metaslab_pp_maxsize(space_map_t *sm)
490 avl_tree_t *t = sm->sm_pp_root;
493 if (t == NULL || (ss = avl_last(t)) == NULL)
496 return (ss->ss_end - ss->ss_start);
500 * ==========================================================================
501 * The first-fit block allocator
502 * ==========================================================================
505 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
507 avl_tree_t *t = &sm->sm_root;
508 uint64_t align = size & -size;
509 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
511 return (metaslab_block_picker(t, cursor, size, align));
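/*
 * Example (illustrative): for size = 0x6000 (24K), align = size & -size
 * isolates the lowest set bit, 0x2000 (8K); highbit(0x2000) is 14, so the
 * search resumes from cursor slot 13 of the 64-entry sm_ppd array and every
 * candidate offset is rounded up to an 8K boundary.
 */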
516 metaslab_ff_fragmented(space_map_t *sm)
521 static space_map_ops_t metaslab_ff_ops = {
528 metaslab_ff_fragmented
532 * ==========================================================================
533 * Dynamic block allocator -
534 * Uses the first fit allocation scheme until space gets low and then
535 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
536 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
537 * ==========================================================================
540 metaslab_df_alloc(space_map_t *sm, uint64_t size)
542 avl_tree_t *t = &sm->sm_root;
543 uint64_t align = size & -size;
544 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
545 uint64_t max_size = metaslab_pp_maxsize(sm);
546 int free_pct = sm->sm_space * 100 / sm->sm_size;
548 ASSERT(MUTEX_HELD(sm->sm_lock));
549 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
555 * If we're running low on space switch to using the size
556 * sorted AVL tree (best-fit).
558 if (max_size < metaslab_df_alloc_threshold ||
559 free_pct < metaslab_df_free_pct) {
564 return (metaslab_block_picker(t, cursor, size, 1ULL));
568 metaslab_df_fragmented(space_map_t *sm)
570 uint64_t max_size = metaslab_pp_maxsize(sm);
571 int free_pct = sm->sm_space * 100 / sm->sm_size;
573 if (max_size >= metaslab_df_alloc_threshold &&
574 free_pct >= metaslab_df_free_pct)
580 static space_map_ops_t metaslab_df_ops = {
587 metaslab_df_fragmented
591 * ==========================================================================
592 * Other experimental allocators
593 * ==========================================================================
596 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
598 avl_tree_t *t = &sm->sm_root;
599 uint64_t *cursor = (uint64_t *)sm->sm_ppd;
600 uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
601 uint64_t max_size = metaslab_pp_maxsize(sm);
602 uint64_t rsize = size;
605 ASSERT(MUTEX_HELD(sm->sm_lock));
606 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
611 ASSERT3U(*extent_end, >=, *cursor);
614 * If we're running low on space switch to using the size
615 * sorted AVL tree (best-fit).
617 if ((*cursor + size) > *extent_end) {
620 *cursor = *extent_end = 0;
622 if (max_size > 2 * SPA_MAXBLOCKSIZE)
623 rsize = MIN(metaslab_min_alloc_size, max_size);
624 offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
626 *cursor = offset + size;
628 offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
630 ASSERT3U(*cursor, <=, *extent_end);
635 metaslab_cdf_fragmented(space_map_t *sm)
637 uint64_t max_size = metaslab_pp_maxsize(sm);
639 if (max_size > (metaslab_min_alloc_size * 10))
644 static space_map_ops_t metaslab_cdf_ops = {
651 metaslab_cdf_fragmented
654 uint64_t metaslab_ndf_clump_shift = 4;
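/*
 * Example (illustrative): with metaslab_ndf_clump_shift = 4, a 4K request
 * (hbit = 13) that misses its cursor falls back to searching for a free
 * segment of up to 1ULL << (13 + 4) = 128K, i.e. it prefers to carve small
 * allocations out of a larger extent.
 */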
657 metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
659 avl_tree_t *t = &sm->sm_root;
661 space_seg_t *ss, ssearch;
662 uint64_t hbit = highbit(size);
663 uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
664 uint64_t max_size = metaslab_pp_maxsize(sm);
666 ASSERT(MUTEX_HELD(sm->sm_lock));
667 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
672 ssearch.ss_start = *cursor;
673 ssearch.ss_end = *cursor + size;
675 ss = avl_find(t, &ssearch, &where);
676 if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
679 ssearch.ss_start = 0;
680 ssearch.ss_end = MIN(max_size,
681 1ULL << (hbit + metaslab_ndf_clump_shift));
682 ss = avl_find(t, &ssearch, &where);
684 ss = avl_nearest(t, where, AVL_AFTER);
689 if (ss->ss_start + size <= ss->ss_end) {
690 *cursor = ss->ss_start + size;
691 return (ss->ss_start);
698 metaslab_ndf_fragmented(space_map_t *sm)
700 uint64_t max_size = metaslab_pp_maxsize(sm);
702 if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
708 static space_map_ops_t metaslab_ndf_ops = {
715 metaslab_ndf_fragmented
718 space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
721 * ==========================================================================
723 * ==========================================================================
726 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
727 uint64_t start, uint64_t size, uint64_t txg)
729 vdev_t *vd = mg->mg_vd;
732 msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
733 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
735 msp->ms_smo_syncing = *smo;
738 * We create the main space map here, but we don't create the
739 * allocmaps and freemaps until metaslab_sync_done(). This serves
740 * two purposes: it allows metaslab_sync_done() to detect the
741 * addition of new space; and for debugging, it ensures that we'd
742 * data fault on any attempt to use this metaslab before it's ready.
744 msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
745 space_map_create(msp->ms_map, start, size,
746 vd->vdev_ashift, &msp->ms_lock);
748 metaslab_group_add(mg, msp);
750 if (metaslab_debug && smo->smo_object != 0) {
751 mutex_enter(&msp->ms_lock);
752 VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
753 SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
754 mutex_exit(&msp->ms_lock);
758 * If we're opening an existing pool (txg == 0) or creating
759 * a new one (txg == TXG_INITIAL), all space is available now.
760 * If we're adding space to an existing pool, the new space
761 * does not become available until after this txg has synced.
763 if (txg <= TXG_INITIAL)
764 metaslab_sync_done(msp, 0);
767 vdev_dirty(vd, 0, NULL, txg);
768 vdev_dirty(vd, VDD_METASLAB, msp, txg);
775 metaslab_fini(metaslab_t *msp)
777 metaslab_group_t *mg = msp->ms_group;
779 vdev_space_update(mg->mg_vd,
780 -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size);
782 metaslab_group_remove(mg, msp);
784 mutex_enter(&msp->ms_lock);
786 space_map_unload(msp->ms_map);
787 space_map_destroy(msp->ms_map);
788 kmem_free(msp->ms_map, sizeof (*msp->ms_map));
790 for (int t = 0; t < TXG_SIZE; t++) {
791 space_map_destroy(msp->ms_allocmap[t]);
792 space_map_destroy(msp->ms_freemap[t]);
793 kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t]));
794 kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t]));
797 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
798 space_map_destroy(msp->ms_defermap[t]);
799 kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t]));
802 ASSERT0(msp->ms_deferspace);
804 mutex_exit(&msp->ms_lock);
805 mutex_destroy(&msp->ms_lock);
807 kmem_free(msp, sizeof (metaslab_t));
810 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
811 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
812 #define METASLAB_ACTIVE_MASK \
813 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
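/*
 * The low bits of ms_weight carry the free-space-derived weight computed
 * below; the top two bits record whether the metaslab is active as a
 * primary or secondary allocation target. Because the space-derived weight
 * is always far smaller than 1ULL << 62, OR-ing in an activation bit makes
 * active metaslabs sort ahead of all inactive ones in the group's
 * weight-ordered AVL tree.
 */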
816 metaslab_weight(metaslab_t *msp)
818 metaslab_group_t *mg = msp->ms_group;
819 space_map_t *sm = msp->ms_map;
820 space_map_obj_t *smo = &msp->ms_smo;
821 vdev_t *vd = mg->mg_vd;
822 uint64_t weight, space;
824 ASSERT(MUTEX_HELD(&msp->ms_lock));
827 * This vdev is in the process of being removed so there is nothing
828 * for us to do here.
830 if (vd->vdev_removing) {
831 ASSERT0(smo->smo_alloc);
832 ASSERT0(vd->vdev_ms_shift);
837 * The baseline weight is the metaslab's free space.
839 space = sm->sm_size - smo->smo_alloc;
843 * Modern disks have uniform bit density and constant angular velocity.
844 * Therefore, the outer recording zones are faster (higher bandwidth)
845 * than the inner zones by the ratio of outer to inner track diameter,
846 * which is typically around 2:1. We account for this by assigning
847 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
848 * In effect, this means that we'll select the metaslab with the most
849 * free bandwidth rather than simply the one with the most free space.
851 weight = 2 * weight -
852 ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
853 ASSERT(weight >= space && weight <= 2 * space);
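/*
 * Example (illustrative): on a vdev with 200 metaslabs, metaslab 0 keeps
 * weight = 2 * space, metaslab 100 gets 2 * space - (100 * space) / 200 =
 * 1.5 * space, and metaslab 199 gets roughly 1.005 * space, matching the
 * 2x-to-1x multiplier described above.
 */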
856 * For locality, assign higher weight to metaslabs which have
857 * a lower offset than what we've already activated.
859 if (sm->sm_start <= mg->mg_bonus_area)
860 weight *= (metaslab_smo_bonus_pct / 100);
861 ASSERT(weight >= space &&
862 weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);
864 if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
866 * If this metaslab is one we're actively using, adjust its
867 * weight to make it preferable to any inactive metaslab so
868 * we'll polish it off.
870 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
876 metaslab_prefetch(metaslab_group_t *mg)
878 spa_t *spa = mg->mg_vd->vdev_spa;
880 avl_tree_t *t = &mg->mg_metaslab_tree;
883 mutex_enter(&mg->mg_lock);
886 * Prefetch the next potential metaslabs
888 for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
889 space_map_t *sm = msp->ms_map;
890 space_map_obj_t *smo = &msp->ms_smo;
892 /* If we have reached our prefetch limit then we're done */
893 if (m >= metaslab_prefetch_limit)
896 if (!sm->sm_loaded && smo->smo_object != 0) {
897 mutex_exit(&mg->mg_lock);
898 dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
899 0ULL, smo->smo_objsize);
900 mutex_enter(&mg->mg_lock);
903 mutex_exit(&mg->mg_lock);
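/*
 * Activate a metaslab for allocation: wait for any load already in flight,
 * load the on-disk free map if it is not yet in core, replay the deferred
 * free maps into it, update the group's bonus area, and fold the requested
 * activation weight into ms_weight so the metaslab sorts ahead of inactive
 * ones.
 */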
907 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
909 metaslab_group_t *mg = msp->ms_group;
910 space_map_t *sm = msp->ms_map;
911 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
913 ASSERT(MUTEX_HELD(&msp->ms_lock));
915 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
916 space_map_load_wait(sm);
917 if (!sm->sm_loaded) {
918 space_map_obj_t *smo = &msp->ms_smo;
920 int error = space_map_load(sm, sm_ops, SM_FREE, smo,
921 spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
923 metaslab_group_sort(msp->ms_group, msp, 0);
926 for (int t = 0; t < TXG_DEFER_SIZE; t++)
927 space_map_walk(msp->ms_defermap[t],
928 space_map_claim, sm);
933 * Track the bonus area as we activate new metaslabs.
935 if (sm->sm_start > mg->mg_bonus_area) {
936 mutex_enter(&mg->mg_lock);
937 mg->mg_bonus_area = sm->sm_start;
938 mutex_exit(&mg->mg_lock);
941 metaslab_group_sort(msp->ms_group, msp,
942 msp->ms_weight | activation_weight);
944 ASSERT(sm->sm_loaded);
945 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
951 metaslab_passivate(metaslab_t *msp, uint64_t size)
954 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
955 * this metaslab again. In that case, it had better be empty,
956 * or we would be leaving space on the table.
958 ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0);
959 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
960 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
964 * Determine if the in-core space map representation can be condensed on-disk.
965 * We would like to use the following criteria to make our decision:
967 * 1. The size of the space map object should not dramatically increase as a
968 * result of writing out our in-core free map.
970 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
971 * times the size of the in-core representation (i.e. zfs_condense_pct = 110
972 * and in-core = 1MB, minimal = 1.1MB).
974 * Checking the first condition is tricky since we don't want to walk
975 * the entire AVL tree calculating the estimated on-disk size. Instead we
976 * use the size-ordered AVL tree in the space map and calculate the
977 * size required for the largest segment in our in-core free map. If the
978 * size required to represent that segment on disk is larger than the space
979 * map object then we avoid condensing this map.
981 * To determine the second criterion we use a best-case estimate and assume
982 * each segment can be represented on-disk as a single 64-bit entry. We refer
983 * to this best-case estimate as the space map's minimal form.
986 metaslab_should_condense(metaslab_t *msp)
988 space_map_t *sm = msp->ms_map;
989 space_map_obj_t *smo = &msp->ms_smo_syncing;
991 uint64_t size, entries, segsz;
993 ASSERT(MUTEX_HELD(&msp->ms_lock));
994 ASSERT(sm->sm_loaded);
997 * Use the sm_pp_root AVL tree, which is ordered by size, to obtain
998 * the largest segment in the in-core free map. If the tree is
999 * empty then we should condense the map.
1001 ss = avl_last(sm->sm_pp_root);
1006 * Calculate the number of 64-bit entries this segment would
1007 * require when written to disk. If this single segment would be
1008 * larger on-disk than the entire current on-disk structure, then
1009 * clearly condensing will increase the on-disk structure size.
1011 size = (ss->ss_end - ss->ss_start) >> sm->sm_shift;
1012 entries = size / (MIN(size, SM_RUN_MAX));
1013 segsz = entries * sizeof (uint64_t);
1015 return (segsz <= smo->smo_objsize &&
1016 smo->smo_objsize >= (zfs_condense_pct *
1017 sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100);
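/*
 * Example (illustrative): a free map with 1,000 in-core segments has a
 * minimal form of 8,000 bytes; with zfs_condense_pct = 200 we condense only
 * if smo_objsize is at least 16,000 bytes and the largest single segment,
 * written out on its own, would not already exceed smo_objsize.
 */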
1021 * Condense the on-disk space map representation to its minimized form.
1022 * The minimized form consists of a small number of allocations followed by
1023 * the in-core free map.
1026 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1028 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1029 space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK];
1030 space_map_t condense_map;
1031 space_map_t *sm = msp->ms_map;
1032 objset_t *mos = spa_meta_objset(spa);
1033 space_map_obj_t *smo = &msp->ms_smo_syncing;
1035 ASSERT(MUTEX_HELD(&msp->ms_lock));
1036 ASSERT3U(spa_sync_pass(spa), ==, 1);
1037 ASSERT(sm->sm_loaded);
1039 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1040 "smo size %llu, segments %lu", txg,
1041 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1042 smo->smo_objsize, avl_numnodes(&sm->sm_root));
1045 * Create a map that is a 100% allocated map. We remove segments
1046 * that have been freed in this txg, any deferred frees that exist,
1047 * and any allocation in the future. Removing segments should be
1048 * a relatively inexpensive operation since we expect these maps to
1049 * contain a small number of nodes.
1051 space_map_create(&condense_map, sm->sm_start, sm->sm_size,
1052 sm->sm_shift, sm->sm_lock);
1053 space_map_add(&condense_map, condense_map.sm_start,
1054 condense_map.sm_size);
1057 * Remove what's been freed in this txg from the condense_map.
1058 * Since we're in sync_pass 1, we know that all the frees from
1059 * this txg are in the freemap.
1061 space_map_walk(freemap, space_map_remove, &condense_map);
1063 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1064 space_map_walk(msp->ms_defermap[t],
1065 space_map_remove, &condense_map);
1067 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1068 space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK],
1069 space_map_remove, &condense_map);
1072 * We're about to drop the metaslab's lock thus allowing
1073 * other consumers to change its content. Set the
1074 * space_map's sm_condensing flag to ensure that
1075 * allocations on this metaslab do not occur while we're
1076 * in the middle of committing it to disk. This is only critical
1077 * for the ms_map as all other space_maps use per txg
1078 * views of their content.
1080 sm->sm_condensing = B_TRUE;
1082 mutex_exit(&msp->ms_lock);
1083 space_map_truncate(smo, mos, tx);
1084 mutex_enter(&msp->ms_lock);
1087 * While we would ideally like to create a space_map representation
1088 * that consists only of allocation records, doing so can be
1089 * prohibitively expensive because the in-core free map can be
1090 * large, and therefore computationally expensive to subtract
1091 * from the condense_map. Instead we sync out two maps, a cheap
1092 * allocation only map followed by the in-core free map. While not
1093 * optimal, this is typically close to optimal, and much cheaper to
1094 * compute.
1096 space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx);
1097 space_map_vacate(&condense_map, NULL, NULL);
1098 space_map_destroy(&condense_map);
1100 space_map_sync(sm, SM_FREE, smo, mos, tx);
1101 sm->sm_condensing = B_FALSE;
1103 spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, "
1104 "smo size %llu", txg,
1105 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1110 * Write a metaslab to disk in the context of the specified transaction group.
1113 metaslab_sync(metaslab_t *msp, uint64_t txg)
1115 vdev_t *vd = msp->ms_group->mg_vd;
1116 spa_t *spa = vd->vdev_spa;
1117 objset_t *mos = spa_meta_objset(spa);
1118 space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK];
1119 space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK];
1120 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1121 space_map_t *sm = msp->ms_map;
1122 space_map_obj_t *smo = &msp->ms_smo_syncing;
1126 ASSERT(!vd->vdev_ishole);
1129 * This metaslab has just been added so there's no work to do now.
1131 if (*freemap == NULL) {
1132 ASSERT3P(allocmap, ==, NULL);
1136 ASSERT3P(allocmap, !=, NULL);
1137 ASSERT3P(*freemap, !=, NULL);
1138 ASSERT3P(*freed_map, !=, NULL);
1140 if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0)
1144 * The only state that can actually be changing concurrently with
1145 * metaslab_sync() is the metaslab's ms_map. No other thread can
1146 * be modifying this txg's allocmap, freemap, freed_map, or smo.
1147 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
1148 * We drop it whenever we call into the DMU, because the DMU
1149 * can call down to us (e.g. via zio_free()) at any time.
1152 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1154 if (smo->smo_object == 0) {
1155 ASSERT(smo->smo_objsize == 0);
1156 ASSERT(smo->smo_alloc == 0);
1157 smo->smo_object = dmu_object_alloc(mos,
1158 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1159 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1160 ASSERT(smo->smo_object != 0);
1161 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1162 (sm->sm_start >> vd->vdev_ms_shift),
1163 sizeof (uint64_t), &smo->smo_object, tx);
1166 mutex_enter(&msp->ms_lock);
1168 if (sm->sm_loaded && spa_sync_pass(spa) == 1 &&
1169 metaslab_should_condense(msp)) {
1170 metaslab_condense(msp, txg, tx);
1172 space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
1173 space_map_sync(*freemap, SM_FREE, smo, mos, tx);
1176 space_map_vacate(allocmap, NULL, NULL);
1179 * For sync pass 1, we avoid walking the entire space map and
1180 * instead will just swap the pointers for freemap and
1181 * freed_map. We can safely do this since the freed_map is
1182 * guaranteed to be empty on the initial pass.
1184 if (spa_sync_pass(spa) == 1) {
1185 ASSERT0((*freed_map)->sm_space);
1186 ASSERT0(avl_numnodes(&(*freed_map)->sm_root));
1187 space_map_swap(freemap, freed_map);
1189 space_map_vacate(*freemap, space_map_add, *freed_map);
1192 ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space);
1193 ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space);
1195 mutex_exit(&msp->ms_lock);
1197 VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1198 dmu_buf_will_dirty(db, tx);
1199 ASSERT3U(db->db_size, >=, sizeof (*smo));
1200 bcopy(smo, db->db_data, sizeof (*smo));
1201 dmu_buf_rele(db, FTAG);
1207 * Called after a transaction group has completely synced to mark
1208 * all of the metaslab's free space as usable.
1211 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1213 space_map_obj_t *smo = &msp->ms_smo;
1214 space_map_obj_t *smosync = &msp->ms_smo_syncing;
1215 space_map_t *sm = msp->ms_map;
1216 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1217 space_map_t **defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1218 metaslab_group_t *mg = msp->ms_group;
1219 vdev_t *vd = mg->mg_vd;
1220 int64_t alloc_delta, defer_delta;
1222 ASSERT(!vd->vdev_ishole);
1224 mutex_enter(&msp->ms_lock);
1227 * If this metaslab is just becoming available, initialize its
1228 * allocmaps, freemaps, and defermap and add its capacity to the vdev.
1230 if (*freed_map == NULL) {
1231 ASSERT(*defer_map == NULL);
1232 for (int t = 0; t < TXG_SIZE; t++) {
1233 msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t),
1235 space_map_create(msp->ms_allocmap[t], sm->sm_start,
1236 sm->sm_size, sm->sm_shift, sm->sm_lock);
1237 msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t),
1239 space_map_create(msp->ms_freemap[t], sm->sm_start,
1240 sm->sm_size, sm->sm_shift, sm->sm_lock);
1243 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1244 msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t),
1246 space_map_create(msp->ms_defermap[t], sm->sm_start,
1247 sm->sm_size, sm->sm_shift, sm->sm_lock);
1250 freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1251 defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1253 vdev_space_update(vd, 0, 0, sm->sm_size);
1256 alloc_delta = smosync->smo_alloc - smo->smo_alloc;
1257 defer_delta = (*freed_map)->sm_space - (*defer_map)->sm_space;
1259 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1261 ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0);
1262 ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0);
1265 * If there's a space_map_load() in progress, wait for it to complete
1266 * so that we have a consistent view of the in-core space map.
1268 space_map_load_wait(sm);
1271 * Move the frees from the defer_map to this map (if it's loaded).
1272 * Swap the freed_map and the defer_map -- this is safe to do
1273 * because we've just emptied out the defer_map.
1275 space_map_vacate(*defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
1276 ASSERT0((*defer_map)->sm_space);
1277 ASSERT0(avl_numnodes(&(*defer_map)->sm_root));
1278 space_map_swap(freed_map, defer_map);
1282 msp->ms_deferspace += defer_delta;
1283 ASSERT3S(msp->ms_deferspace, >=, 0);
1284 ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
1285 if (msp->ms_deferspace != 0) {
1287 * Keep syncing this metaslab until all deferred frees
1288 * are back in circulation.
1290 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1294 * If the map is loaded but no longer active, evict it as soon as all
1295 * future allocations have synced. (If we unloaded it now and then
1296 * loaded a moment later, the map wouldn't reflect those allocations.)
1298 if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1301 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1302 if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
1305 if (evictable && !metaslab_debug)
1306 space_map_unload(sm);
1309 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1311 mutex_exit(&msp->ms_lock);
1315 metaslab_sync_reassess(metaslab_group_t *mg)
1317 vdev_t *vd = mg->mg_vd;
1318 int64_t failures = mg->mg_alloc_failures;
1321 * Re-evaluate all metaslabs which have lower offsets than the
1322 * bonus area.
1324 for (int m = 0; m < vd->vdev_ms_count; m++) {
1325 metaslab_t *msp = vd->vdev_ms[m];
1327 if (msp->ms_map->sm_start > mg->mg_bonus_area)
1330 mutex_enter(&msp->ms_lock);
1331 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1332 mutex_exit(&msp->ms_lock);
1335 atomic_add_64(&mg->mg_alloc_failures, -failures);
1338 * Prefetch the next potential metaslabs
1340 metaslab_prefetch(mg);
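/*
 * Return the distance, rounded to metaslab-sized units, between the given
 * DVA and the start of this metaslab. DVAs on a different top-level vdev
 * are treated as maximally distant (1ULL << 63).
 */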
1344 metaslab_distance(metaslab_t *msp, dva_t *dva)
1346 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1347 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1348 uint64_t start = msp->ms_map->sm_start >> ms_shift;
1350 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1351 return (1ULL << 63);
1354 return ((start - offset) << ms_shift);
1356 return ((offset - start) << ms_shift);
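/*
 * Walk this group's metaslabs in descending weight order looking for one
 * whose weight can cover asize and which is sufficiently far from the DVAs
 * already allocated for this block, activate it, and allocate from its
 * space map. Returns the allocated offset, or -1ULL if the group cannot
 * satisfy the request.
 */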
1361 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1362 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1364 spa_t *spa = mg->mg_vd->vdev_spa;
1365 metaslab_t *msp = NULL;
1366 uint64_t offset = -1ULL;
1367 avl_tree_t *t = &mg->mg_metaslab_tree;
1368 uint64_t activation_weight;
1369 uint64_t target_distance;
1372 activation_weight = METASLAB_WEIGHT_PRIMARY;
1373 for (i = 0; i < d; i++) {
1374 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1375 activation_weight = METASLAB_WEIGHT_SECONDARY;
1381 boolean_t was_active;
1383 mutex_enter(&mg->mg_lock);
1384 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1385 if (msp->ms_weight < asize) {
1386 spa_dbgmsg(spa, "%s: failed to meet weight "
1387 "requirement: vdev %llu, txg %llu, mg %p, "
1388 "msp %p, psize %llu, asize %llu, "
1389 "failures %llu, weight %llu",
1390 spa_name(spa), mg->mg_vd->vdev_id, txg,
1391 mg, msp, psize, asize,
1392 mg->mg_alloc_failures, msp->ms_weight);
1393 mutex_exit(&mg->mg_lock);
1398 * If the selected metaslab is condensing, skip it.
1400 if (msp->ms_map->sm_condensing)
1403 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1404 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1407 target_distance = min_distance +
1408 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
1410 for (i = 0; i < d; i++)
1411 if (metaslab_distance(msp, &dva[i]) <
1417 mutex_exit(&mg->mg_lock);
1422 * If we've already reached the allowable number of failed
1423 * allocation attempts on this metaslab group then we
1424 * consider skipping it. We skip it only if we're allowed
1425 * to "fast" gang, the physical size is larger than
1426 * a gang block, and we're attempting to allocate from
1427 * the primary metaslab.
1429 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1430 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1431 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1432 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1433 "vdev %llu, txg %llu, mg %p, psize %llu, "
1434 "asize %llu, failures %llu", spa_name(spa),
1435 mg->mg_vd->vdev_id, txg, mg, psize, asize,
1436 mg->mg_alloc_failures);
1440 mutex_enter(&msp->ms_lock);
1443 * Ensure that the metaslab we have selected is still
1444 * capable of handling our request. It's possible that
1445 * another thread may have changed the weight while we
1446 * were blocked on the metaslab lock.
1448 if (msp->ms_weight < asize || (was_active &&
1449 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1450 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1451 mutex_exit(&msp->ms_lock);
1455 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1456 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1457 metaslab_passivate(msp,
1458 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1459 mutex_exit(&msp->ms_lock);
1463 if (metaslab_activate(msp, activation_weight) != 0) {
1464 mutex_exit(&msp->ms_lock);
1469 * If this metaslab is currently condensing then pick again as
1470 * we can't manipulate this metaslab until it's committed
1471 * to disk.
1473 if (msp->ms_map->sm_condensing) {
1474 mutex_exit(&msp->ms_lock);
1478 if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL)
1481 atomic_inc_64(&mg->mg_alloc_failures);
1483 metaslab_passivate(msp, space_map_maxsize(msp->ms_map));
1485 mutex_exit(&msp->ms_lock);
1488 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1489 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1491 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize);
1493 mutex_exit(&msp->ms_lock);
1499 * Allocate a block for the specified i/o.
1502 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1503 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1505 metaslab_group_t *mg, *rotor;
1509 int zio_lock = B_FALSE;
1510 boolean_t allocatable;
1511 uint64_t offset = -1ULL;
1515 ASSERT(!DVA_IS_VALID(&dva[d]));
1518 * For testing, make some blocks above a certain size be gang blocks.
1520 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1521 return (SET_ERROR(ENOSPC));
1524 * Start at the rotor and loop through all mgs until we find something.
1525 * Note that there's no locking on mc_rotor or mc_aliquot because
1526 * nothing actually breaks if we miss a few updates -- we just won't
1527 * allocate quite as evenly. It all balances out over time.
1529 * If we are doing ditto or log blocks, try to spread them across
1530 * consecutive vdevs. If we're forced to reuse a vdev before we've
1531 * allocated all of our ditto blocks, then try and spread them out on
1532 * that vdev as much as possible. If it turns out to not be possible,
1533 * gradually lower our standards until anything becomes acceptable.
1534 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1535 * gives us hope of containing our fault domains to something we're
1536 * able to reason about. Otherwise, any two top-level vdev failures
1537 * will guarantee the loss of data. With consecutive allocation,
1538 * only two adjacent top-level vdev failures will result in data loss.
1540 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1541 * ourselves on the same vdev as our gang block header. That
1542 * way, we can hope for locality in vdev_cache, plus it makes our
1543 * fault domains something tractable.
1546 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1549 * It's possible the vdev we're using as the hint no
1550 * longer exists (i.e. removed). Consult the rotor when
1551 * that happens.
1556 if (flags & METASLAB_HINTBP_AVOID &&
1557 mg->mg_next != NULL)
1562 } else if (d != 0) {
1563 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1564 mg = vd->vdev_mg->mg_next;
1570 * If the hint put us into the wrong metaslab class, or into a
1571 * metaslab group that has been passivated, just follow the rotor.
1573 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1580 ASSERT(mg->mg_activation_count == 1);
1585 * Don't allocate from faulted devices.
1588 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1589 allocatable = vdev_allocatable(vd);
1590 spa_config_exit(spa, SCL_ZIO, FTAG);
1592 allocatable = vdev_allocatable(vd);
1598 * Avoid writing single-copy data to a failing vdev
1599 * unless the user instructs us that it is okay.
1601 if ((vd->vdev_stat.vs_write_errors > 0 ||
1602 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1603 d == 0 && dshift == 3 &&
1604 !(zfs_write_to_degraded && vd->vdev_state ==
1605 VDEV_STATE_DEGRADED)) {
1610 ASSERT(mg->mg_class == mc);
1612 distance = vd->vdev_asize >> dshift;
1613 if (distance <= (1ULL << vd->vdev_ms_shift))
1618 asize = vdev_psize_to_asize(vd, psize);
1619 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1621 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1623 if (offset != -1ULL) {
1625 * If we've just selected this metaslab group,
1626 * figure out whether the corresponding vdev is
1627 * over- or under-used relative to the pool,
1628 * and set an allocation bias to even it out.
1630 if (mc->mc_aliquot == 0) {
1631 vdev_stat_t *vs = &vd->vdev_stat;
1634 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1635 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1638 * Calculate how much more or less we should
1639 * try to allocate from this device during
1640 * this iteration around the rotor.
1641 * For example, if a device is 80% full
1642 * and the pool is 20% full then we should
1643 * reduce allocations by 60% on this device.
1645 * mg_bias = (20 - 80) * 512K / 100 = -307K
1647 * This reduces allocations by 307K for this
1648 * iteration.
1650 mg->mg_bias = ((cu - vu) *
1651 (int64_t)mg->mg_aliquot) / 100;
1654 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
1655 mg->mg_aliquot + mg->mg_bias) {
1656 mc->mc_rotor = mg->mg_next;
1660 DVA_SET_VDEV(&dva[d], vd->vdev_id);
1661 DVA_SET_OFFSET(&dva[d], offset);
1662 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
1663 DVA_SET_ASIZE(&dva[d], asize);
1668 mc->mc_rotor = mg->mg_next;
1670 } while ((mg = mg->mg_next) != rotor);
1674 ASSERT(dshift < 64);
1678 if (!allocatable && !zio_lock) {
1684 bzero(&dva[d], sizeof (dva_t));
1686 return (SET_ERROR(ENOSPC));
1690 * Free the block represented by DVA in the context of the specified
1691 * transaction group.
1694 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
1696 uint64_t vdev = DVA_GET_VDEV(dva);
1697 uint64_t offset = DVA_GET_OFFSET(dva);
1698 uint64_t size = DVA_GET_ASIZE(dva);
1702 ASSERT(DVA_IS_VALID(dva));
1704 if (txg > spa_freeze_txg(spa))
1707 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1708 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
1709 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
1710 (u_longlong_t)vdev, (u_longlong_t)offset);
1715 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1717 if (DVA_GET_GANG(dva))
1718 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1720 mutex_enter(&msp->ms_lock);
1723 space_map_remove(msp->ms_allocmap[txg & TXG_MASK],
1725 space_map_free(msp->ms_map, offset, size);
1727 if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0)
1728 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1729 space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size);
1732 mutex_exit(&msp->ms_lock);
1736 * Intent log support: upon opening the pool after a crash, notify the SPA
1737 * of blocks that the intent log has allocated for immediate write, but
1738 * which are still considered free by the SPA because the last transaction
1739 * group didn't commit yet.
1742 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
1744 uint64_t vdev = DVA_GET_VDEV(dva);
1745 uint64_t offset = DVA_GET_OFFSET(dva);
1746 uint64_t size = DVA_GET_ASIZE(dva);
1751 ASSERT(DVA_IS_VALID(dva));
1753 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1754 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
1755 return (SET_ERROR(ENXIO));
1757 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1759 if (DVA_GET_GANG(dva))
1760 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1762 mutex_enter(&msp->ms_lock);
1764 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded)
1765 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
1767 if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
1768 error = SET_ERROR(ENOENT);
1770 if (error || txg == 0) { /* txg == 0 indicates dry run */
1771 mutex_exit(&msp->ms_lock);
1775 space_map_claim(msp->ms_map, offset, size);
1777 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
1778 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1779 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1780 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size);
1783 mutex_exit(&msp->ms_lock);
1789 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
1790 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
1792 dva_t *dva = bp->blk_dva;
1793 dva_t *hintdva = hintbp->blk_dva;
1796 ASSERT(bp->blk_birth == 0);
1797 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
1799 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1801 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
1802 spa_config_exit(spa, SCL_ALLOC, FTAG);
1803 return (SET_ERROR(ENOSPC));
1806 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
1807 ASSERT(BP_GET_NDVAS(bp) == 0);
1808 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
1810 for (int d = 0; d < ndvas; d++) {
1811 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
1814 for (d--; d >= 0; d--) {
1815 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
1816 bzero(&dva[d], sizeof (dva_t));
1818 spa_config_exit(spa, SCL_ALLOC, FTAG);
1823 ASSERT(BP_GET_NDVAS(bp) == ndvas);
1825 spa_config_exit(spa, SCL_ALLOC, FTAG);
1827 BP_SET_BIRTH(bp, txg, txg);
1833 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
1835 const dva_t *dva = bp->blk_dva;
1836 int ndvas = BP_GET_NDVAS(bp);
1838 ASSERT(!BP_IS_HOLE(bp));
1839 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
1841 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
1843 for (int d = 0; d < ndvas; d++)
1844 metaslab_free_dva(spa, &dva[d], txg, now);
1846 spa_config_exit(spa, SCL_FREE, FTAG);
1850 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
1852 const dva_t *dva = bp->blk_dva;
1853 int ndvas = BP_GET_NDVAS(bp);
1856 ASSERT(!BP_IS_HOLE(bp));
1860 * First do a dry run to make sure all DVAs are claimable,
1861 * so we don't have to unwind from partial failures below.
1863 if ((error = metaslab_claim(spa, bp, 0)) != 0)
1867 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1869 for (int d = 0; d < ndvas; d++)
1870 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
1873 spa_config_exit(spa, SCL_ALLOC, FTAG);
1875 ASSERT(error == 0 || txg == 0);
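/*
 * Debug helper for metaslab_check_free(): panic if the range being freed is
 * already marked free in the given space map.
 */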
1881 checkmap(space_map_t *sm, uint64_t off, uint64_t size)
1886 mutex_enter(sm->sm_lock);
1887 ss = space_map_find(sm, off, size, &where);
1889 panic("freeing free block; ss=%p", (void *)ss);
1890 mutex_exit(sm->sm_lock);
1894 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
1896 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
1899 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1900 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1901 uint64_t vdid = DVA_GET_VDEV(&bp->blk_dva[i]);
1902 vdev_t *vd = vdev_lookup_top(spa, vdid);
1903 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[i]);
1904 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
1905 metaslab_t *ms = vd->vdev_ms[off >> vd->vdev_ms_shift];
1907 if (ms->ms_map->sm_loaded)
1908 checkmap(ms->ms_map, off, size);
1910 for (int j = 0; j < TXG_SIZE; j++)
1911 checkmap(ms->ms_freemap[j], off, size);
1912 for (int j = 0; j < TXG_DEFER_SIZE; j++)
1913 checkmap(ms->ms_defermap[j], off, size);
1915 spa_config_exit(spa, SCL_VDEV, FTAG);