4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
36 * Allow allocations to switch to gang blocks quickly. We do this to
37 * avoid having to load lots of space_maps in a given txg. There are,
38 * however, some cases where we want to avoid "fast" ganging and instead
39 * we want to do an exhaustive search of all metaslabs on this device.
40 * Currently we don't allow any gang, zil, or dump device related allocations
43 #define CAN_FASTGANG(flags) \
44 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
45 METASLAB_GANG_AVOID)))
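/*
 * Illustrative example: CAN_FASTGANG(0) is true for an ordinary write,
 * while CAN_FASTGANG(METASLAB_GANG_CHILD) is false, so gang-related
 * allocations always fall through to the exhaustive search described
 * above.
 */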
47 uint64_t metaslab_aliquot = 512ULL << 10;
48 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
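/*
 * Illustrative note: with the default of SPA_MAXBLOCKSIZE + 1 the test
 * hook in metaslab_alloc_dva() never fires; lowering the value makes
 * roughly one in four allocations of at least that size gang instead.
 */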
51 * The in-core space map representation is more compact than its on-disk form.
52 * The zfs_condense_pct determines how much larger the on-disk space_map
53 * must be, relative to the minimal in-core form, before we condense it.
54 * Values should be greater than or equal to 100.
56 int zfs_condense_pct = 200;
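/*
 * Worked example (illustrative): with the default of 200, an on-disk
 * space map is only condensed once it has grown to at least twice the
 * size of its minimal in-core form (one 64-bit entry per free segment);
 * see metaslab_should_condense() below.
 */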
59 * This value defines the number of allowed allocation failures per vdev.
60 * If a device reaches this threshold in a given txg then we consider skipping
61 * allocations on that device.
63 int zfs_mg_alloc_failures = 0;
65 SYSCTL_DECL(_vfs_zfs);
66 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RDTUN,
67 &zfs_mg_alloc_failures, 0,
68 "Number of allowed allocation failures per vdev");
69 TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
72 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
74 static int metaslab_debug = 0;
77 * Minimum size which forces the dynamic allocator to change
78 * its allocation strategy. Once the space map cannot satisfy
79 * an allocation of this size then it switches to using a more
80 * aggressive strategy (i.e. search by size rather than offset).
82 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
85 * The minimum free space, in percent, which must be available
86 * in a space map to continue allocations in a first-fit fashion.
87 * Once the space_map's free space drops below this level we dynamically
88 * switch to using best-fit allocations.
90 int metaslab_df_free_pct = 4;
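/*
 * Worked example (illustrative): with the defaults above,
 * metaslab_df_alloc() keeps allocating first-fit from the offset-sorted
 * tree until either no free segment of at least
 * metaslab_df_alloc_threshold remains or less than 4% of the space map
 * is free, at which point it switches to the size-sorted (best-fit)
 * tree.
 */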
93 * A metaslab is considered "free" if it contains a contiguous
94 * segment which is greater than metaslab_min_alloc_size.
96 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
99 * Max number of space_maps to prefetch.
101 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
104 * Percentage bonus multiplier for metaslabs that are in the bonus area.
106 int metaslab_smo_bonus_pct = 150;
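/*
 * Illustrative note: metaslab_weight() applies this bonus with integer
 * arithmetic (weight *= metaslab_smo_bonus_pct / 100), so the default of
 * 150 truncates to a 1x multiplier; a value of 200 or more is needed
 * before bonus-area metaslabs actually gain extra weight.
 */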
109 * Should we be willing to write data to degraded vdevs?
111 boolean_t zfs_write_to_degraded = B_FALSE;
112 SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
113 &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
114 TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
117 * ==========================================================================
119 * ==========================================================================
122 metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
124 metaslab_class_t *mc;
126 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
136 metaslab_class_destroy(metaslab_class_t *mc)
138 ASSERT(mc->mc_rotor == NULL);
139 ASSERT(mc->mc_alloc == 0);
140 ASSERT(mc->mc_deferred == 0);
141 ASSERT(mc->mc_space == 0);
142 ASSERT(mc->mc_dspace == 0);
144 kmem_free(mc, sizeof (metaslab_class_t));
148 metaslab_class_validate(metaslab_class_t *mc)
150 metaslab_group_t *mg;
154 * Must hold one of the spa_config locks.
156 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
157 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
159 if ((mg = mc->mc_rotor) == NULL)
164 ASSERT(vd->vdev_mg != NULL);
165 ASSERT3P(vd->vdev_top, ==, vd);
166 ASSERT3P(mg->mg_class, ==, mc);
167 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
168 } while ((mg = mg->mg_next) != mc->mc_rotor);
174 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
175 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
177 atomic_add_64(&mc->mc_alloc, alloc_delta);
178 atomic_add_64(&mc->mc_deferred, defer_delta);
179 atomic_add_64(&mc->mc_space, space_delta);
180 atomic_add_64(&mc->mc_dspace, dspace_delta);
184 metaslab_class_get_alloc(metaslab_class_t *mc)
186 return (mc->mc_alloc);
190 metaslab_class_get_deferred(metaslab_class_t *mc)
192 return (mc->mc_deferred);
196 metaslab_class_get_space(metaslab_class_t *mc)
198 return (mc->mc_space);
202 metaslab_class_get_dspace(metaslab_class_t *mc)
204 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
208 * ==========================================================================
210 * ==========================================================================
213 metaslab_compare(const void *x1, const void *x2)
215 const metaslab_t *m1 = x1;
216 const metaslab_t *m2 = x2;
218 if (m1->ms_weight < m2->ms_weight)
220 if (m1->ms_weight > m2->ms_weight)
224 * If the weights are identical, use the offset to force uniqueness.
226 if (m1->ms_map->sm_start < m2->ms_map->sm_start)
228 if (m1->ms_map->sm_start > m2->ms_map->sm_start)
231 ASSERT3P(m1, ==, m2);
237 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
239 metaslab_group_t *mg;
241 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
242 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
243 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
244 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
247 mg->mg_activation_count = 0;
253 metaslab_group_destroy(metaslab_group_t *mg)
255 ASSERT(mg->mg_prev == NULL);
256 ASSERT(mg->mg_next == NULL);
258 * We may have gone below zero with the activation count
259 * either because we never activated in the first place or
260 * because we're done, and possibly removing the vdev.
262 ASSERT(mg->mg_activation_count <= 0);
264 avl_destroy(&mg->mg_metaslab_tree);
265 mutex_destroy(&mg->mg_lock);
266 kmem_free(mg, sizeof (metaslab_group_t));
270 metaslab_group_activate(metaslab_group_t *mg)
272 metaslab_class_t *mc = mg->mg_class;
273 metaslab_group_t *mgprev, *mgnext;
275 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
277 ASSERT(mc->mc_rotor != mg);
278 ASSERT(mg->mg_prev == NULL);
279 ASSERT(mg->mg_next == NULL);
280 ASSERT(mg->mg_activation_count <= 0);
282 if (++mg->mg_activation_count <= 0)
285 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
287 if ((mgprev = mc->mc_rotor) == NULL) {
291 mgnext = mgprev->mg_next;
292 mg->mg_prev = mgprev;
293 mg->mg_next = mgnext;
294 mgprev->mg_next = mg;
295 mgnext->mg_prev = mg;
301 metaslab_group_passivate(metaslab_group_t *mg)
303 metaslab_class_t *mc = mg->mg_class;
304 metaslab_group_t *mgprev, *mgnext;
306 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
308 if (--mg->mg_activation_count != 0) {
309 ASSERT(mc->mc_rotor != mg);
310 ASSERT(mg->mg_prev == NULL);
311 ASSERT(mg->mg_next == NULL);
312 ASSERT(mg->mg_activation_count < 0);
316 mgprev = mg->mg_prev;
317 mgnext = mg->mg_next;
322 mc->mc_rotor = mgnext;
323 mgprev->mg_next = mgnext;
324 mgnext->mg_prev = mgprev;
332 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
334 mutex_enter(&mg->mg_lock);
335 ASSERT(msp->ms_group == NULL);
338 avl_add(&mg->mg_metaslab_tree, msp);
339 mutex_exit(&mg->mg_lock);
343 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
345 mutex_enter(&mg->mg_lock);
346 ASSERT(msp->ms_group == mg);
347 avl_remove(&mg->mg_metaslab_tree, msp);
348 msp->ms_group = NULL;
349 mutex_exit(&mg->mg_lock);
353 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
356 * Although in principle the weight can be any value, in
357 * practice we do not use values in the range [1, 510].
359 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
360 ASSERT(MUTEX_HELD(&msp->ms_lock));
362 mutex_enter(&mg->mg_lock);
363 ASSERT(msp->ms_group == mg);
364 avl_remove(&mg->mg_metaslab_tree, msp);
365 msp->ms_weight = weight;
366 avl_add(&mg->mg_metaslab_tree, msp);
367 mutex_exit(&mg->mg_lock);
371 * ==========================================================================
372 * Common allocator routines
373 * ==========================================================================
376 metaslab_segsize_compare(const void *x1, const void *x2)
378 const space_seg_t *s1 = x1;
379 const space_seg_t *s2 = x2;
380 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
381 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
383 if (ss_size1 < ss_size2)
385 if (ss_size1 > ss_size2)
388 if (s1->ss_start < s2->ss_start)
390 if (s1->ss_start > s2->ss_start)
397 * This is a helper function that can be used by the allocator to find
398 * a suitable block to allocate. This will search the specified AVL
399 * tree looking for a block that matches the specified criteria.
402 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
405 space_seg_t *ss, ssearch;
408 ssearch.ss_start = *cursor;
409 ssearch.ss_end = *cursor + size;
411 ss = avl_find(t, &ssearch, &where);
413 ss = avl_nearest(t, where, AVL_AFTER);
416 uint64_t offset = P2ROUNDUP(ss->ss_start, align);
418 if (offset + size <= ss->ss_end) {
419 *cursor = offset + size;
422 ss = AVL_NEXT(t, ss);
426 * If we know we've searched the whole map (*cursor == 0), give up.
427 * Otherwise, reset the cursor to the beginning and try again.
433 return (metaslab_block_picker(t, cursor, size, align));
437 metaslab_pp_load(space_map_t *sm)
441 ASSERT(sm->sm_ppd == NULL);
442 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
444 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
445 avl_create(sm->sm_pp_root, metaslab_segsize_compare,
446 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
448 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
449 avl_add(sm->sm_pp_root, ss);
453 metaslab_pp_unload(space_map_t *sm)
457 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
460 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
461 /* tear down the tree */
464 avl_destroy(sm->sm_pp_root);
465 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
466 sm->sm_pp_root = NULL;
471 metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
473 /* No need to update cursor */
478 metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
480 /* No need to update cursor */
484 * Return the maximum contiguous segment within the metaslab.
487 metaslab_pp_maxsize(space_map_t *sm)
489 avl_tree_t *t = sm->sm_pp_root;
492 if (t == NULL || (ss = avl_last(t)) == NULL)
495 return (ss->ss_end - ss->ss_start);
499 * ==========================================================================
500 * The first-fit block allocator
501 * ==========================================================================
504 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
506 avl_tree_t *t = &sm->sm_root;
507 uint64_t align = size & -size;
508 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
510 return (metaslab_block_picker(t, cursor, size, align));
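/*
 * Worked example (illustrative): a 24K request has
 * align = 24K & -24K = 8K, so it advances the cursor at
 * sm_ppd[highbit(8K) - 1] == sm_ppd[13], shared with every other
 * request whose natural alignment is 8K.
 */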
515 metaslab_ff_fragmented(space_map_t *sm)
520 static space_map_ops_t metaslab_ff_ops = {
527 metaslab_ff_fragmented
531 * ==========================================================================
532 * Dynamic block allocator -
533 * Uses the first-fit allocation scheme until space gets low and then
534 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
535 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
536 * ==========================================================================
539 metaslab_df_alloc(space_map_t *sm, uint64_t size)
541 avl_tree_t *t = &sm->sm_root;
542 uint64_t align = size & -size;
543 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
544 uint64_t max_size = metaslab_pp_maxsize(sm);
545 int free_pct = sm->sm_space * 100 / sm->sm_size;
547 ASSERT(MUTEX_HELD(sm->sm_lock));
548 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
554 * If we're running low on space, switch to using the
555 * size-sorted AVL tree (best-fit).
557 if (max_size < metaslab_df_alloc_threshold ||
558 free_pct < metaslab_df_free_pct) {
563 return (metaslab_block_picker(t, cursor, size, 1ULL));
567 metaslab_df_fragmented(space_map_t *sm)
569 uint64_t max_size = metaslab_pp_maxsize(sm);
570 int free_pct = sm->sm_space * 100 / sm->sm_size;
572 if (max_size >= metaslab_df_alloc_threshold &&
573 free_pct >= metaslab_df_free_pct)
579 static space_map_ops_t metaslab_df_ops = {
586 metaslab_df_fragmented
590 * ==========================================================================
591 * Other experimental allocators
592 * ==========================================================================
595 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
597 avl_tree_t *t = &sm->sm_root;
598 uint64_t *cursor = (uint64_t *)sm->sm_ppd;
599 uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
600 uint64_t max_size = metaslab_pp_maxsize(sm);
601 uint64_t rsize = size;
604 ASSERT(MUTEX_HELD(sm->sm_lock));
605 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
610 ASSERT3U(*extent_end, >=, *cursor);
613 * If we're running low on space, switch to using the
614 * size-sorted AVL tree (best-fit).
616 if ((*cursor + size) > *extent_end) {
619 *cursor = *extent_end = 0;
621 if (max_size > 2 * SPA_MAXBLOCKSIZE)
622 rsize = MIN(metaslab_min_alloc_size, max_size);
623 offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
625 *cursor = offset + size;
627 offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
629 ASSERT3U(*cursor, <=, *extent_end);
634 metaslab_cdf_fragmented(space_map_t *sm)
636 uint64_t max_size = metaslab_pp_maxsize(sm);
638 if (max_size > (metaslab_min_alloc_size * 10))
643 static space_map_ops_t metaslab_cdf_ops = {
650 metaslab_cdf_fragmented
653 uint64_t metaslab_ndf_clump_shift = 4;
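/*
 * Worked example (illustrative): with the default shift of 4, a request
 * that misses its per-size cursor in metaslab_ndf_alloc() falls back to
 * the size-sorted tree and looks for a segment of about
 * 1ULL << (highbit(size) + 4) bytes -- roughly 16-32 times the request
 * -- capped at the largest free segment.
 */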
656 metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
658 avl_tree_t *t = &sm->sm_root;
660 space_seg_t *ss, ssearch;
661 uint64_t hbit = highbit(size);
662 uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
663 uint64_t max_size = metaslab_pp_maxsize(sm);
665 ASSERT(MUTEX_HELD(sm->sm_lock));
666 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
671 ssearch.ss_start = *cursor;
672 ssearch.ss_end = *cursor + size;
674 ss = avl_find(t, &ssearch, &where);
675 if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
678 ssearch.ss_start = 0;
679 ssearch.ss_end = MIN(max_size,
680 1ULL << (hbit + metaslab_ndf_clump_shift));
681 ss = avl_find(t, &ssearch, &where);
683 ss = avl_nearest(t, where, AVL_AFTER);
688 if (ss->ss_start + size <= ss->ss_end) {
689 *cursor = ss->ss_start + size;
690 return (ss->ss_start);
697 metaslab_ndf_fragmented(space_map_t *sm)
699 uint64_t max_size = metaslab_pp_maxsize(sm);
701 if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
707 static space_map_ops_t metaslab_ndf_ops = {
714 metaslab_ndf_fragmented
717 space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
720 * ==========================================================================
722 * ==========================================================================
725 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
726 uint64_t start, uint64_t size, uint64_t txg)
728 vdev_t *vd = mg->mg_vd;
731 msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
732 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
734 msp->ms_smo_syncing = *smo;
737 * We create the main space map here, but we don't create the
738 * allocmaps and freemaps until metaslab_sync_done(). This serves
739 * two purposes: it allows metaslab_sync_done() to detect the
740 * addition of new space; and for debugging, it ensures that we'd take
741 * a data fault on any attempt to use this metaslab before it's ready.
743 msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
744 space_map_create(msp->ms_map, start, size,
745 vd->vdev_ashift, &msp->ms_lock);
747 metaslab_group_add(mg, msp);
749 if (metaslab_debug && smo->smo_object != 0) {
750 mutex_enter(&msp->ms_lock);
751 VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
752 SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
753 mutex_exit(&msp->ms_lock);
757 * If we're opening an existing pool (txg == 0) or creating
758 * a new one (txg == TXG_INITIAL), all space is available now.
759 * If we're adding space to an existing pool, the new space
760 * does not become available until after this txg has synced.
762 if (txg <= TXG_INITIAL)
763 metaslab_sync_done(msp, 0);
766 vdev_dirty(vd, 0, NULL, txg);
767 vdev_dirty(vd, VDD_METASLAB, msp, txg);
774 metaslab_fini(metaslab_t *msp)
776 metaslab_group_t *mg = msp->ms_group;
778 vdev_space_update(mg->mg_vd,
779 -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size);
781 metaslab_group_remove(mg, msp);
783 mutex_enter(&msp->ms_lock);
785 space_map_unload(msp->ms_map);
786 space_map_destroy(msp->ms_map);
787 kmem_free(msp->ms_map, sizeof (*msp->ms_map));
789 for (int t = 0; t < TXG_SIZE; t++) {
790 space_map_destroy(msp->ms_allocmap[t]);
791 space_map_destroy(msp->ms_freemap[t]);
792 kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t]));
793 kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t]));
796 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
797 space_map_destroy(msp->ms_defermap[t]);
798 kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t]));
801 ASSERT0(msp->ms_deferspace);
803 mutex_exit(&msp->ms_lock);
804 mutex_destroy(&msp->ms_lock);
806 kmem_free(msp, sizeof (metaslab_t));
809 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
810 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
811 #define METASLAB_ACTIVE_MASK \
812 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
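/*
 * Illustrative note: free-space weights are far smaller in practice than
 * 1ULL << 62, so setting either of these bits guarantees that an active
 * metaslab sorts ahead of every inactive one in metaslab_compare().
 */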
815 metaslab_weight(metaslab_t *msp)
817 metaslab_group_t *mg = msp->ms_group;
818 space_map_t *sm = msp->ms_map;
819 space_map_obj_t *smo = &msp->ms_smo;
820 vdev_t *vd = mg->mg_vd;
821 uint64_t weight, space;
823 ASSERT(MUTEX_HELD(&msp->ms_lock));
826 * This vdev is in the process of being removed so there is nothing
829 if (vd->vdev_removing) {
830 ASSERT0(smo->smo_alloc);
831 ASSERT0(vd->vdev_ms_shift);
836 * The baseline weight is the metaslab's free space.
838 space = sm->sm_size - smo->smo_alloc;
842 * Modern disks have uniform bit density and constant angular velocity.
843 * Therefore, the outer recording zones are faster (higher bandwidth)
844 * than the inner zones by the ratio of outer to inner track diameter,
845 * which is typically around 2:1. We account for this by assigning
846 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
847 * In effect, this means that we'll select the metaslab with the most
848 * free bandwidth rather than simply the one with the most free space.
850 weight = 2 * weight -
851 ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
852 ASSERT(weight >= space && weight <= 2 * space);
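/*
 * Worked example (illustrative): on a vdev with 200 metaslabs, the
 * metaslab at index 0 is weighted at 2x its free space, the one at
 * index 100 at 1.5x, and the last one at just over 1x.
 */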
855 * For locality, assign higher weight to metaslabs which have
856 * a lower offset than what we've already activated.
858 if (sm->sm_start <= mg->mg_bonus_area)
859 weight *= (metaslab_smo_bonus_pct / 100);
860 ASSERT(weight >= space &&
861 weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);
863 if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
865 * If this metaslab is one we're actively using, adjust its
866 * weight to make it preferable to any inactive metaslab so
867 * we'll polish it off.
869 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
875 metaslab_prefetch(metaslab_group_t *mg)
877 spa_t *spa = mg->mg_vd->vdev_spa;
879 avl_tree_t *t = &mg->mg_metaslab_tree;
882 mutex_enter(&mg->mg_lock);
885 * Prefetch the next potential metaslabs
887 for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
888 space_map_t *sm = msp->ms_map;
889 space_map_obj_t *smo = &msp->ms_smo;
891 /* If we have reached our prefetch limit then we're done */
892 if (m >= metaslab_prefetch_limit)
895 if (!sm->sm_loaded && smo->smo_object != 0) {
896 mutex_exit(&mg->mg_lock);
897 dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
898 0ULL, smo->smo_objsize);
899 mutex_enter(&mg->mg_lock);
902 mutex_exit(&mg->mg_lock);
906 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
908 metaslab_group_t *mg = msp->ms_group;
909 space_map_t *sm = msp->ms_map;
910 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
912 ASSERT(MUTEX_HELD(&msp->ms_lock));
914 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
915 space_map_load_wait(sm);
916 if (!sm->sm_loaded) {
917 space_map_obj_t *smo = &msp->ms_smo;
919 int error = space_map_load(sm, sm_ops, SM_FREE, smo,
920 spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
922 metaslab_group_sort(msp->ms_group, msp, 0);
925 for (int t = 0; t < TXG_DEFER_SIZE; t++)
926 space_map_walk(msp->ms_defermap[t],
927 space_map_claim, sm);
932 * Track the bonus area as we activate new metaslabs.
934 if (sm->sm_start > mg->mg_bonus_area) {
935 mutex_enter(&mg->mg_lock);
936 mg->mg_bonus_area = sm->sm_start;
937 mutex_exit(&mg->mg_lock);
940 metaslab_group_sort(msp->ms_group, msp,
941 msp->ms_weight | activation_weight);
943 ASSERT(sm->sm_loaded);
944 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
950 metaslab_passivate(metaslab_t *msp, uint64_t size)
953 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
954 * this metaslab again. In that case, it had better be empty,
955 * or we would be leaving space on the table.
957 ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0);
958 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
959 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
963 * Determine if the in-core space map representation can be condensed on-disk.
964 * We would like to use the following criteria to make our decision:
966 * 1. The size of the space map object should not dramatically increase as a
967 * result of writing out our in-core free map.
969 * 2. The on-disk space map representation is at least zfs_condense_pct/100
970 * times the size of the in-core representation's minimal form (i.e. with
971 * zfs_condense_pct = 110 and a 1MB minimal form, on-disk must be >= 1.1MB).
973 * Checking the first condition is tricky since we don't want to walk
974 * the entire AVL tree calculating the estimated on-disk size. Instead we
975 * use the size-ordered AVL tree in the space map and calculate the
976 * size required for the largest segment in our in-core free map. If the
977 * size required to represent that segment on disk is larger than the space
978 * map object then we avoid condensing this map.
980 * To determine the second criterion we use a best-case estimate and assume
981 * each segment can be represented on-disk as a single 64-bit entry. We refer
982 * to this best-case estimate as the space map's minimal form.
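/*
 * Worked example (illustrative): with zfs_condense_pct = 200 and an
 * in-core free map of 1,000 segments, the minimal form is
 * 1,000 * 8 = 8,000 bytes, so we condense only once the space map
 * object is at least 16,000 bytes on disk -- and only if the largest
 * free segment would not by itself be larger than the current object
 * when written out.
 */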
985 metaslab_should_condense(metaslab_t *msp)
987 space_map_t *sm = msp->ms_map;
988 space_map_obj_t *smo = &msp->ms_smo_syncing;
990 uint64_t size, entries, segsz;
992 ASSERT(MUTEX_HELD(&msp->ms_lock));
993 ASSERT(sm->sm_loaded);
996 * Use the sm_pp_root AVL tree, which is ordered by size, to obtain
997 * the largest segment in the in-core free map. If the tree is
998 * empty then we should condense the map.
1000 ss = avl_last(sm->sm_pp_root);
1005 * Calculate the number of 64-bit entries this segment would
1006 * require when written to disk. If this single segment would be
1007 * larger on-disk than the entire current on-disk structure, then
1008 * clearly condensing will increase the on-disk structure size.
1010 size = (ss->ss_end - ss->ss_start) >> sm->sm_shift;
1011 entries = size / (MIN(size, SM_RUN_MAX));
1012 segsz = entries * sizeof (uint64_t);
1014 return (segsz <= smo->smo_objsize &&
1015 smo->smo_objsize >= (zfs_condense_pct *
1016 sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100);
1020 * Condense the on-disk space map representation to its minimized form.
1021 * The minimized form consists of a small number of allocations followed by
1022 * the in-core free map.
1025 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1027 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1028 space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK];
1029 space_map_t condense_map;
1030 space_map_t *sm = msp->ms_map;
1031 objset_t *mos = spa_meta_objset(spa);
1032 space_map_obj_t *smo = &msp->ms_smo_syncing;
1034 ASSERT(MUTEX_HELD(&msp->ms_lock));
1035 ASSERT3U(spa_sync_pass(spa), ==, 1);
1036 ASSERT(sm->sm_loaded);
1038 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1039 "smo size %llu, segments %lu", txg,
1040 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1041 smo->smo_objsize, avl_numnodes(&sm->sm_root));
1044 * Create a map that is a 100% allocated map. We remove segments
1045 * that have been freed in this txg, any deferred frees that exist,
1046 * and any allocations in the future. Removing segments should be
1047 * a relatively inexpensive operation since we expect these maps to
1048 * contain a small number of nodes.
1050 space_map_create(&condense_map, sm->sm_start, sm->sm_size,
1051 sm->sm_shift, sm->sm_lock);
1052 space_map_add(&condense_map, condense_map.sm_start,
1053 condense_map.sm_size);
1056 * Remove what's been freed in this txg from the condense_map.
1057 * Since we're in sync_pass 1, we know that all the frees from
1058 * this txg are in the freemap.
1060 space_map_walk(freemap, space_map_remove, &condense_map);
1062 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1063 space_map_walk(msp->ms_defermap[t],
1064 space_map_remove, &condense_map);
1066 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1067 space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK],
1068 space_map_remove, &condense_map);
1071 * We're about to drop the metaslab's lock, thus allowing
1072 * other consumers to change its content. Set the
1073 * space_map's sm_condensing flag to ensure that
1074 * allocations on this metaslab do not occur while we're
1075 * in the middle of committing it to disk. This is only critical
1076 * for the ms_map, as all other space_maps use per-txg
1077 * views of their content.
1079 sm->sm_condensing = B_TRUE;
1081 mutex_exit(&msp->ms_lock);
1082 space_map_truncate(smo, mos, tx);
1083 mutex_enter(&msp->ms_lock);
1086 * While we would ideally like to create a space_map representation
1087 * that consists only of allocation records, doing so can be
1088 * prohibitively expensive because the in-core free map can be
1089 * large, and therefore computationally expensive to subtract
1090 * from the condense_map. Instead we sync out two maps, a cheap
1091 * allocation only map followed by the in-core free map. While not
1092 * optimal, this is typically close to optimal, and much cheaper to
1095 space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx);
1096 space_map_vacate(&condense_map, NULL, NULL);
1097 space_map_destroy(&condense_map);
1099 space_map_sync(sm, SM_FREE, smo, mos, tx);
1100 sm->sm_condensing = B_FALSE;
1102 spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, "
1103 "smo size %llu", txg,
1104 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1109 * Write a metaslab to disk in the context of the specified transaction group.
1112 metaslab_sync(metaslab_t *msp, uint64_t txg)
1114 vdev_t *vd = msp->ms_group->mg_vd;
1115 spa_t *spa = vd->vdev_spa;
1116 objset_t *mos = spa_meta_objset(spa);
1117 space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK];
1118 space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK];
1119 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1120 space_map_t *sm = msp->ms_map;
1121 space_map_obj_t *smo = &msp->ms_smo_syncing;
1125 ASSERT(!vd->vdev_ishole);
1128 * This metaslab has just been added so there's no work to do now.
1130 if (*freemap == NULL) {
1131 ASSERT3P(allocmap, ==, NULL);
1135 ASSERT3P(allocmap, !=, NULL);
1136 ASSERT3P(*freemap, !=, NULL);
1137 ASSERT3P(*freed_map, !=, NULL);
1139 if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0)
1143 * The only state that can actually be changing concurrently with
1144 * metaslab_sync() is the metaslab's ms_map. No other thread can
1145 * be modifying this txg's allocmap, freemap, freed_map, or smo.
1146 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
1147 * We drop it whenever we call into the DMU, because the DMU
1148 * can call down to us (e.g. via zio_free()) at any time.
1151 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1153 if (smo->smo_object == 0) {
1154 ASSERT(smo->smo_objsize == 0);
1155 ASSERT(smo->smo_alloc == 0);
1156 smo->smo_object = dmu_object_alloc(mos,
1157 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1158 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1159 ASSERT(smo->smo_object != 0);
1160 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1161 (sm->sm_start >> vd->vdev_ms_shift),
1162 sizeof (uint64_t), &smo->smo_object, tx);
1165 mutex_enter(&msp->ms_lock);
1167 if (sm->sm_loaded && spa_sync_pass(spa) == 1 &&
1168 metaslab_should_condense(msp)) {
1169 metaslab_condense(msp, txg, tx);
1171 space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
1172 space_map_sync(*freemap, SM_FREE, smo, mos, tx);
1175 space_map_vacate(allocmap, NULL, NULL);
1178 * For sync pass 1, we avoid walking the entire space map and
1179 * instead will just swap the pointers for freemap and
1180 * freed_map. We can safely do this since the freed_map is
1181 * guaranteed to be empty on the initial pass.
1183 if (spa_sync_pass(spa) == 1) {
1184 ASSERT0((*freed_map)->sm_space);
1185 ASSERT0(avl_numnodes(&(*freed_map)->sm_root));
1186 space_map_swap(freemap, freed_map);
1188 space_map_vacate(*freemap, space_map_add, *freed_map);
1191 ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space);
1192 ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space);
1194 mutex_exit(&msp->ms_lock);
1196 VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1197 dmu_buf_will_dirty(db, tx);
1198 ASSERT3U(db->db_size, >=, sizeof (*smo));
1199 bcopy(smo, db->db_data, sizeof (*smo));
1200 dmu_buf_rele(db, FTAG);
1206 * Called after a transaction group has completely synced to mark
1207 * all of the metaslab's free space as usable.
1210 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1212 space_map_obj_t *smo = &msp->ms_smo;
1213 space_map_obj_t *smosync = &msp->ms_smo_syncing;
1214 space_map_t *sm = msp->ms_map;
1215 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1216 space_map_t **defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1217 metaslab_group_t *mg = msp->ms_group;
1218 vdev_t *vd = mg->mg_vd;
1219 int64_t alloc_delta, defer_delta;
1221 ASSERT(!vd->vdev_ishole);
1223 mutex_enter(&msp->ms_lock);
1226 * If this metaslab is just becoming available, initialize its
1227 * allocmaps, freemaps, and defermap and add its capacity to the vdev.
1229 if (*freed_map == NULL) {
1230 ASSERT(*defer_map == NULL);
1231 for (int t = 0; t < TXG_SIZE; t++) {
1232 msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t),
1234 space_map_create(msp->ms_allocmap[t], sm->sm_start,
1235 sm->sm_size, sm->sm_shift, sm->sm_lock);
1236 msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t),
1238 space_map_create(msp->ms_freemap[t], sm->sm_start,
1239 sm->sm_size, sm->sm_shift, sm->sm_lock);
1242 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1243 msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t),
1245 space_map_create(msp->ms_defermap[t], sm->sm_start,
1246 sm->sm_size, sm->sm_shift, sm->sm_lock);
1249 freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1250 defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1252 vdev_space_update(vd, 0, 0, sm->sm_size);
1255 alloc_delta = smosync->smo_alloc - smo->smo_alloc;
1256 defer_delta = (*freed_map)->sm_space - (*defer_map)->sm_space;
1258 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1260 ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0);
1261 ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0);
1264 * If there's a space_map_load() in progress, wait for it to complete
1265 * so that we have a consistent view of the in-core space map.
1267 space_map_load_wait(sm);
1270 * Move the frees from the defer_map to this map (if it's loaded).
1271 * Swap the freed_map and the defer_map -- this is safe to do
1272 * because we've just emptied out the defer_map.
1274 space_map_vacate(*defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
1275 ASSERT0((*defer_map)->sm_space);
1276 ASSERT0(avl_numnodes(&(*defer_map)->sm_root));
1277 space_map_swap(freed_map, defer_map);
1281 msp->ms_deferspace += defer_delta;
1282 ASSERT3S(msp->ms_deferspace, >=, 0);
1283 ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
1284 if (msp->ms_deferspace != 0) {
1286 * Keep syncing this metaslab until all deferred frees
1287 * are back in circulation.
1289 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1293 * If the map is loaded but no longer active, evict it as soon as all
1294 * future allocations have synced. (If we unloaded it now and then
1295 * loaded a moment later, the map wouldn't reflect those allocations.)
1297 if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1300 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1301 if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
1304 if (evictable && !metaslab_debug)
1305 space_map_unload(sm);
1308 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1310 mutex_exit(&msp->ms_lock);
1314 metaslab_sync_reassess(metaslab_group_t *mg)
1316 vdev_t *vd = mg->mg_vd;
1317 int64_t failures = mg->mg_alloc_failures;
1320 * Re-evaluate all metaslabs which have lower offsets than the
1323 for (int m = 0; m < vd->vdev_ms_count; m++) {
1324 metaslab_t *msp = vd->vdev_ms[m];
1326 if (msp->ms_map->sm_start > mg->mg_bonus_area)
1329 mutex_enter(&msp->ms_lock);
1330 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1331 mutex_exit(&msp->ms_lock);
1334 atomic_add_64(&mg->mg_alloc_failures, -failures);
1337 * Prefetch the next potential metaslabs
1339 metaslab_prefetch(mg);
1343 metaslab_distance(metaslab_t *msp, dva_t *dva)
1345 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1346 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1347 uint64_t start = msp->ms_map->sm_start >> ms_shift;
1349 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1350 return (1ULL << 63);
1353 return ((start - offset) << ms_shift);
1355 return ((offset - start) << ms_shift);
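/*
 * Worked example (illustrative): a DVA on a different top-level vdev
 * yields the maximum distance (1ULL << 63), while a DVA five metaslabs
 * away on the same vdev yields 5 << ms_shift bytes.
 */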
1360 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1361 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1363 spa_t *spa = mg->mg_vd->vdev_spa;
1364 metaslab_t *msp = NULL;
1365 uint64_t offset = -1ULL;
1366 avl_tree_t *t = &mg->mg_metaslab_tree;
1367 uint64_t activation_weight;
1368 uint64_t target_distance;
1371 activation_weight = METASLAB_WEIGHT_PRIMARY;
1372 for (i = 0; i < d; i++) {
1373 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1374 activation_weight = METASLAB_WEIGHT_SECONDARY;
1380 boolean_t was_active;
1382 mutex_enter(&mg->mg_lock);
1383 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1384 if (msp->ms_weight < asize) {
1385 spa_dbgmsg(spa, "%s: failed to meet weight "
1386 "requirement: vdev %llu, txg %llu, mg %p, "
1387 "msp %p, psize %llu, asize %llu, "
1388 "failures %llu, weight %llu",
1389 spa_name(spa), mg->mg_vd->vdev_id, txg,
1390 mg, msp, psize, asize,
1391 mg->mg_alloc_failures, msp->ms_weight);
1392 mutex_exit(&mg->mg_lock);
1397 * If the selected metaslab is condensing, skip it.
1399 if (msp->ms_map->sm_condensing)
1402 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1403 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1406 target_distance = min_distance +
1407 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
1409 for (i = 0; i < d; i++)
1410 if (metaslab_distance(msp, &dva[i]) <
1416 mutex_exit(&mg->mg_lock);
1421 * If we've already reached the allowable number of failed
1422 * allocation attempts on this metaslab group then we
1423 * consider skipping it. We skip it only if we're allowed
1424 * to "fast" gang, the physical size is larger than
1425 * a gang block, and we're attempting to allocate from
1426 * the primary metaslab.
1428 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1429 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1430 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1431 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1432 "vdev %llu, txg %llu, mg %p, psize %llu, "
1433 "asize %llu, failures %llu", spa_name(spa),
1434 mg->mg_vd->vdev_id, txg, mg, psize, asize,
1435 mg->mg_alloc_failures);
1439 mutex_enter(&msp->ms_lock);
1442 * Ensure that the metaslab we have selected is still
1443 * capable of handling our request. It's possible that
1444 * another thread may have changed the weight while we
1445 * were blocked on the metaslab lock.
1447 if (msp->ms_weight < asize || (was_active &&
1448 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1449 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1450 mutex_exit(&msp->ms_lock);
1454 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1455 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1456 metaslab_passivate(msp,
1457 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1458 mutex_exit(&msp->ms_lock);
1462 if (metaslab_activate(msp, activation_weight) != 0) {
1463 mutex_exit(&msp->ms_lock);
1468 * If this metaslab is currently condensing then pick again as
1469 * we can't manipulate this metaslab until it's committed
1472 if (msp->ms_map->sm_condensing) {
1473 mutex_exit(&msp->ms_lock);
1477 if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL)
1480 atomic_inc_64(&mg->mg_alloc_failures);
1482 metaslab_passivate(msp, space_map_maxsize(msp->ms_map));
1484 mutex_exit(&msp->ms_lock);
1487 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1488 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1490 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize);
1492 mutex_exit(&msp->ms_lock);
1498 * Allocate a block for the specified i/o.
1501 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1502 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1504 metaslab_group_t *mg, *rotor;
1508 int zio_lock = B_FALSE;
1509 boolean_t allocatable;
1510 uint64_t offset = -1ULL;
1514 ASSERT(!DVA_IS_VALID(&dva[d]));
1517 * For testing, make some blocks above a certain size be gang blocks.
1519 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1520 return (SET_ERROR(ENOSPC));
1523 * Start at the rotor and loop through all mgs until we find something.
1524 * Note that there's no locking on mc_rotor or mc_aliquot because
1525 * nothing actually breaks if we miss a few updates -- we just won't
1526 * allocate quite as evenly. It all balances out over time.
1528 * If we are doing ditto or log blocks, try to spread them across
1529 * consecutive vdevs. If we're forced to reuse a vdev before we've
1530 * allocated all of our ditto blocks, then try and spread them out on
1531 * that vdev as much as possible. If it turns out to not be possible,
1532 * gradually lower our standards until anything becomes acceptable.
1533 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1534 * gives us hope of containing our fault domains to something we're
1535 * able to reason about. Otherwise, any two top-level vdev failures
1536 * will guarantee the loss of data. With consecutive allocation,
1537 * only two adjacent top-level vdev failures will result in data loss.
1539 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1540 * ourselves on the same vdev as our gang block header. That
1541 * way, we can hope for locality in vdev_cache, plus it makes our
1542 * fault domains something tractable.
1545 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1548 * It's possible the vdev we're using as the hint no
1549 * longer exists (i.e. removed). Consult the rotor when
1555 if (flags & METASLAB_HINTBP_AVOID &&
1556 mg->mg_next != NULL)
1561 } else if (d != 0) {
1562 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1563 mg = vd->vdev_mg->mg_next;
1569 * If the hint put us into the wrong metaslab class, or into a
1570 * metaslab group that has been passivated, just follow the rotor.
1572 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1579 ASSERT(mg->mg_activation_count == 1);
1584 * Don't allocate from faulted devices.
1587 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1588 allocatable = vdev_allocatable(vd);
1589 spa_config_exit(spa, SCL_ZIO, FTAG);
1591 allocatable = vdev_allocatable(vd);
1597 * Avoid writing single-copy data to a failing vdev
1598 * unless the user instructs us that it is okay.
1600 if ((vd->vdev_stat.vs_write_errors > 0 ||
1601 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1602 d == 0 && dshift == 3 &&
1603 !(zfs_write_to_degraded && vd->vdev_state ==
1604 VDEV_STATE_DEGRADED)) {
1609 ASSERT(mg->mg_class == mc);
1611 distance = vd->vdev_asize >> dshift;
1612 if (distance <= (1ULL << vd->vdev_ms_shift))
1617 asize = vdev_psize_to_asize(vd, psize);
1618 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1620 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1622 if (offset != -1ULL) {
1624 * If we've just selected this metaslab group,
1625 * figure out whether the corresponding vdev is
1626 * over- or under-used relative to the pool,
1627 * and set an allocation bias to even it out.
1629 if (mc->mc_aliquot == 0) {
1630 vdev_stat_t *vs = &vd->vdev_stat;
1633 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1634 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1637 * Calculate how much more or less we should
1638 * try to allocate from this device during
1639 * this iteration around the rotor.
1640 * For example, if a device is 80% full
1641 * and the pool is 20% full then we should
1642 * reduce allocations by 60% on this device.
1644 * mg_bias = (20 - 80) * 512K / 100 = -307K
1646 * This reduces allocations by 307K for this
1649 mg->mg_bias = ((cu - vu) *
1650 (int64_t)mg->mg_aliquot) / 100;
1653 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
1654 mg->mg_aliquot + mg->mg_bias) {
1655 mc->mc_rotor = mg->mg_next;
1659 DVA_SET_VDEV(&dva[d], vd->vdev_id);
1660 DVA_SET_OFFSET(&dva[d], offset);
1661 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
1662 DVA_SET_ASIZE(&dva[d], asize);
1667 mc->mc_rotor = mg->mg_next;
1669 } while ((mg = mg->mg_next) != rotor);
1673 ASSERT(dshift < 64);
1677 if (!allocatable && !zio_lock) {
1683 bzero(&dva[d], sizeof (dva_t));
1685 return (SET_ERROR(ENOSPC));
1689 * Free the block represented by DVA in the context of the specified
1690 * transaction group.
1693 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
1695 uint64_t vdev = DVA_GET_VDEV(dva);
1696 uint64_t offset = DVA_GET_OFFSET(dva);
1697 uint64_t size = DVA_GET_ASIZE(dva);
1701 ASSERT(DVA_IS_VALID(dva));
1703 if (txg > spa_freeze_txg(spa))
1706 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1707 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
1708 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
1709 (u_longlong_t)vdev, (u_longlong_t)offset);
1714 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1716 if (DVA_GET_GANG(dva))
1717 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1719 mutex_enter(&msp->ms_lock);
1722 space_map_remove(msp->ms_allocmap[txg & TXG_MASK],
1724 space_map_free(msp->ms_map, offset, size);
1726 if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0)
1727 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1728 space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size);
1731 mutex_exit(&msp->ms_lock);
1735 * Intent log support: upon opening the pool after a crash, notify the SPA
1736 * of blocks that the intent log has allocated for immediate write, but
1737 * which are still considered free by the SPA because the last transaction
1738 * group didn't commit yet.
1741 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
1743 uint64_t vdev = DVA_GET_VDEV(dva);
1744 uint64_t offset = DVA_GET_OFFSET(dva);
1745 uint64_t size = DVA_GET_ASIZE(dva);
1750 ASSERT(DVA_IS_VALID(dva));
1752 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1753 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
1754 return (SET_ERROR(ENXIO));
1756 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1758 if (DVA_GET_GANG(dva))
1759 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1761 mutex_enter(&msp->ms_lock);
1763 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded)
1764 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
1766 if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
1767 error = SET_ERROR(ENOENT);
1769 if (error || txg == 0) { /* txg == 0 indicates dry run */
1770 mutex_exit(&msp->ms_lock);
1774 space_map_claim(msp->ms_map, offset, size);
1776 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
1777 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1778 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1779 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size);
1782 mutex_exit(&msp->ms_lock);
1788 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
1789 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
1791 dva_t *dva = bp->blk_dva;
1792 dva_t *hintdva = hintbp->blk_dva;
1795 ASSERT(bp->blk_birth == 0);
1796 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
1798 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1800 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
1801 spa_config_exit(spa, SCL_ALLOC, FTAG);
1802 return (SET_ERROR(ENOSPC));
1805 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
1806 ASSERT(BP_GET_NDVAS(bp) == 0);
1807 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
1809 for (int d = 0; d < ndvas; d++) {
1810 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
1813 for (d--; d >= 0; d--) {
1814 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
1815 bzero(&dva[d], sizeof (dva_t));
1817 spa_config_exit(spa, SCL_ALLOC, FTAG);
1822 ASSERT(BP_GET_NDVAS(bp) == ndvas);
1824 spa_config_exit(spa, SCL_ALLOC, FTAG);
1826 BP_SET_BIRTH(bp, txg, txg);
1832 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
1834 const dva_t *dva = bp->blk_dva;
1835 int ndvas = BP_GET_NDVAS(bp);
1837 ASSERT(!BP_IS_HOLE(bp));
1838 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
1840 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
1842 for (int d = 0; d < ndvas; d++)
1843 metaslab_free_dva(spa, &dva[d], txg, now);
1845 spa_config_exit(spa, SCL_FREE, FTAG);
1849 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
1851 const dva_t *dva = bp->blk_dva;
1852 int ndvas = BP_GET_NDVAS(bp);
1855 ASSERT(!BP_IS_HOLE(bp));
1859 * First do a dry run to make sure all DVAs are claimable,
1860 * so we don't have to unwind from partial failures below.
1862 if ((error = metaslab_claim(spa, bp, 0)) != 0)
1866 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1868 for (int d = 0; d < ndvas; d++)
1869 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
1872 spa_config_exit(spa, SCL_ALLOC, FTAG);
1874 ASSERT(error == 0 || txg == 0);
1880 checkmap(space_map_t *sm, uint64_t off, uint64_t size)
1885 mutex_enter(sm->sm_lock);
1886 ss = space_map_find(sm, off, size, &where);
1888 panic("freeing free block; ss=%p", (void *)ss);
1889 mutex_exit(sm->sm_lock);
1893 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
1895 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
1898 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1899 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1900 uint64_t vdid = DVA_GET_VDEV(&bp->blk_dva[i]);
1901 vdev_t *vd = vdev_lookup_top(spa, vdid);
1902 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[i]);
1903 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
1904 metaslab_t *ms = vd->vdev_ms[off >> vd->vdev_ms_shift];
1906 if (ms->ms_map->sm_loaded)
1907 checkmap(ms->ms_map, off, size);
1909 for (int j = 0; j < TXG_SIZE; j++)
1910 checkmap(ms->ms_freemap[j], off, size);
1911 for (int j = 0; j < TXG_DEFER_SIZE; j++)
1912 checkmap(ms->ms_defermap[j], off, size);
1914 spa_config_exit(spa, SCL_VDEV, FTAG);