4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 #include <sys/zfs_context.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
35 SYSCTL_DECL(_vfs_zfs);
36 SYSCTL_NODE(_vfs_zfs, OID_AUTO, metaslab, CTLFLAG_RW, 0, "ZFS metaslab");
39 * Allow allocations to switch to gang blocks quickly. We do this to
40 * avoid having to load lots of space_maps in a given txg. There are,
41 * however, some cases where we want to avoid "fast" ganging and instead
42 * we want to do an exhaustive search of all metaslabs on this device.
43 * Currently we don't allow any gang, zil, or dump device related allocations to "fast" gang.
46 #define CAN_FASTGANG(flags) \
47 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
48 METASLAB_GANG_AVOID)))
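/*
 * CAN_FASTGANG() is consulted in metaslab_group_alloc() below: once a
 * group has exceeded zfs_mg_alloc_failures failed attempts, a primary
 * allocation whose flags permit fast ganging and whose physical size
 * exceeds a gang block skips the group rather than keep loading space
 * maps.  Gang, zil, and dump allocations carry one of the flags above,
 * so they always fall through to the exhaustive search instead.
 */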
50 uint64_t metaslab_aliquot = 512ULL << 10;
51 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
52 TUNABLE_QUAD("vfs.zfs.metaslab.gang_bang", &metaslab_gang_bang);
53 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, gang_bang, CTLFLAG_RWTUN,
54 &metaslab_gang_bang, 0,
55 "Force gang block allocation for blocks larger than or equal to this value");
58 * The in-core space map representation is more compact than its on-disk form.
59 * The zfs_condense_pct determines how much more compact the in-core
60 * space_map representation must be before we compact it on-disk.
61 * Values should be greater than or equal to 100.
63 int zfs_condense_pct = 200;
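/*
 * With zfs_condense_pct = 100 a space map would be condensed as soon as
 * its on-disk object is no smaller than its minimal in-core form (one
 * 64-bit entry per free segment); the default of 200 waits until the
 * object is at least twice that size.  See metaslab_should_condense()
 * for the full test.
 */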
66 * This value defines the number of allowed allocation failures per vdev.
67 * If a device reaches this threshold in a given txg then we consider skipping
68 * allocations on that device.
70 int zfs_mg_alloc_failures = 0;
71 TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);
72 SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RWTUN,
73 &zfs_mg_alloc_failures, 0,
74 "Number of allowed allocation failures per vdev");
77 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
79 static int metaslab_debug = 0;
80 TUNABLE_INT("vfs.zfs.metaslab.debug", &metaslab_debug);
81 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, debug, CTLFLAG_RWTUN, &metaslab_debug,
83 "Metaslab debugging: when set, keeps all space maps in core to verify frees");
86 * Minimum size which forces the dynamic allocator to change
87 * its allocation strategy. Once the space map cannot satisfy
88 * an allocation of this size then it switches to using a more
89 * aggressive strategy (i.e. search by size rather than offset).
91 uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;
92 TUNABLE_QUAD("vfs.zfs.metaslab.df_alloc_threshold",
93 &metaslab_df_alloc_threshold);
94 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, CTLFLAG_RWTUN,
95 &metaslab_df_alloc_threshold, 0,
96 "Minimum size which forces the dynamic allocator to change it's allocation strategy");
99 * The minimum free space, in percent, which must be available
100 * in a space map to continue allocations in a first-fit fashion.
101 * Once the space_map's free space drops below this level we dynamically
102 * switch to using best-fit allocations.
104 int metaslab_df_free_pct = 4;
105 TUNABLE_INT("vfs.zfs.metaslab.df_free_pct", &metaslab_df_free_pct);
106 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN,
107 &metaslab_df_free_pct, 0,
108 "The minimum free space, in percent, which must be available in a space map to continue allocations in a first-fit fashion");
111 * A metaslab is considered "free" if it contains a contiguous
112 * segment which is greater than metaslab_min_alloc_size.
114 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
115 TUNABLE_QUAD("vfs.zfs.metaslab.min_alloc_size",
116 &metaslab_min_alloc_size);
117 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, min_alloc_size, CTLFLAG_RWTUN,
118 &metaslab_min_alloc_size, 0,
119 "A metaslab is considered \"free\" if it contains a contiguous segment which is greater than vfs.zfs.metaslab.min_alloc_size");
122 * Max number of space_maps to prefetch.
124 int metaslab_prefetch_limit = SPA_DVAS_PER_BP;
125 TUNABLE_INT("vfs.zfs.metaslab.prefetch_limit", &metaslab_prefetch_limit);
126 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, prefetch_limit, CTLFLAG_RWTUN,
127 &metaslab_prefetch_limit, 0, "Maximum number of space_maps to prefetch");
130 * Percentage bonus multiplier for metaslabs that are in the bonus area.
132 int metaslab_smo_bonus_pct = 150;
133 TUNABLE_INT("vfs.zfs.metaslab.smo_bonus_pct", &metaslab_smo_bonus_pct);
134 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, smo_bonus_pct, CTLFLAG_RWTUN,
135 &metaslab_smo_bonus_pct, 0, "Percentage bonus multiplier for metaslabs that are in the bonus area");
138 * Should we be willing to write data to degraded vdevs?
140 boolean_t zfs_write_to_degraded = B_FALSE;
141 SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
142 &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
143 TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);
146 * ==========================================================================
148 * ==========================================================================
151 metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
153 metaslab_class_t *mc;
155 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
165 metaslab_class_destroy(metaslab_class_t *mc)
167 ASSERT(mc->mc_rotor == NULL);
168 ASSERT(mc->mc_alloc == 0);
169 ASSERT(mc->mc_deferred == 0);
170 ASSERT(mc->mc_space == 0);
171 ASSERT(mc->mc_dspace == 0);
173 kmem_free(mc, sizeof (metaslab_class_t));
177 metaslab_class_validate(metaslab_class_t *mc)
179 metaslab_group_t *mg;
183 * Must hold one of the spa_config locks.
185 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
186 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
188 if ((mg = mc->mc_rotor) == NULL)
193 ASSERT(vd->vdev_mg != NULL);
194 ASSERT3P(vd->vdev_top, ==, vd);
195 ASSERT3P(mg->mg_class, ==, mc);
196 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
197 } while ((mg = mg->mg_next) != mc->mc_rotor);
203 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
204 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
206 atomic_add_64(&mc->mc_alloc, alloc_delta);
207 atomic_add_64(&mc->mc_deferred, defer_delta);
208 atomic_add_64(&mc->mc_space, space_delta);
209 atomic_add_64(&mc->mc_dspace, dspace_delta);
213 metaslab_class_minblocksize_update(metaslab_class_t *mc)
215 metaslab_group_t *mg;
217 uint64_t minashift = UINT64_MAX;
219 if ((mg = mc->mc_rotor) == NULL) {
220 mc->mc_minblocksize = SPA_MINBLOCKSIZE;
226 if (vd->vdev_ashift < minashift)
227 minashift = vd->vdev_ashift;
228 } while ((mg = mg->mg_next) != mc->mc_rotor);
230 mc->mc_minblocksize = 1ULL << minashift;
234 metaslab_class_get_alloc(metaslab_class_t *mc)
236 return (mc->mc_alloc);
240 metaslab_class_get_deferred(metaslab_class_t *mc)
242 return (mc->mc_deferred);
246 metaslab_class_get_space(metaslab_class_t *mc)
248 return (mc->mc_space);
252 metaslab_class_get_dspace(metaslab_class_t *mc)
254 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
258 metaslab_class_get_minblocksize(metaslab_class_t *mc)
260 return (mc->mc_minblocksize);
264 * ==========================================================================
266 * ==========================================================================
269 metaslab_compare(const void *x1, const void *x2)
271 const metaslab_t *m1 = x1;
272 const metaslab_t *m2 = x2;
274 if (m1->ms_weight < m2->ms_weight)
276 if (m1->ms_weight > m2->ms_weight)
280 * If the weights are identical, use the offset to force uniqueness.
282 if (m1->ms_map->sm_start < m2->ms_map->sm_start)
284 if (m1->ms_map->sm_start > m2->ms_map->sm_start)
287 ASSERT3P(m1, ==, m2);
293 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
295 metaslab_group_t *mg;
297 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
298 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
299 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
300 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
303 mg->mg_activation_count = 0;
309 metaslab_group_destroy(metaslab_group_t *mg)
311 ASSERT(mg->mg_prev == NULL);
312 ASSERT(mg->mg_next == NULL);
314 * We may have gone below zero with the activation count
315 * either because we never activated in the first place or
316 * because we're done, and possibly removing the vdev.
318 ASSERT(mg->mg_activation_count <= 0);
320 avl_destroy(&mg->mg_metaslab_tree);
321 mutex_destroy(&mg->mg_lock);
322 kmem_free(mg, sizeof (metaslab_group_t));
326 metaslab_group_activate(metaslab_group_t *mg)
328 metaslab_class_t *mc = mg->mg_class;
329 metaslab_group_t *mgprev, *mgnext;
331 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
333 ASSERT(mc->mc_rotor != mg);
334 ASSERT(mg->mg_prev == NULL);
335 ASSERT(mg->mg_next == NULL);
336 ASSERT(mg->mg_activation_count <= 0);
338 if (++mg->mg_activation_count <= 0)
341 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
343 if ((mgprev = mc->mc_rotor) == NULL) {
347 mgnext = mgprev->mg_next;
348 mg->mg_prev = mgprev;
349 mg->mg_next = mgnext;
350 mgprev->mg_next = mg;
351 mgnext->mg_prev = mg;
354 metaslab_class_minblocksize_update(mc);
358 metaslab_group_passivate(metaslab_group_t *mg)
360 metaslab_class_t *mc = mg->mg_class;
361 metaslab_group_t *mgprev, *mgnext;
363 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
365 if (--mg->mg_activation_count != 0) {
366 ASSERT(mc->mc_rotor != mg);
367 ASSERT(mg->mg_prev == NULL);
368 ASSERT(mg->mg_next == NULL);
369 ASSERT(mg->mg_activation_count < 0);
373 mgprev = mg->mg_prev;
374 mgnext = mg->mg_next;
379 mc->mc_rotor = mgnext;
380 mgprev->mg_next = mgnext;
381 mgnext->mg_prev = mgprev;
386 metaslab_class_minblocksize_update(mc);
390 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
392 mutex_enter(&mg->mg_lock);
393 ASSERT(msp->ms_group == NULL);
396 avl_add(&mg->mg_metaslab_tree, msp);
397 mutex_exit(&mg->mg_lock);
401 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
403 mutex_enter(&mg->mg_lock);
404 ASSERT(msp->ms_group == mg);
405 avl_remove(&mg->mg_metaslab_tree, msp);
406 msp->ms_group = NULL;
407 mutex_exit(&mg->mg_lock);
411 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
414 * Although in principle the weight can be any value, in
415 * practice we do not use values in the range [1, 510].
417 ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
418 ASSERT(MUTEX_HELD(&msp->ms_lock));
420 mutex_enter(&mg->mg_lock);
421 ASSERT(msp->ms_group == mg);
422 avl_remove(&mg->mg_metaslab_tree, msp);
423 msp->ms_weight = weight;
424 avl_add(&mg->mg_metaslab_tree, msp);
425 mutex_exit(&mg->mg_lock);
429 * ==========================================================================
430 * Common allocator routines
431 * ==========================================================================
434 metaslab_segsize_compare(const void *x1, const void *x2)
436 const space_seg_t *s1 = x1;
437 const space_seg_t *s2 = x2;
438 uint64_t ss_size1 = s1->ss_end - s1->ss_start;
439 uint64_t ss_size2 = s2->ss_end - s2->ss_start;
441 if (ss_size1 < ss_size2)
443 if (ss_size1 > ss_size2)
446 if (s1->ss_start < s2->ss_start)
448 if (s1->ss_start > s2->ss_start)
455 * This is a helper function that can be used by the allocator to find
456 * a suitable block to allocate. This will search the specified AVL
457 * tree looking for a block that matches the specified criteria.
460 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
463 space_seg_t *ss, ssearch;
466 ssearch.ss_start = *cursor;
467 ssearch.ss_end = *cursor + size;
469 ss = avl_find(t, &ssearch, &where);
471 ss = avl_nearest(t, where, AVL_AFTER);
474 uint64_t offset = P2ROUNDUP(ss->ss_start, align);
476 if (offset + size <= ss->ss_end) {
477 *cursor = offset + size;
480 ss = AVL_NEXT(t, ss);
484 * If we know we've searched the whole map (*cursor == 0), give up.
485 * Otherwise, reset the cursor to the beginning and try again.
491 return (metaslab_block_picker(t, cursor, size, align));
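/*
 * Callers keep one cursor per power-of-two alignment: metaslab_pp_load()
 * allocates a 64-entry array in sm->sm_ppd, and metaslab_ff_alloc() /
 * metaslab_df_alloc() index it with highbit(align) - 1.  Same-sized
 * allocations therefore tend to march forward through the metaslab
 * instead of re-scanning it from the start on every call.
 */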
495 metaslab_pp_load(space_map_t *sm)
499 ASSERT(sm->sm_ppd == NULL);
500 sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
502 sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
503 avl_create(sm->sm_pp_root, metaslab_segsize_compare,
504 sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));
506 for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
507 avl_add(sm->sm_pp_root, ss);
511 metaslab_pp_unload(space_map_t *sm)
515 kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
518 while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
519 /* tear down the tree */
522 avl_destroy(sm->sm_pp_root);
523 kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
524 sm->sm_pp_root = NULL;
529 metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
531 /* No need to update cursor */
536 metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
538 /* No need to update cursor */
542 * Return the maximum contiguous segment within the metaslab.
545 metaslab_pp_maxsize(space_map_t *sm)
547 avl_tree_t *t = sm->sm_pp_root;
550 if (t == NULL || (ss = avl_last(t)) == NULL)
553 return (ss->ss_end - ss->ss_start);
557 * ==========================================================================
558 * The first-fit block allocator
559 * ==========================================================================
562 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
564 avl_tree_t *t = &sm->sm_root;
565 uint64_t align = size & -size;
566 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
568 return (metaslab_block_picker(t, cursor, size, align));
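/*
 * Note that align = size & -size isolates the lowest set bit of size,
 * i.e. the largest power of two that divides it, so a request is placed
 * at its natural alignment.  For example, a 24K request (0x6000) yields
 * align = 8K and shares the cursor used by other requests whose natural
 * alignment is 8K.
 */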
573 metaslab_ff_fragmented(space_map_t *sm)
578 static space_map_ops_t metaslab_ff_ops = {
585 metaslab_ff_fragmented
589 * ==========================================================================
590 * Dynamic block allocator -
591 * Uses the first fit allocation scheme until space gets low and then
592 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
593 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
594 * ==========================================================================
597 metaslab_df_alloc(space_map_t *sm, uint64_t size)
599 avl_tree_t *t = &sm->sm_root;
600 uint64_t align = size & -size;
601 uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
602 uint64_t max_size = metaslab_pp_maxsize(sm);
603 int free_pct = sm->sm_space * 100 / sm->sm_size;
605 ASSERT(MUTEX_HELD(sm->sm_lock));
606 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
612 * If we're running low on space, switch to using the size
613 * sorted AVL tree (best-fit).
615 if (max_size < metaslab_df_alloc_threshold ||
616 free_pct < metaslab_df_free_pct) {
621 return (metaslab_block_picker(t, cursor, size, 1ULL));
625 metaslab_df_fragmented(space_map_t *sm)
627 uint64_t max_size = metaslab_pp_maxsize(sm);
628 int free_pct = sm->sm_space * 100 / sm->sm_size;
630 if (max_size >= metaslab_df_alloc_threshold &&
631 free_pct >= metaslab_df_free_pct)
637 static space_map_ops_t metaslab_df_ops = {
644 metaslab_df_fragmented
648 * ==========================================================================
649 * Other experimental allocators
650 * ==========================================================================
653 metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
655 avl_tree_t *t = &sm->sm_root;
656 uint64_t *cursor = (uint64_t *)sm->sm_ppd;
657 uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
658 uint64_t max_size = metaslab_pp_maxsize(sm);
659 uint64_t rsize = size;
662 ASSERT(MUTEX_HELD(sm->sm_lock));
663 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
668 ASSERT3U(*extent_end, >=, *cursor);
671 * If we're running low on space, switch to using the size
672 * sorted AVL tree (best-fit).
674 if ((*cursor + size) > *extent_end) {
677 *cursor = *extent_end = 0;
679 if (max_size > 2 * SPA_MAXBLOCKSIZE)
680 rsize = MIN(metaslab_min_alloc_size, max_size);
681 offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
683 *cursor = offset + size;
685 offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
687 ASSERT3U(*cursor, <=, *extent_end);
692 metaslab_cdf_fragmented(space_map_t *sm)
694 uint64_t max_size = metaslab_pp_maxsize(sm);
696 if (max_size > (metaslab_min_alloc_size * 10))
701 static space_map_ops_t metaslab_cdf_ops = {
708 metaslab_cdf_fragmented
711 uint64_t metaslab_ndf_clump_shift = 4;
714 metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
716 avl_tree_t *t = &sm->sm_root;
718 space_seg_t *ss, ssearch;
719 uint64_t hbit = highbit(size);
720 uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
721 uint64_t max_size = metaslab_pp_maxsize(sm);
723 ASSERT(MUTEX_HELD(sm->sm_lock));
724 ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));
729 ssearch.ss_start = *cursor;
730 ssearch.ss_end = *cursor + size;
732 ss = avl_find(t, &ssearch, &where);
733 if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
736 ssearch.ss_start = 0;
737 ssearch.ss_end = MIN(max_size,
738 1ULL << (hbit + metaslab_ndf_clump_shift));
739 ss = avl_find(t, &ssearch, &where);
741 ss = avl_nearest(t, where, AVL_AFTER);
746 if (ss->ss_start + size <= ss->ss_end) {
747 *cursor = ss->ss_start + size;
748 return (ss->ss_start);
755 metaslab_ndf_fragmented(space_map_t *sm)
757 uint64_t max_size = metaslab_pp_maxsize(sm);
759 if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
765 static space_map_ops_t metaslab_ndf_ops = {
772 metaslab_ndf_fragmented
775 space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
778 * ==========================================================================
780 * ==========================================================================
783 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
784 uint64_t start, uint64_t size, uint64_t txg)
786 vdev_t *vd = mg->mg_vd;
789 msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
790 mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
792 msp->ms_smo_syncing = *smo;
795 * We create the main space map here, but we don't create the
796 * allocmaps and freemaps until metaslab_sync_done(). This serves
797 * two purposes: it allows metaslab_sync_done() to detect the
798 * addition of new space; and for debugging, it ensures that we'd
799 * data fault on any attempt to use this metaslab before it's ready.
801 msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
802 space_map_create(msp->ms_map, start, size,
803 vd->vdev_ashift, &msp->ms_lock);
805 metaslab_group_add(mg, msp);
807 if (metaslab_debug && smo->smo_object != 0) {
808 mutex_enter(&msp->ms_lock);
809 VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
810 SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
811 mutex_exit(&msp->ms_lock);
815 * If we're opening an existing pool (txg == 0) or creating
816 * a new one (txg == TXG_INITIAL), all space is available now.
817 * If we're adding space to an existing pool, the new space
818 * does not become available until after this txg has synced.
820 if (txg <= TXG_INITIAL)
821 metaslab_sync_done(msp, 0);
824 vdev_dirty(vd, 0, NULL, txg);
825 vdev_dirty(vd, VDD_METASLAB, msp, txg);
832 metaslab_fini(metaslab_t *msp)
834 metaslab_group_t *mg = msp->ms_group;
836 vdev_space_update(mg->mg_vd,
837 -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size);
839 metaslab_group_remove(mg, msp);
841 mutex_enter(&msp->ms_lock);
843 space_map_unload(msp->ms_map);
844 space_map_destroy(msp->ms_map);
845 kmem_free(msp->ms_map, sizeof (*msp->ms_map));
847 for (int t = 0; t < TXG_SIZE; t++) {
848 space_map_destroy(msp->ms_allocmap[t]);
849 space_map_destroy(msp->ms_freemap[t]);
850 kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t]));
851 kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t]));
854 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
855 space_map_destroy(msp->ms_defermap[t]);
856 kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t]));
859 ASSERT0(msp->ms_deferspace);
861 mutex_exit(&msp->ms_lock);
862 mutex_destroy(&msp->ms_lock);
864 kmem_free(msp, sizeof (metaslab_t));
867 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
868 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
869 #define METASLAB_ACTIVE_MASK \
870 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
873 metaslab_weight(metaslab_t *msp)
875 metaslab_group_t *mg = msp->ms_group;
876 space_map_t *sm = msp->ms_map;
877 space_map_obj_t *smo = &msp->ms_smo;
878 vdev_t *vd = mg->mg_vd;
879 uint64_t weight, space;
881 ASSERT(MUTEX_HELD(&msp->ms_lock));
884 * This vdev is in the process of being removed so there is nothing
887 if (vd->vdev_removing) {
888 ASSERT0(smo->smo_alloc);
889 ASSERT0(vd->vdev_ms_shift);
894 * The baseline weight is the metaslab's free space.
896 space = sm->sm_size - smo->smo_alloc;
900 * Modern disks have uniform bit density and constant angular velocity.
901 * Therefore, the outer recording zones are faster (higher bandwidth)
902 * than the inner zones by the ratio of outer to inner track diameter,
903 * which is typically around 2:1. We account for this by assigning
904 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
905 * In effect, this means that we'll select the metaslab with the most
906 * free bandwidth rather than simply the one with the most free space.
908 weight = 2 * weight -
909 ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
910 ASSERT(weight >= space && weight <= 2 * space);
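/*
 * For example, on a vdev with 100 metaslabs this scales the weight
 * linearly from 2x the free space for metaslab 0 down to just over 1x
 * the free space for metaslab 99, which is the range the ASSERT above
 * verifies.
 */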
913 * For locality, assign higher weight to metaslabs which have
914 * a lower offset than what we've already activated.
916 if (sm->sm_start <= mg->mg_bonus_area)
917 weight *= (metaslab_smo_bonus_pct / 100);
918 ASSERT(weight >= space &&
919 weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);
921 if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
923 * If this metaslab is one we're actively using, adjust its
924 * weight to make it preferable to any inactive metaslab so
925 * we'll polish it off.
927 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
933 metaslab_prefetch(metaslab_group_t *mg)
935 spa_t *spa = mg->mg_vd->vdev_spa;
937 avl_tree_t *t = &mg->mg_metaslab_tree;
940 mutex_enter(&mg->mg_lock);
943 * Prefetch the next potential metaslabs
945 for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
946 space_map_t *sm = msp->ms_map;
947 space_map_obj_t *smo = &msp->ms_smo;
949 /* If we have reached our prefetch limit then we're done */
950 if (m >= metaslab_prefetch_limit)
953 if (!sm->sm_loaded && smo->smo_object != 0) {
954 mutex_exit(&mg->mg_lock);
955 dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
956 0ULL, smo->smo_objsize);
957 mutex_enter(&mg->mg_lock);
960 mutex_exit(&mg->mg_lock);
964 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
966 metaslab_group_t *mg = msp->ms_group;
967 space_map_t *sm = msp->ms_map;
968 space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;
970 ASSERT(MUTEX_HELD(&msp->ms_lock));
972 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
973 space_map_load_wait(sm);
974 if (!sm->sm_loaded) {
975 space_map_obj_t *smo = &msp->ms_smo;
977 int error = space_map_load(sm, sm_ops, SM_FREE, smo,
978 spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
980 metaslab_group_sort(msp->ms_group, msp, 0);
983 for (int t = 0; t < TXG_DEFER_SIZE; t++)
984 space_map_walk(msp->ms_defermap[t],
985 space_map_claim, sm);
990 * Track the bonus area as we activate new metaslabs.
992 if (sm->sm_start > mg->mg_bonus_area) {
993 mutex_enter(&mg->mg_lock);
994 mg->mg_bonus_area = sm->sm_start;
995 mutex_exit(&mg->mg_lock);
998 metaslab_group_sort(msp->ms_group, msp,
999 msp->ms_weight | activation_weight);
1001 ASSERT(sm->sm_loaded);
1002 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1008 metaslab_passivate(metaslab_t *msp, uint64_t size)
1011 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1012 * this metaslab again. In that case, it had better be empty,
1013 * or we would be leaving space on the table.
1015 ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0);
1016 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1017 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1021 * Determine if the in-core space map representation can be condensed on-disk.
1022 * We would like to use the following criteria to make our decision:
1024 * 1. The size of the space map object should not dramatically increase as a
1025 * result of writing out our in-core free map.
1027 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1028 * times the size of the in-core representation (i.e. zfs_condense_pct = 110
1029 * and in-core = 1MB, minimal = 1.1MB).
1031 * Checking the first condition is tricky since we don't want to walk
1032 * the entire AVL tree calculating the estimated on-disk size. Instead we
1033 * use the size-ordered AVL tree in the space map and calculate the
1034 * size required for the largest segment in our in-core free map. If the
1035 * size required to represent that segment on disk is larger than the space
1036 * map object then we avoid condensing this map.
1038 * To determine the second criterion we use a best-case estimate and assume
1039 * each segment can be represented on-disk as a single 64-bit entry. We refer
1040 * to this best-case estimate as the space map's minimal form.
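 *
 * For example, with zfs_condense_pct = 200 and 1000 segments in the
 * in-core free map, the minimal form is 1000 * 8 bytes = 8000 bytes;
 * criterion 2 is satisfied only once smo_objsize reaches 16000 bytes,
 * and criterion 1 additionally requires that the largest single segment
 * would not by itself need more on-disk space than the object already
 * occupies.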
1043 metaslab_should_condense(metaslab_t *msp)
1045 space_map_t *sm = msp->ms_map;
1046 space_map_obj_t *smo = &msp->ms_smo_syncing;
1048 uint64_t size, entries, segsz;
1050 ASSERT(MUTEX_HELD(&msp->ms_lock));
1051 ASSERT(sm->sm_loaded);
1054 * Use the sm_pp_root AVL tree, which is ordered by size, to obtain
1055 * the largest segment in the in-core free map. If the tree is
1056 * empty then we should condense the map.
1058 ss = avl_last(sm->sm_pp_root);
1063 * Calculate the number of 64-bit entries this segment would
1064 * require when written to disk. If this single segment would be
1065 * larger on-disk than the entire current on-disk structure, then
1066 * clearly condensing will increase the on-disk structure size.
1068 size = (ss->ss_end - ss->ss_start) >> sm->sm_shift;
1069 entries = size / (MIN(size, SM_RUN_MAX));
1070 segsz = entries * sizeof (uint64_t);
1072 return (segsz <= smo->smo_objsize &&
1073 smo->smo_objsize >= (zfs_condense_pct *
1074 sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100);
1078 * Condense the on-disk space map representation to its minimized form.
1079 * The minimized form consists of a small number of allocations followed by
1080 * the in-core free map.
1083 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1085 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1086 space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK];
1087 space_map_t condense_map;
1088 space_map_t *sm = msp->ms_map;
1089 objset_t *mos = spa_meta_objset(spa);
1090 space_map_obj_t *smo = &msp->ms_smo_syncing;
1092 ASSERT(MUTEX_HELD(&msp->ms_lock));
1093 ASSERT3U(spa_sync_pass(spa), ==, 1);
1094 ASSERT(sm->sm_loaded);
1096 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1097 "smo size %llu, segments %lu", txg,
1098 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1099 smo->smo_objsize, avl_numnodes(&sm->sm_root));
1102 * Create a map that is 100% allocated. We remove segments
1103 * that have been freed in this txg, any deferred frees that exist,
1104 * and any allocation in the future. Removing segments should be
1105 * a relatively inexpensive operation since we expect these maps to
1106 * contain a small number of nodes.
1108 space_map_create(&condense_map, sm->sm_start, sm->sm_size,
1109 sm->sm_shift, sm->sm_lock);
1110 space_map_add(&condense_map, condense_map.sm_start,
1111 condense_map.sm_size);
1114 * Remove what's been freed in this txg from the condense_map.
1115 * Since we're in sync_pass 1, we know that all the frees from
1116 * this txg are in the freemap.
1118 space_map_walk(freemap, space_map_remove, &condense_map);
1120 for (int t = 0; t < TXG_DEFER_SIZE; t++)
1121 space_map_walk(msp->ms_defermap[t],
1122 space_map_remove, &condense_map);
1124 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1125 space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK],
1126 space_map_remove, &condense_map);
1129 * We're about to drop the metaslab's lock, thus allowing
1130 * other consumers to change its content. Set the
1131 * space_map's sm_condensing flag to ensure that
1132 * allocations on this metaslab do not occur while we're
1133 * in the middle of committing it to disk. This is only critical
1134 * for the ms_map as all other space_maps use per txg
1135 * views of their content.
1137 sm->sm_condensing = B_TRUE;
1139 mutex_exit(&msp->ms_lock);
1140 space_map_truncate(smo, mos, tx);
1141 mutex_enter(&msp->ms_lock);
1144 * While we would ideally like to create a space_map representation
1145 * that consists only of allocation records, doing so can be
1146 * prohibitively expensive because the in-core free map can be
1147 * large, and therefore computationally expensive to subtract
1148 * from the condense_map. Instead we sync out two maps, a cheap
1149 * allocation only map followed by the in-core free map. While not
1150 * optimal, this is typically close to optimal, and much cheaper to compute.
1153 space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx);
1154 space_map_vacate(&condense_map, NULL, NULL);
1155 space_map_destroy(&condense_map);
1157 space_map_sync(sm, SM_FREE, smo, mos, tx);
1158 sm->sm_condensing = B_FALSE;
1160 spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, "
1161 "smo size %llu", txg,
1162 (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
1167 * Write a metaslab to disk in the context of the specified transaction group.
1170 metaslab_sync(metaslab_t *msp, uint64_t txg)
1172 vdev_t *vd = msp->ms_group->mg_vd;
1173 spa_t *spa = vd->vdev_spa;
1174 objset_t *mos = spa_meta_objset(spa);
1175 space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK];
1176 space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK];
1177 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1178 space_map_t *sm = msp->ms_map;
1179 space_map_obj_t *smo = &msp->ms_smo_syncing;
1183 ASSERT(!vd->vdev_ishole);
1186 * This metaslab has just been added so there's no work to do now.
1188 if (*freemap == NULL) {
1189 ASSERT3P(allocmap, ==, NULL);
1193 ASSERT3P(allocmap, !=, NULL);
1194 ASSERT3P(*freemap, !=, NULL);
1195 ASSERT3P(*freed_map, !=, NULL);
1197 if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0)
1201 * The only state that can actually be changing concurrently with
1202 * metaslab_sync() is the metaslab's ms_map. No other thread can
1203 * be modifying this txg's allocmap, freemap, freed_map, or smo.
1204 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
1205 * We drop it whenever we call into the DMU, because the DMU
1206 * can call down to us (e.g. via zio_free()) at any time.
1209 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1211 if (smo->smo_object == 0) {
1212 ASSERT(smo->smo_objsize == 0);
1213 ASSERT(smo->smo_alloc == 0);
1214 smo->smo_object = dmu_object_alloc(mos,
1215 DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
1216 DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
1217 ASSERT(smo->smo_object != 0);
1218 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1219 (sm->sm_start >> vd->vdev_ms_shift),
1220 sizeof (uint64_t), &smo->smo_object, tx);
1223 mutex_enter(&msp->ms_lock);
1225 if (sm->sm_loaded && spa_sync_pass(spa) == 1 &&
1226 metaslab_should_condense(msp)) {
1227 metaslab_condense(msp, txg, tx);
1229 space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
1230 space_map_sync(*freemap, SM_FREE, smo, mos, tx);
1233 space_map_vacate(allocmap, NULL, NULL);
1236 * For sync pass 1, we avoid walking the entire space map and
1237 * instead will just swap the pointers for freemap and
1238 * freed_map. We can safely do this since the freed_map is
1239 * guaranteed to be empty on the initial pass.
1241 if (spa_sync_pass(spa) == 1) {
1242 ASSERT0((*freed_map)->sm_space);
1243 ASSERT0(avl_numnodes(&(*freed_map)->sm_root));
1244 space_map_swap(freemap, freed_map);
1246 space_map_vacate(*freemap, space_map_add, *freed_map);
1249 ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space);
1250 ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space);
1252 mutex_exit(&msp->ms_lock);
1254 VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
1255 dmu_buf_will_dirty(db, tx);
1256 ASSERT3U(db->db_size, >=, sizeof (*smo));
1257 bcopy(smo, db->db_data, sizeof (*smo));
1258 dmu_buf_rele(db, FTAG);
1264 * Called after a transaction group has completely synced to mark
1265 * all of the metaslab's free space as usable.
1268 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1270 space_map_obj_t *smo = &msp->ms_smo;
1271 space_map_obj_t *smosync = &msp->ms_smo_syncing;
1272 space_map_t *sm = msp->ms_map;
1273 space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1274 space_map_t **defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1275 metaslab_group_t *mg = msp->ms_group;
1276 vdev_t *vd = mg->mg_vd;
1277 int64_t alloc_delta, defer_delta;
1279 ASSERT(!vd->vdev_ishole);
1281 mutex_enter(&msp->ms_lock);
1284 * If this metaslab is just becoming available, initialize its
1285 * allocmaps, freemaps, and defermap and add its capacity to the vdev.
1287 if (*freed_map == NULL) {
1288 ASSERT(*defer_map == NULL);
1289 for (int t = 0; t < TXG_SIZE; t++) {
1290 msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t),
1292 space_map_create(msp->ms_allocmap[t], sm->sm_start,
1293 sm->sm_size, sm->sm_shift, sm->sm_lock);
1294 msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t),
1296 space_map_create(msp->ms_freemap[t], sm->sm_start,
1297 sm->sm_size, sm->sm_shift, sm->sm_lock);
1300 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1301 msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t),
1303 space_map_create(msp->ms_defermap[t], sm->sm_start,
1304 sm->sm_size, sm->sm_shift, sm->sm_lock);
1307 freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
1308 defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
1310 vdev_space_update(vd, 0, 0, sm->sm_size);
1313 alloc_delta = smosync->smo_alloc - smo->smo_alloc;
1314 defer_delta = (*freed_map)->sm_space - (*defer_map)->sm_space;
1316 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1318 ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0);
1319 ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0);
1322 * If there's a space_map_load() in progress, wait for it to complete
1323 * so that we have a consistent view of the in-core space map.
1325 space_map_load_wait(sm);
1328 * Move the frees from the defer_map to this map (if it's loaded).
1329 * Swap the freed_map and the defer_map -- this is safe to do
1330 * because we've just emptied out the defer_map.
1332 space_map_vacate(*defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
1333 ASSERT0((*defer_map)->sm_space);
1334 ASSERT0(avl_numnodes(&(*defer_map)->sm_root));
1335 space_map_swap(freed_map, defer_map);
1339 msp->ms_deferspace += defer_delta;
1340 ASSERT3S(msp->ms_deferspace, >=, 0);
1341 ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
1342 if (msp->ms_deferspace != 0) {
1344 * Keep syncing this metaslab until all deferred frees
1345 * are back in circulation.
1347 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1351 * If the map is loaded but no longer active, evict it as soon as all
1352 * future allocations have synced. (If we unloaded it now and then
1353 * loaded a moment later, the map wouldn't reflect those allocations.)
1355 if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1358 for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
1359 if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
1362 if (evictable && !metaslab_debug)
1363 space_map_unload(sm);
1366 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1368 mutex_exit(&msp->ms_lock);
1372 metaslab_sync_reassess(metaslab_group_t *mg)
1374 vdev_t *vd = mg->mg_vd;
1375 int64_t failures = mg->mg_alloc_failures;
1378 * Re-evaluate all metaslabs which have lower offsets than the bonus area.
1381 for (int m = 0; m < vd->vdev_ms_count; m++) {
1382 metaslab_t *msp = vd->vdev_ms[m];
1384 if (msp->ms_map->sm_start > mg->mg_bonus_area)
1387 mutex_enter(&msp->ms_lock);
1388 metaslab_group_sort(mg, msp, metaslab_weight(msp));
1389 mutex_exit(&msp->ms_lock);
1392 atomic_add_64(&mg->mg_alloc_failures, -failures);
1395 * Prefetch the next potential metaslabs
1397 metaslab_prefetch(mg);
1401 metaslab_distance(metaslab_t *msp, dva_t *dva)
1403 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
1404 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
1405 uint64_t start = msp->ms_map->sm_start >> ms_shift;
1407 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
1408 return (1ULL << 63);
1411 return ((start - offset) << ms_shift);
1413 return ((offset - start) << ms_shift);
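/*
 * The result is a byte distance rounded to a metaslab boundary; DVAs on
 * a different top-level vdev are treated as infinitely far away
 * (1ULL << 63), so metaslab_group_alloc() never rejects a metaslab on
 * account of a copy that lives on another vdev.
 */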
1418 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
1419 uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
1421 spa_t *spa = mg->mg_vd->vdev_spa;
1422 metaslab_t *msp = NULL;
1423 uint64_t offset = -1ULL;
1424 avl_tree_t *t = &mg->mg_metaslab_tree;
1425 uint64_t activation_weight;
1426 uint64_t target_distance;
1429 activation_weight = METASLAB_WEIGHT_PRIMARY;
1430 for (i = 0; i < d; i++) {
1431 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
1432 activation_weight = METASLAB_WEIGHT_SECONDARY;
1438 boolean_t was_active;
1440 mutex_enter(&mg->mg_lock);
1441 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
1442 if (msp->ms_weight < asize) {
1443 spa_dbgmsg(spa, "%s: failed to meet weight "
1444 "requirement: vdev %llu, txg %llu, mg %p, "
1445 "msp %p, psize %llu, asize %llu, "
1446 "failures %llu, weight %llu",
1447 spa_name(spa), mg->mg_vd->vdev_id, txg,
1448 mg, msp, psize, asize,
1449 mg->mg_alloc_failures, msp->ms_weight);
1450 mutex_exit(&mg->mg_lock);
1455 * If the selected metaslab is condensing, skip it.
1457 if (msp->ms_map->sm_condensing)
1460 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
1461 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
1464 target_distance = min_distance +
1465 (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
1467 for (i = 0; i < d; i++)
1468 if (metaslab_distance(msp, &dva[i]) <
1474 mutex_exit(&mg->mg_lock);
1479 * If we've already reached the allowable number of failed
1480 * allocation attempts on this metaslab group then we
1481 * consider skipping it. We skip it only if we're allowed
1482 * to "fast" gang, the physical size is larger than
1483 * a gang block, and we're attempting to allocate from
1484 * the primary metaslab.
1486 if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
1487 CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
1488 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1489 spa_dbgmsg(spa, "%s: skipping metaslab group: "
1490 "vdev %llu, txg %llu, mg %p, psize %llu, "
1491 "asize %llu, failures %llu", spa_name(spa),
1492 mg->mg_vd->vdev_id, txg, mg, psize, asize,
1493 mg->mg_alloc_failures);
1497 mutex_enter(&msp->ms_lock);
1500 * Ensure that the metaslab we have selected is still
1501 * capable of handling our request. It's possible that
1502 * another thread may have changed the weight while we
1503 * were blocked on the metaslab lock.
1505 if (msp->ms_weight < asize || (was_active &&
1506 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
1507 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
1508 mutex_exit(&msp->ms_lock);
1512 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
1513 activation_weight == METASLAB_WEIGHT_PRIMARY) {
1514 metaslab_passivate(msp,
1515 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
1516 mutex_exit(&msp->ms_lock);
1520 if (metaslab_activate(msp, activation_weight) != 0) {
1521 mutex_exit(&msp->ms_lock);
1526 * If this metaslab is currently condensing then pick again as
1527 * we can't manipulate this metaslab until it's committed to disk.
1530 if (msp->ms_map->sm_condensing) {
1531 mutex_exit(&msp->ms_lock);
1535 if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL)
1538 atomic_inc_64(&mg->mg_alloc_failures);
1540 metaslab_passivate(msp, space_map_maxsize(msp->ms_map));
1542 mutex_exit(&msp->ms_lock);
1545 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1546 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
1548 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize);
1550 mutex_exit(&msp->ms_lock);
1556 * Allocate a block for the specified i/o.
1559 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
1560 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
1562 metaslab_group_t *mg, *rotor;
1566 int zio_lock = B_FALSE;
1567 boolean_t allocatable;
1568 uint64_t offset = -1ULL;
1572 ASSERT(!DVA_IS_VALID(&dva[d]));
1575 * For testing, make some blocks above a certain size be gang blocks.
1577 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
1578 return (SET_ERROR(ENOSPC));
1581 * Start at the rotor and loop through all mgs until we find something.
1582 * Note that there's no locking on mc_rotor or mc_aliquot because
1583 * nothing actually breaks if we miss a few updates -- we just won't
1584 * allocate quite as evenly. It all balances out over time.
1586 * If we are doing ditto or log blocks, try to spread them across
1587 * consecutive vdevs. If we're forced to reuse a vdev before we've
1588 * allocated all of our ditto blocks, then try and spread them out on
1589 * that vdev as much as possible. If it turns out to not be possible,
1590 * gradually lower our standards until anything becomes acceptable.
1591 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
1592 * gives us hope of containing our fault domains to something we're
1593 * able to reason about. Otherwise, any two top-level vdev failures
1594 * will guarantee the loss of data. With consecutive allocation,
1595 * only two adjacent top-level vdev failures will result in data loss.
1597 * If we are doing gang blocks (hintdva is non-NULL), try to keep
1598 * ourselves on the same vdev as our gang block header. That
1599 * way, we can hope for locality in vdev_cache, plus it makes our
1600 * fault domains something tractable.
1603 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
1606 * It's possible the vdev we're using as the hint no
1607 * longer exists (i.e. removed). Consult the rotor when all else fails.
1613 if (flags & METASLAB_HINTBP_AVOID &&
1614 mg->mg_next != NULL)
1619 } else if (d != 0) {
1620 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
1621 mg = vd->vdev_mg->mg_next;
1627 * If the hint put us into the wrong metaslab class, or into a
1628 * metaslab group that has been passivated, just follow the rotor.
1630 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
1637 ASSERT(mg->mg_activation_count == 1);
1642 * Don't allocate from faulted devices.
1645 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
1646 allocatable = vdev_allocatable(vd);
1647 spa_config_exit(spa, SCL_ZIO, FTAG);
1649 allocatable = vdev_allocatable(vd);
1655 * Avoid writing single-copy data to a failing vdev
1656 * unless the user instructs us that it is okay.
1658 if ((vd->vdev_stat.vs_write_errors > 0 ||
1659 vd->vdev_state < VDEV_STATE_HEALTHY) &&
1660 d == 0 && dshift == 3 &&
1661 !(zfs_write_to_degraded && vd->vdev_state ==
1662 VDEV_STATE_DEGRADED)) {
1667 ASSERT(mg->mg_class == mc);
1669 distance = vd->vdev_asize >> dshift;
1670 if (distance <= (1ULL << vd->vdev_ms_shift))
1675 asize = vdev_psize_to_asize(vd, psize);
1676 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
1678 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
1680 if (offset != -1ULL) {
1682 * If we've just selected this metaslab group,
1683 * figure out whether the corresponding vdev is
1684 * over- or under-used relative to the pool,
1685 * and set an allocation bias to even it out.
1687 if (mc->mc_aliquot == 0) {
1688 vdev_stat_t *vs = &vd->vdev_stat;
1691 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
1692 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
1695 * Calculate how much more or less we should
1696 * try to allocate from this device during
1697 * this iteration around the rotor.
1698 * For example, if a device is 80% full
1699 * and the pool is 20% full then we should
1700 * reduce allocations by 60% on this device.
1702 * mg_bias = (20 - 80) * 512K / 100 = -307K
1704 * This reduces allocations by 307K for this
1707 mg->mg_bias = ((cu - vu) *
1708 (int64_t)mg->mg_aliquot) / 100;
1711 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
1712 mg->mg_aliquot + mg->mg_bias) {
1713 mc->mc_rotor = mg->mg_next;
1717 DVA_SET_VDEV(&dva[d], vd->vdev_id);
1718 DVA_SET_OFFSET(&dva[d], offset);
1719 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
1720 DVA_SET_ASIZE(&dva[d], asize);
1725 mc->mc_rotor = mg->mg_next;
1727 } while ((mg = mg->mg_next) != rotor);
1731 ASSERT(dshift < 64);
1735 if (!allocatable && !zio_lock) {
1741 bzero(&dva[d], sizeof (dva_t));
1743 return (SET_ERROR(ENOSPC));
1747 * Free the block represented by DVA in the context of the specified
1748 * transaction group.
1751 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
1753 uint64_t vdev = DVA_GET_VDEV(dva);
1754 uint64_t offset = DVA_GET_OFFSET(dva);
1755 uint64_t size = DVA_GET_ASIZE(dva);
1759 ASSERT(DVA_IS_VALID(dva));
1761 if (txg > spa_freeze_txg(spa))
1764 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1765 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
1766 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
1767 (u_longlong_t)vdev, (u_longlong_t)offset);
1772 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1774 if (DVA_GET_GANG(dva))
1775 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1777 mutex_enter(&msp->ms_lock);
1780 space_map_remove(msp->ms_allocmap[txg & TXG_MASK],
1782 space_map_free(msp->ms_map, offset, size);
1784 if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0)
1785 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1786 space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size);
1789 mutex_exit(&msp->ms_lock);
1793 * Intent log support: upon opening the pool after a crash, notify the SPA
1794 * of blocks that the intent log has allocated for immediate write, but
1795 * which are still considered free by the SPA because the last transaction
1796 * group didn't commit yet.
1799 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
1801 uint64_t vdev = DVA_GET_VDEV(dva);
1802 uint64_t offset = DVA_GET_OFFSET(dva);
1803 uint64_t size = DVA_GET_ASIZE(dva);
1808 ASSERT(DVA_IS_VALID(dva));
1810 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
1811 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
1812 return (SET_ERROR(ENXIO));
1814 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
1816 if (DVA_GET_GANG(dva))
1817 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
1819 mutex_enter(&msp->ms_lock);
1821 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded)
1822 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
1824 if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
1825 error = SET_ERROR(ENOENT);
1827 if (error || txg == 0) { /* txg == 0 indicates dry run */
1828 mutex_exit(&msp->ms_lock);
1832 space_map_claim(msp->ms_map, offset, size);
1834 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
1835 if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
1836 vdev_dirty(vd, VDD_METASLAB, msp, txg);
1837 space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size);
1840 mutex_exit(&msp->ms_lock);
1846 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
1847 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
1849 dva_t *dva = bp->blk_dva;
1850 dva_t *hintdva = hintbp->blk_dva;
1853 ASSERT(bp->blk_birth == 0);
1854 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
1856 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1858 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
1859 spa_config_exit(spa, SCL_ALLOC, FTAG);
1860 return (SET_ERROR(ENOSPC));
1863 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
1864 ASSERT(BP_GET_NDVAS(bp) == 0);
1865 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
1867 for (int d = 0; d < ndvas; d++) {
1868 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
1871 for (d--; d >= 0; d--) {
1872 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
1873 bzero(&dva[d], sizeof (dva_t));
1875 spa_config_exit(spa, SCL_ALLOC, FTAG);
1880 ASSERT(BP_GET_NDVAS(bp) == ndvas);
1882 spa_config_exit(spa, SCL_ALLOC, FTAG);
1884 BP_SET_BIRTH(bp, txg, txg);
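/*
 * A sketch of how metaslab_alloc() is typically driven (the real callers
 * are elsewhere in the SPA, e.g. zio_dva_allocate()): the writer asks for
 * ndvas copies of a psize-byte block and, on success, the DVAs and birth
 * txg have been filled into *bp.
 *
 *	error = metaslab_alloc(spa, mc, psize, bp, ndvas, txg, hintbp, 0);
 *	if (error == 0)
 *		... issue the write for bp ...
 *	else
 *		... retry as a gang block or return ENOSPC ...
 */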
1890 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
1892 const dva_t *dva = bp->blk_dva;
1893 int ndvas = BP_GET_NDVAS(bp);
1895 ASSERT(!BP_IS_HOLE(bp));
1896 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
1898 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
1900 for (int d = 0; d < ndvas; d++)
1901 metaslab_free_dva(spa, &dva[d], txg, now);
1903 spa_config_exit(spa, SCL_FREE, FTAG);
1907 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
1909 const dva_t *dva = bp->blk_dva;
1910 int ndvas = BP_GET_NDVAS(bp);
1913 ASSERT(!BP_IS_HOLE(bp));
1917 * First do a dry run to make sure all DVAs are claimable,
1918 * so we don't have to unwind from partial failures below.
1920 if ((error = metaslab_claim(spa, bp, 0)) != 0)
1924 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
1926 for (int d = 0; d < ndvas; d++)
1927 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
1930 spa_config_exit(spa, SCL_ALLOC, FTAG);
1932 ASSERT(error == 0 || txg == 0);
1938 checkmap(space_map_t *sm, uint64_t off, uint64_t size)
1943 mutex_enter(sm->sm_lock);
1944 ss = space_map_find(sm, off, size, &where);
1946 panic("freeing free block; ss=%p", (void *)ss);
1947 mutex_exit(sm->sm_lock);
1951 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
1953 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
1956 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1957 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
1958 uint64_t vdid = DVA_GET_VDEV(&bp->blk_dva[i]);
1959 vdev_t *vd = vdev_lookup_top(spa, vdid);
1960 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[i]);
1961 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
1962 metaslab_t *ms = vd->vdev_ms[off >> vd->vdev_ms_shift];
1964 if (ms->ms_map->sm_loaded)
1965 checkmap(ms->ms_map, off, size);
1967 for (int j = 0; j < TXG_SIZE; j++)
1968 checkmap(ms->ms_freemap[j], off, size);
1969 for (int j = 0; j < TXG_DEFER_SIZE; j++)
1970 checkmap(ms->ms_defermap[j], off, size);
1972 spa_config_exit(spa, SCL_VDEV, FTAG);