/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * Allow allocations to switch to gang blocks quickly. We do this to
 * avoid having to load lots of space_maps in a given txg. There are,
 * however, some cases where we want to avoid "fast" ganging and instead
 * we want to do an exhaustive search of all metaslabs on this device.
 * Currently we don't allow any gang, zil, or dump device related allocations
 * to "fast" gang.
 */
#define	CAN_FASTGANG(flags) \
	(!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
	METASLAB_GANG_AVOID)))
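
/*
 * Illustrative examples (added; not in the original source):
 *
 *	CAN_FASTGANG(0)				!= 0	ordinary allocation
 *	CAN_FASTGANG(METASLAB_GANG_CHILD)	== 0	already ganging
 *
 * i.e. any gang-related flag disables "fast" ganging for that allocation.
 */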

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space_map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;
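
/*
 * Worked example (added for illustration): with the default
 * zfs_condense_pct = 200, a space map whose minimal in-core form would
 * sync as 1MB of entries is condensed only once its on-disk object has
 * grown to at least 2MB; see metaslab_should_condense() below.
 */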

/*
 * This value defines the number of allowed allocation failures per vdev.
 * If a device reaches this threshold in a given txg then we consider skipping
 * allocations on that device.
 */
int zfs_mg_alloc_failures = 0;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_failures, CTLFLAG_RDTUN,
    &zfs_mg_alloc_failures, 0,
    "Number of allowed allocation failures per vdev");
TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures);

/*
 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
 */
static int metaslab_debug = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
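
/*
 * Illustrative example (added): with metaslab_df_free_pct = 4, a 10GB
 * metaslab abandons first-fit once less than ~400MB of it remains free
 * (or once no free segment can satisfy metaslab_df_alloc_threshold);
 * see metaslab_df_alloc() below.
 */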

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Max number of space_maps to prefetch.
 */
int metaslab_prefetch_limit = SPA_DVAS_PER_BP;

/*
 * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
int metaslab_smo_bonus_pct = 150;

/*
 * Should we be willing to write data to degraded vdevs?
 */
boolean_t zfs_write_to_degraded = B_FALSE;
SYSCTL_INT(_vfs_zfs, OID_AUTO, write_to_degraded, CTLFLAG_RWTUN,
    &zfs_write_to_degraded, 0, "Allow writing data to degraded vdevs");
TUNABLE_INT("vfs.zfs.write_to_degraded", &zfs_write_to_degraded);

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

void
metaslab_class_minblocksize_update(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;
	uint64_t minashift = UINT64_MAX;

	if ((mg = mc->mc_rotor) == NULL) {
		mc->mc_minblocksize = SPA_MINBLOCKSIZE;
		return;
	}

	do {
		vd = mg->mg_vd;
		if (vd->vdev_ashift < minashift)
			minashift = vd->vdev_ashift;
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	mc->mc_minblocksize = 1ULL << minashift;
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

uint64_t
metaslab_class_get_minblocksize(metaslab_class_t *mc)
{
	return (mc->mc_minblocksize);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map->sm_start < m2->ms_map->sm_start)
		return (-1);
	if (m1->ms_map->sm_start > m2->ms_map->sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}
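
/*
 * Note (added for clarity): the comparator above sorts descending by
 * weight -- a heavier metaslab compares as "less" -- so avl_first() of a
 * group's tree always yields the most desirable metaslab to try first.
 */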

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
	metaslab_class_minblocksize_update(mc);
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	metaslab_class_minblocksize_update(mc);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */
static int
metaslab_segsize_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}
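
/*
 * Worked example (added for illustration): callers keep one cursor per
 * power-of-two alignment in the 64-entry array hung off sm_ppd.  For a
 * 4K allocation, align = size & -size = 4096 and the cursor used is
 * sm_ppd[highbit(4096) - 1] = sm_ppd[12], so same-sized allocations
 * march forward through the map instead of rescanning from offset 0.
 */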

static void
metaslab_pp_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
	avl_create(sm->sm_pp_root, metaslab_segsize_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_pp_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}

/* ARGSUSED */
static void
metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/*
 * Return the maximum contiguous segment within the metaslab.
 */
static uint64_t
metaslab_pp_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}

/* ARGSUSED */
static boolean_t
metaslab_ff_fragmented(space_map_t *sm)
{
	return (B_TRUE);
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ff_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ff_fragmented
};

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static boolean_t
metaslab_df_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	if (max_size >= metaslab_df_alloc_threshold &&
	    free_pct >= metaslab_df_free_pct)
		return (B_FALSE);

	return (B_TRUE);
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_df_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_df_fragmented
};
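
/*
 * Illustrative example (added): on a metaslab with sm_size = 10GB and
 * only sm_space = 200MB free, free_pct = 2 < metaslab_df_free_pct (4),
 * so metaslab_df_alloc() abandons the offset-ordered first-fit walk and
 * allocates from sm_pp_root, the size-ordered tree, instead.
 */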

/*
 * ==========================================================================
 * Other experimental allocators
 * ==========================================================================
 */
static uint64_t
metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd;
	uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	uint64_t rsize = size;
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ASSERT3U(*extent_end, >=, *cursor);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if ((*cursor + size) > *extent_end) {
		t = sm->sm_pp_root;
		*cursor = *extent_end = 0;

		if (max_size > 2 * SPA_MAXBLOCKSIZE)
			rsize = MIN(metaslab_min_alloc_size, max_size);
		offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
		if (offset != -1ULL)
			*cursor = offset + size;
	} else {
		offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
	}
	ASSERT3U(*cursor, <=, *extent_end);

	return (offset);
}

static boolean_t
metaslab_cdf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size * 10))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_cdf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_cdf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_cdf_fragmented
};

uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, ssearch;
	uint64_t hbit = highbit(size);
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + hbit - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
		t = sm->sm_pp_root;

		ssearch.ss_start = 0;
		ssearch.ss_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		ss = avl_find(t, &ssearch, &where);
		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);
		ASSERT(ss != NULL);
	}

	if (ss->ss_start + size <= ss->ss_end) {
		*cursor = ss->ss_start + size;
		return (ss->ss_start);
	}
	return (-1ULL);
}

static boolean_t
metaslab_ndf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size << metaslab_ndf_clump_shift))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_ndf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ndf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ndf_fragmented
};
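
/*
 * Note (added for clarity): with metaslab_ndf_clump_shift = 4, a miss at
 * the cursor makes metaslab_ndf_alloc() look for a free segment of up to
 * 1ULL << (hbit + 4) bytes -- 16x-32x the request size -- trying to keep
 * allocations of a given size clumped together.
 */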

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	msp->ms_map = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
	space_map_create(msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	if (metaslab_debug && smo->smo_object != 0) {
		mutex_enter(&msp->ms_lock);
		VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
		    SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
		mutex_exit(&msp->ms_lock);
	}

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	vdev_space_update(mg->mg_vd,
	    -msp->ms_smo.smo_alloc, 0, -msp->ms_map->sm_size);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(msp->ms_map);
	space_map_destroy(msp->ms_map);
	kmem_free(msp->ms_map, sizeof (*msp->ms_map));

	for (int t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(msp->ms_allocmap[t]);
		space_map_destroy(msp->ms_freemap[t]);
		kmem_free(msp->ms_allocmap[t], sizeof (*msp->ms_allocmap[t]));
		kmem_free(msp->ms_freemap[t], sizeof (*msp->ms_freemap[t]));
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		space_map_destroy(msp->ms_defermap[t]);
		kmem_free(msp->ms_defermap[t], sizeof (*msp->ms_defermap[t]));
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
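
/*
 * Note (added for clarity): a metaslab's weight is its free-space-derived
 * value with the two high bits reserved as activation flags; an activated
 * primary metaslab carries (weight | METASLAB_WEIGHT_PRIMARY), which also
 * sorts active metaslabs ahead of inactive ones of equal free space.
 */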

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * This vdev is in the process of being removed so there is nothing
	 * for us to do here.
	 */
	if (vd->vdev_removing) {
		ASSERT0(smo->smo_alloc);
		ASSERT0(vd->vdev_ms_shift);
		return (0);
	}

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);
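
	/*
	 * Worked example (added for illustration): for the first metaslab
	 * on a vdev (index 0) the expression above yields 2 * space; for
	 * the last (index vdev_ms_count - 1) it yields just over
	 * 1 * space, interpolating linearly in between.
	 */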

	/*
	 * For locality, assign higher weight to metaslabs which have
	 * a lower offset than what we've already activated.
	 */
	if (sm->sm_start <= mg->mg_bonus_area)
		weight *= (metaslab_smo_bonus_pct / 100);
	ASSERT(weight >= space &&
	    weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);

	if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
		/*
		 * If this metaslab is one we're actively using, adjust its
		 * weight to make it preferable to any inactive metaslab so
		 * we'll polish it off.
		 */
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}
	return (weight);
}

static void
metaslab_prefetch(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m;

	mutex_enter(&mg->mg_lock);

	/*
	 * Prefetch the next potential metaslabs
	 */
	for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
		space_map_t *sm = msp->ms_map;
		space_map_obj_t *smo = &msp->ms_smo;

		/* If we have reached our prefetch limit then we're done */
		if (m >= metaslab_prefetch_limit)
			break;

		if (!sm->sm_loaded && smo->smo_object != 0) {
			mutex_exit(&mg->mg_lock);
			dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
			    0ULL, smo->smo_objsize);
			mutex_enter(&mg->mg_lock);
		}
	}
	mutex_exit(&mg->mg_lock);
}

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		space_map_load_wait(sm);
		if (!sm->sm_loaded) {
			space_map_obj_t *smo = &msp->ms_smo;

			int error = space_map_load(sm, sm_ops, SM_FREE, smo,
			    spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
			for (int t = 0; t < TXG_DEFER_SIZE; t++)
				space_map_walk(msp->ms_defermap[t],
				    space_map_claim, sm);
		}

		/*
		 * Track the bonus area as we activate new metaslabs.
		 */
		if (sm->sm_start > mg->mg_bonus_area) {
			mutex_enter(&mg->mg_lock);
			mg->mg_bonus_area = sm->sm_start;
			mutex_exit(&mg->mg_lock);
		}

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map->sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Determine if the in-core space map representation can be condensed on-disk.
 * We would like to use the following criteria to make our decision:
 *
 * 1. The size of the space map object should not dramatically increase as a
 * result of writing out our in-core free map.
 *
 * 2. The on-disk space map representation is at least zfs_condense_pct/100
 * times the size of the minimal in-core representation (i.e. with
 * zfs_condense_pct = 110 and a minimal form of 1MB, the on-disk object must
 * be at least 1.1MB before we condense).
 *
 * Checking the first condition is tricky since we don't want to walk
 * the entire AVL tree calculating the estimated on-disk size. Instead we
 * use the size-ordered AVL tree in the space map and calculate the
 * size required for the largest segment in our in-core free map. If the
 * size required to represent that segment on disk is larger than the space
 * map object then we avoid condensing this map.
 *
 * To determine the second criterion we use a best-case estimate and assume
 * each segment can be represented on-disk as a single 64-bit entry. We refer
 * to this best-case estimate as the space map's minimal form.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	space_seg_t *ss;
	uint64_t size, entries, segsz;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(sm->sm_loaded);

	/*
	 * Use the sm_pp_root AVL tree, which is ordered by size, to obtain
	 * the largest segment in the in-core free map. If the tree is
	 * empty then we should condense the map.
	 */
	ss = avl_last(sm->sm_pp_root);
	if (ss == NULL)
		return (B_TRUE);

	/*
	 * Calculate the number of 64-bit entries this segment would
	 * require when written to disk. If this single segment would be
	 * larger on-disk than the entire current on-disk structure, then
	 * clearly condensing will increase the on-disk structure size.
	 */
	size = (ss->ss_end - ss->ss_start) >> sm->sm_shift;
	entries = size / (MIN(size, SM_RUN_MAX));
	segsz = entries * sizeof (uint64_t);

	return (segsz <= smo->smo_objsize &&
	    smo->smo_objsize >= (zfs_condense_pct *
	    sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) / 100);
}
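
/*
 * Worked example (added for illustration): a loaded map with 1,000
 * in-core segments has a minimal form of 1,000 * 8 = 8,000 bytes.  With
 * zfs_condense_pct = 200 it is condensed only once smo_objsize reaches
 * 16,000 bytes, and only if its largest single segment would not itself
 * sync out larger than the current on-disk object.
 */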

/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed by
 * the in-core free map.
 */
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	space_map_t *freemap = msp->ms_freemap[txg & TXG_MASK];
	space_map_t condense_map;
	space_map_t *sm = msp->ms_map;
	objset_t *mos = spa_meta_objset(spa);
	space_map_obj_t *smo = &msp->ms_smo_syncing;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(sm->sm_loaded);

	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
	    "smo size %llu, segments %lu", txg,
	    (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
	    smo->smo_objsize, avl_numnodes(&sm->sm_root));

	/*
	 * Create a map that is a 100% allocated map. We remove segments
	 * that have been freed in this txg, any deferred frees that exist,
	 * and any allocation in the future. Removing segments should be
	 * a relatively inexpensive operation since we expect these maps to
	 * contain a small number of nodes.
	 */
	space_map_create(&condense_map, sm->sm_start, sm->sm_size,
	    sm->sm_shift, sm->sm_lock);
	space_map_add(&condense_map, condense_map.sm_start,
	    condense_map.sm_size);

	/*
	 * Remove what's been freed in this txg from the condense_map.
	 * Since we're in sync_pass 1, we know that all the frees from
	 * this txg are in the freemap.
	 */
	space_map_walk(freemap, space_map_remove, &condense_map);

	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		space_map_walk(msp->ms_defermap[t],
		    space_map_remove, &condense_map);

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
		space_map_walk(msp->ms_allocmap[(txg + t) & TXG_MASK],
		    space_map_remove, &condense_map);

	/*
	 * We're about to drop the metaslab's lock thus allowing
	 * other consumers to change its content. Set the
	 * space_map's sm_condensing flag to ensure that
	 * allocations on this metaslab do not occur while we're
	 * in the middle of committing it to disk. This is only critical
	 * for the ms_map as all other space_maps use per txg
	 * views of their content.
	 */
	sm->sm_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	space_map_truncate(smo, mos, tx);
	mutex_enter(&msp->ms_lock);

	/*
	 * While we would ideally like to create a space_map representation
	 * that consists only of allocation records, doing so can be
	 * prohibitively expensive because the in-core free map can be
	 * large, and therefore computationally expensive to subtract
	 * from the condense_map. Instead we sync out two maps, a cheap
	 * allocation only map followed by the in-core free map. While not
	 * optimal, this is typically close to optimal, and much cheaper to
	 * compute.
	 */
	space_map_sync(&condense_map, SM_ALLOC, smo, mos, tx);
	space_map_vacate(&condense_map, NULL, NULL);
	space_map_destroy(&condense_map);

	space_map_sync(sm, SM_FREE, smo, mos, tx);
	sm->sm_condensing = B_FALSE;

	spa_dbgmsg(spa, "condensed: txg %llu, msp[%llu] %p, "
	    "smo size %llu", txg,
	    (msp->ms_map->sm_start / msp->ms_map->sm_size), msp,
	    smo->smo_objsize);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	space_map_t *allocmap = msp->ms_allocmap[txg & TXG_MASK];
	space_map_t **freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added so there's no work to do now.
	 */
	if (*freemap == NULL) {
		ASSERT3P(allocmap, ==, NULL);
		return;
	}

	ASSERT3P(allocmap, !=, NULL);
	ASSERT3P(*freemap, !=, NULL);
	ASSERT3P(*freed_map, !=, NULL);

	if (allocmap->sm_space == 0 && (*freemap)->sm_space == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
	}

	mutex_enter(&msp->ms_lock);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 &&
	    metaslab_should_condense(msp)) {
		metaslab_condense(msp, txg, tx);
	} else {
		space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
		space_map_sync(*freemap, SM_FREE, smo, mos, tx);
	}

	space_map_vacate(allocmap, NULL, NULL);

	/*
	 * For sync pass 1, we avoid walking the entire space map and
	 * instead will just swap the pointers for freemap and
	 * freed_map. We can safely do this since the freed_map is
	 * guaranteed to be empty on the initial pass.
	 */
	if (spa_sync_pass(spa) == 1) {
		ASSERT0((*freed_map)->sm_space);
		ASSERT0(avl_numnodes(&(*freed_map)->sm_root));
		space_map_swap(freemap, freed_map);
	} else {
		space_map_vacate(*freemap, space_map_add, *freed_map);
	}

	ASSERT0(msp->ms_allocmap[txg & TXG_MASK]->sm_space);
	ASSERT0(msp->ms_freemap[txg & TXG_MASK]->sm_space);

	mutex_exit(&msp->ms_lock);

	VERIFY0(dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = msp->ms_map;
	space_map_t **freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t **defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int64_t alloc_delta, defer_delta;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps, freemaps, and defermap and add its capacity to the vdev.
	 */
	if (*freed_map == NULL) {
		ASSERT(*defer_map == NULL);
		for (int t = 0; t < TXG_SIZE; t++) {
			msp->ms_allocmap[t] = kmem_zalloc(sizeof (space_map_t),
			    KM_SLEEP);
			space_map_create(msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			msp->ms_freemap[t] = kmem_zalloc(sizeof (space_map_t),
			    KM_SLEEP);
			space_map_create(msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			msp->ms_defermap[t] = kmem_zalloc(sizeof (space_map_t),
			    KM_SLEEP);
			space_map_create(msp->ms_defermap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
		defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];

		vdev_space_update(vd, 0, 0, sm->sm_size);
	}

	alloc_delta = smosync->smo_alloc - smo->smo_alloc;
	defer_delta = (*freed_map)->sm_space - (*defer_map)->sm_space;

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK]->sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 */
	space_map_load_wait(sm);

	/*
	 * Move the frees from the defer_map to this map (if it's loaded).
	 * Swap the freed_map and the defer_map -- this is safe to do
	 * because we've just emptied out the defer_map.
	 */
	space_map_vacate(*defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
	ASSERT0((*defer_map)->sm_space);
	ASSERT0(avl_numnodes(&(*defer_map)->sm_root));
	space_map_swap(freed_map, defer_map);

	*smo = *smosync;

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
				evictable = 0;

		if (evictable && !metaslab_debug)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	int64_t failures = mg->mg_alloc_failures;

	/*
	 * Re-evaluate all metaslabs which have lower offsets than the
	 * bonus area.
	 */
	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_map->sm_start > mg->mg_bonus_area)
			break;

		mutex_enter(&msp->ms_lock);
		metaslab_group_sort(mg, msp, metaslab_weight(msp));
		mutex_exit(&msp->ms_lock);
	}

	atomic_add_64(&mg->mg_alloc_failures, -failures);

	/*
	 * Prefetch the next potential metaslabs
	 */
	metaslab_prefetch(mg);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map->sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (start < offset)
		return ((offset - start) << ms_shift);
	return (0);
}
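
/*
 * Illustrative example (added): with a 16GB metaslab (ms_shift = 34), a
 * DVA in metaslab 10 of a vdev is (4 << 34) bytes "distant" from that
 * vdev's metaslab 14, while a DVA on any other vdev reports 1ULL << 63,
 * effectively infinite.
 */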

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
    uint64_t txg, uint64_t min_distance, dva_t *dva, int d, int flags)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < asize) {
				spa_dbgmsg(spa, "%s: failed to meet weight "
				    "requirement: vdev %llu, txg %llu, mg %p, "
				    "msp %p, psize %llu, asize %llu, "
				    "failures %llu, weight %llu",
				    spa_name(spa), mg->mg_vd->vdev_id, txg,
				    mg, msp, psize, asize,
				    mg->mg_alloc_failures, msp->ms_weight);
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			/*
			 * If the selected metaslab is condensing, skip it.
			 */
			if (msp->ms_map->sm_condensing)
				continue;

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		/*
		 * If we've already reached the allowable number of failed
		 * allocation attempts on this metaslab group then we
		 * consider skipping it. We skip it only if we're allowed
		 * to "fast" gang, the physical size is larger than
		 * a gang block, and we're attempting to allocate from
		 * the primary metaslab.
		 */
		if (mg->mg_alloc_failures > zfs_mg_alloc_failures &&
		    CAN_FASTGANG(flags) && psize > SPA_GANGBLOCKSIZE &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			spa_dbgmsg(spa, "%s: skipping metaslab group: "
			    "vdev %llu, txg %llu, mg %p, psize %llu, "
			    "asize %llu, failures %llu", spa_name(spa),
			    mg->mg_vd->vdev_id, txg, mg, psize, asize,
			    mg->mg_alloc_failures);
			return (-1ULL);
		}

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < asize || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If this metaslab is currently condensing then pick again as
		 * we can't manipulate this metaslab until it's committed
		 * to disk.
		 */
		if (msp->ms_map->sm_condensing) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(msp->ms_map, asize)) != -1ULL)
			break;

		atomic_inc_64(&mg->mg_alloc_failures);

		metaslab_passivate(msp, space_map_maxsize(msp->ms_map));

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, asize);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (SET_ERROR(ENOSPC));

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}

		/*
		 * Avoid writing single-copy data to a failing vdev
		 * unless the user instructs us that it is okay.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3 &&
		    !(zfs_write_to_degraded && vd->vdev_state ==
		    VDEV_STATE_DEGRADED)) {
			allocatable = B_FALSE;
		}

		if (!allocatable)
			goto next;

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
		    dva, d, flags);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);

				/*
				 * Calculate how much more or less we should
				 * try to allocate from this device during
				 * this iteration around the rotor.
				 * For example, if a device is 80% full
				 * and the pool is 20% full then we should
				 * reduce allocations by 60% on this device.
				 *
				 * mg_bias = (20 - 80) * 512K / 100 = -307K
				 *
				 * This reduces allocations by 307K for this
				 * iteration.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / 100;
			}

			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (SET_ERROR(ENOSPC));
}
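
/*
 * Note (added for clarity): the retry ladder above first relaxes the
 * ditto-block distance requirement (each dshift increment halves the
 * required separation) and, as a last resort, retries with zio_lock set,
 * re-checking vdev_allocatable() under the SCL_ZIO config lock before
 * giving up with ENOSPC.
 */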

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK]->sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (SET_ERROR(ENXIO));

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map->sm_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);

	if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
		error = SET_ERROR(ENOENT);

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK]->sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}

static void
checkmap(space_map_t *sm, uint64_t off, uint64_t size)
{
	space_seg_t *ss;
	avl_index_t where;

	mutex_enter(sm->sm_lock);
	ss = space_map_find(sm, off, size, &where);
	if (ss != NULL)
		panic("freeing free block; ss=%p", (void *)ss);
	mutex_exit(sm->sm_lock);
}

void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
		return;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
		uint64_t vdid = DVA_GET_VDEV(&bp->blk_dva[i]);
		vdev_t *vd = vdev_lookup_top(spa, vdid);
		uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[i]);
		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
		metaslab_t *ms = vd->vdev_ms[off >> vd->vdev_ms_shift];

		if (ms->ms_map->sm_loaded)
			checkmap(ms->ms_map, off, size);

		for (int j = 0; j < TXG_SIZE; j++)
			checkmap(ms->ms_freemap[j], off, size);
		for (int j = 0; j < TXG_DEFER_SIZE; j++)
			checkmap(ms->ms_defermap[j], off, size);
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
}