/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */
#ifndef	_SYS_METASLAB_IMPL_H
#define	_SYS_METASLAB_IMPL_H

#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * Metaslab allocation tracing record.
 */
typedef struct metaslab_alloc_trace {
	list_node_t		mat_list_node;
	metaslab_group_t	*mat_mg;
	metaslab_t		*mat_msp;
	uint64_t		mat_size;
	uint64_t		mat_weight;
	uint32_t		mat_dva_id;
	uint64_t		mat_offset;
	int			mat_allocator;
} metaslab_alloc_trace_t;
/*
 * Used by the metaslab allocation tracing facility to indicate
 * error conditions. These errors are stored to the offset member
 * of the metaslab_alloc_trace_t record and displayed by mdb.
 */
typedef enum trace_alloc_type {
	TRACE_ALLOC_FAILURE	= -1ULL,
	TRACE_TOO_SMALL		= -2ULL,
	TRACE_FORCE_GANG	= -3ULL,
	TRACE_NOT_ALLOCATABLE	= -4ULL,
	TRACE_GROUP_FAILURE	= -5ULL,
	TRACE_ENOSPC		= -6ULL,
	TRACE_CONDENSING	= -7ULL,
	TRACE_VDEV_ERROR	= -8ULL,
	TRACE_INITIALIZING	= -9ULL
} trace_alloc_type_t;
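
/*
 * Illustrative sketch (not part of the upstream header): because every
 * TRACE_* sentinel sits at the very top of the unsigned 64-bit range,
 * a trace offset can be classified as an error condition by comparing
 * it against the numerically smallest sentinel, TRACE_INITIALIZING.
 * Assumes the compiler retains the full 64-bit enumerator values, as
 * this file already relies on.
 */
static inline boolean_t
metaslab_trace_offset_is_error(uint64_t offset)
{
	/* real offsets are small; sentinels are >= -9ULL */
	return (offset >= (uint64_t)TRACE_INITIALIZING ? B_TRUE : B_FALSE);
}
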
#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_WEIGHT_CLAIM		(1ULL << 61)
#define	METASLAB_WEIGHT_TYPE		(1ULL << 60)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY | \
	METASLAB_WEIGHT_CLAIM)
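
/*
 * Illustrative sketch (not part of the upstream header): a metaslab
 * counts as "active" if any of the primary, secondary, or claim
 * activation bits is set in its weight.
 */
static inline boolean_t
metaslab_weight_is_active(uint64_t weight)
{
	return ((weight & METASLAB_ACTIVE_MASK) != 0 ? B_TRUE : B_FALSE);
}
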
/*
 * The metaslab weight is used to encode the amount of free space in a
 * metaslab, such that the "best" metaslab appears first when sorting the
 * metaslabs by weight. The weight (and therefore the "best" metaslab) can
 * be determined in two different ways: by computing a weighted sum of all
 * the free space in the metaslab (a space based weight) or by counting only
 * the free segments of the largest size (a segment based weight). We prefer
 * the segment based weight because it reflects how the free space is
 * comprised, but we cannot always use it -- legacy pools do not have the
 * space map histogram information necessary to determine the largest
 * contiguous regions. Pools that have the space map histogram determine
 * the segment weight by looking at each bucket in the histogram and
 * determining the free space whose size in bytes is in the range:
 *	[2^i, 2^(i+1))
 * We then encode the largest index, i, that contains regions into the
 * segment-weighted value.
 *
 * Space-based weight:
 *
 *      64      56      48      40      32      24      16      8       0
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *      |PSC1|                  weighted-free space                     |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	space - the fragmentation-weighted space
 *
 * Segment-based weight:
 *
 *      64      56      48      40      32      24      16      8       0
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *      |PSC0| idx|            count of segments in region              |
 *      +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 *	PS - indicates primary and secondary activation
 *	C - indicates activation for claimed block zio
 *	idx - index for the highest bucket in the histogram
 *	count - number of segments in the specified bucket
 */
#define	WEIGHT_GET_ACTIVE(weight)	BF64_GET((weight), 61, 3)
#define	WEIGHT_SET_ACTIVE(weight, x)	BF64_SET((weight), 61, 3, x)

#define	WEIGHT_IS_SPACEBASED(weight)	\
	((weight) == 0 || BF64_GET((weight), 60, 1))
#define	WEIGHT_SET_SPACEBASED(weight)	BF64_SET((weight), 60, 1, 1)

/*
 * These macros are only applicable to segment-based weighting.
 */
#define	WEIGHT_GET_INDEX(weight)	BF64_GET((weight), 54, 6)
#define	WEIGHT_SET_INDEX(weight, x)	BF64_SET((weight), 54, 6, x)
#define	WEIGHT_GET_COUNT(weight)	BF64_GET((weight), 0, 54)
#define	WEIGHT_SET_COUNT(weight, x)	BF64_SET((weight), 0, 54, x)
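
/*
 * Illustrative sketch (not part of the upstream header): decoding a
 * weight with the accessors above. For a segment-based weight, the
 * largest free segments fall in the range [1 << idx, 1 << (idx + 1))
 * and there are "count" of them.
 */
static inline void
metaslab_weight_decode_example(uint64_t weight, uint64_t *idx, uint64_t *count)
{
	if (WEIGHT_IS_SPACEBASED(weight))
		return;				/* no index/count encoded */
	*idx = WEIGHT_GET_INDEX(weight);	/* highest occupied bucket */
	*count = WEIGHT_GET_COUNT(weight);	/* segments in that bucket */
}
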
/*
 * A metaslab class encompasses a category of allocatable top-level vdevs.
 * Each top-level vdev is associated with a metaslab group which defines
 * the allocatable region for that vdev. Examples of these categories include
 * "normal" for data block allocations (i.e. main pool allocations) or "log"
 * for allocations designated for intent log devices (i.e. slog devices).
 * When a block allocation is requested from the SPA it is associated with a
 * metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
 * to the class can be used to satisfy that request. Allocations are done
 * by traversing the metaslab groups that are linked off of the mc_rotor field.
 * This rotor points to the next metaslab group where allocations will be
 * attempted. Allocating a block is a 3 step process -- select the metaslab
 * group, select the metaslab, and then allocate the block. The metaslab
 * class defines the low-level block allocator that will be used as the
 * final step in allocation. These allocators are pluggable allowing each class
 * to use a block allocator that best suits that class.
 */
struct metaslab_class {
	metaslab_group_t	*mc_rotor;
	metaslab_ops_t		*mc_ops;

	/*
	 * Track the number of metaslab groups that have been initialized
	 * and can accept allocations. An initialized metaslab group is
	 * one that has been completely added to the config (i.e. we have
	 * updated the MOS config and the space has been added to the pool).
	 */
	uint64_t		mc_groups;

	/*
	 * Toggle to enable/disable the allocation throttle.
	 */
	boolean_t		mc_alloc_throttle_enabled;

	/*
	 * The allocation throttle works on a reservation system. Whenever
	 * an asynchronous zio wants to perform an allocation it must
	 * first reserve the number of blocks that it wants to allocate.
	 * If there aren't sufficient slots available for the pending zio
	 * then that I/O is throttled until more slots free up. The current
	 * number of reserved allocations is maintained by the mc_alloc_slots
	 * refcount. The mc_alloc_max_slots value determines the maximum
	 * number of allocations that the system allows. Gang blocks are
	 * allowed to reserve slots even if we've reached the maximum
	 * number of allocations allowed.
	 */
	uint64_t		*mc_alloc_max_slots;
	refcount_t		*mc_alloc_slots;

	uint64_t		mc_alloc_groups; /* # of allocatable groups */

	uint64_t		mc_alloc;	/* total allocated space */
	uint64_t		mc_deferred;	/* total deferred frees */
	uint64_t		mc_space;	/* total space (alloc + free) */
	uint64_t		mc_dspace;	/* total deflated space */
	uint64_t		mc_minblocksize;
	uint64_t		mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
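
/*
 * Illustrative sketch of the reservation scheme described above
 * (hypothetical helper; the real logic lives in metaslab.c). A zio
 * reserves "slots" allocations from one allocator, and callers that
 * must not fail (e.g. gang blocks) may exceed the maximum.
 */
static inline boolean_t
metaslab_class_reserve_example(metaslab_class_t *mc, int allocator,
    int slots, void *zio, boolean_t must_reserve)
{
	if (must_reserve ||
	    (uint64_t)(refcount_count(&mc->mc_alloc_slots[allocator]) +
	    slots) <= mc->mc_alloc_max_slots[allocator]) {
		(void) refcount_add_many(&mc->mc_alloc_slots[allocator],
		    slots, zio);
		return (B_TRUE);
	}
	return (B_FALSE);	/* caller must throttle the zio */
}
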
/*
 * Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
 * of a top-level vdev. They are linked together to form a circular linked
 * list and can belong to only one metaslab class. Metaslab groups may become
 * ineligible for allocations for a number of reasons such as limited free
 * space, fragmentation, or going offline. When this happens the allocator will
 * simply find the next metaslab group in the linked list and attempt
 * to allocate from that group instead.
 */
struct metaslab_group {
	kmutex_t		mg_lock;
	metaslab_t		**mg_primaries;
	metaslab_t		**mg_secondaries;
	avl_tree_t		mg_metaslab_tree;

	boolean_t		mg_allocatable;		/* can we allocate? */
	uint64_t		mg_ms_ready;

	/*
	 * A metaslab group is considered to be initialized only after
	 * we have updated the MOS config and added the space to the pool.
	 * We only allow allocation attempts to a metaslab group if it
	 * has been initialized.
	 */
	boolean_t		mg_initialized;

	uint64_t		mg_free_capacity;	/* percentage free */

	int64_t			mg_activation_count;
	metaslab_class_t	*mg_class;

	metaslab_group_t	*mg_prev;
	metaslab_group_t	*mg_next;

	/*
	 * In order for the allocation throttle to function properly, we cannot
	 * have too many IOs going to each disk by default; the throttle
	 * operates by allocating more work to disks that finish quickly, so
	 * allocating larger chunks to each disk reduces its effectiveness.
	 * However, if the number of IOs going to each allocator is too small,
	 * we will not perform proper aggregation at the vdev_queue layer,
	 * also resulting in decreased performance. Therefore, we will use a
	 * ramp-up strategy.
	 *
	 * Each allocator in each metaslab group has a current queue depth
	 * (mg_alloc_queue_depth[allocator]) and a current max queue depth
	 * (mg_cur_max_alloc_queue_depth[allocator]), and each metaslab group
	 * has an absolute max queue depth (mg_max_alloc_queue_depth). We
	 * add IOs to an allocator until the mg_alloc_queue_depth for that
	 * allocator hits the cur_max. Every time an IO completes for a given
	 * allocator on a given metaslab group, we increment its cur_max until
	 * it reaches mg_max_alloc_queue_depth. The cur_max resets every txg to
	 * help protect against disks that decrease in performance over time.
	 *
	 * It's possible for an allocator to handle more allocations than
	 * its max. This can occur when gang blocks are required or when other
	 * groups are unable to handle their share of allocations.
	 */
	uint64_t		mg_max_alloc_queue_depth;
	uint64_t		*mg_cur_max_alloc_queue_depth;
	refcount_t		*mg_alloc_queue_depth;

	/*
	 * A metaslab group that can no longer allocate the minimum block
	 * size will set mg_no_free_space. Once a metaslab group is out
	 * of space then its share of work must be distributed to other
	 * groups.
	 */
	boolean_t		mg_no_free_space;

	uint64_t		mg_allocations;
	uint64_t		mg_failed_allocations;
	uint64_t		mg_fragmentation;
	uint64_t		mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];

	int			mg_ms_initializing;
	boolean_t		mg_initialize_updating;
	kmutex_t		mg_ms_initialize_lock;
	kcondvar_t		mg_ms_initialize_cv;
};
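
/*
 * Illustrative sketch of the queue-depth ramp described above
 * (hypothetical helper; the real update happens in the zio completion
 * path). Each completed allocation lets an allocator's current max
 * queue depth creep toward the group-wide absolute max; the cur_max
 * values are reset each txg elsewhere.
 */
static inline void
metaslab_group_io_done_example(metaslab_group_t *mg, int allocator)
{
	uint64_t *cur = &mg->mg_cur_max_alloc_queue_depth[allocator];

	if (*cur < mg->mg_max_alloc_queue_depth)
		(*cur)++;
}
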
/*
 * This value defines the number of elements in the ms_lbas array. The value
 * of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
 * This is the equivalent of highbit(UINT64_MAX).
 */
#define	MAX_LBAS	64

/*
 * Each metaslab maintains a set of in-core trees to track metaslab
 * operations. The in-core free tree (ms_allocatable) contains the list of
 * free segments which are eligible for allocation. As blocks are
 * allocated, the allocated segments are removed from the ms_allocatable and
 * added to a per txg allocation tree (ms_allocating). As blocks are
 * freed, they are added to the free tree (ms_freeing). These trees
 * allow us to process all allocations and frees in syncing context
 * where it is safe to update the on-disk space maps. An additional set
 * of in-core trees is maintained to track deferred frees
 * (ms_defer). Once a block is freed it will move from the
 * ms_freed to the ms_defer tree. A deferred free means that a block
 * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
 * 50 will not be available for reallocation until txg 52 (50 +
 * TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool could be safely rolled back TXG_DEFER_SIZE transaction
 * groups and ensure that no block has been reallocated.
 *
 * The simplified transition diagram looks like this:
 *
 *      ALLOCATE
 *         |
 *         V
 *    free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
 *         ^
 *         |                            ms_freeing <--- FREE
 *         |                                 |
 *         |                                 v
 *         |                             ms_freed
 *         |                                 |
 *         +-------- ms_defer[2] <-----------+-------> (write to space map)
 *
 * Each metaslab's space is tracked in a single space map in the MOS,
 * which is only updated in syncing context. Each time we sync a txg,
 * we append the allocs and frees from that txg to the space map. The
 * pool space is only updated once all metaslabs have finished syncing.
 *
 * To load the in-core free tree we read the space map from disk. This
 * object contains a series of alloc and free records that are combined
 * to make up the list of all free segments in this metaslab. These
 * segments are represented in-core by the ms_allocatable and are stored
 * in an AVL tree.
 *
 * As the space map grows (as a result of the appends) it will
 * eventually become space-inefficient. When the metaslab's in-core
 * free tree is zfs_condense_pct/100 times the size of the minimal
 * on-disk representation, we rewrite it in its minimized form. If a
 * metaslab needs to condense then we must set the ms_condensing flag to
 * ensure that allocations are not performed on the metaslab that is
 * being written to.
 */
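
/*
 * Illustrative sketch of the deferred-free arithmetic described above:
 * a block freed in txg T may not be reallocated before txg
 * T + TXG_DEFER_SIZE, which is what makes uberblock rollback safe.
 */
static inline uint64_t
metaslab_earliest_realloc_txg_example(uint64_t freed_txg)
{
	return (freed_txg + TXG_DEFER_SIZE);
}
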
struct metaslab {
	kmutex_t	ms_lock;
	kmutex_t	ms_sync_lock;
	kcondvar_t	ms_load_cv;

	uint64_t	ms_fragmentation;

	range_tree_t	*ms_allocating[TXG_SIZE];
	range_tree_t	*ms_allocatable;

	/*
	 * The following range trees are accessed only from syncing context.
	 * ms_free*tree only have entries while syncing, and are empty
	 * between syncs.
	 */
	range_tree_t	*ms_freeing;	/* to free this syncing txg */
	range_tree_t	*ms_freed;	/* already freed this syncing txg */
	range_tree_t	*ms_defer[TXG_DEFER_SIZE];
	range_tree_t	*ms_checkpointing; /* to add to the checkpoint */

	boolean_t	ms_condensing;	/* condensing? */
	boolean_t	ms_condense_wanted;
	uint64_t	ms_condense_checked_txg;

	uint64_t	ms_initializing; /* leaves initializing this ms */

	/*
	 * We must hold both ms_lock and ms_group->mg_lock in order to
	 * modify ms_loaded.
	 */
	boolean_t	ms_loaded;
	boolean_t	ms_loading;

	int64_t		ms_deferspace;	/* sum of ms_defermap[] space */
	uint64_t	ms_weight;	/* weight vs. others in group */
	uint64_t	ms_activation_weight;	/* activation weight */

	/*
	 * Keeps track of when a metaslab was last selected for loading or
	 * allocation. We use this value to determine how long the metaslab
	 * should stay loaded.
	 */
	uint64_t	ms_selected_txg;

	uint64_t	ms_alloc_txg;	/* last successful alloc (debug only) */
	uint64_t	ms_max_size;	/* maximum allocatable size */

	/*
	 * -1 if it's not active in an allocator, otherwise set to the
	 * allocator this metaslab is active for.
	 */
	int		ms_allocator;
	boolean_t	ms_primary;	/* Only valid if ms_allocator is not -1 */

	/*
	 * The metaslab block allocators can optionally use a size-ordered
	 * range tree and/or an array of LBAs. Not all allocators use
	 * this functionality. The ms_allocatable_by_size should always
	 * contain the same number of segments as the ms_allocatable. The
	 * only difference is that the ms_allocatable_by_size is ordered by
	 * segment sizes.
	 */
	avl_tree_t	ms_allocatable_by_size;
	uint64_t	ms_lbas[MAX_LBAS];

	metaslab_group_t *ms_group;	/* metaslab group */
	avl_node_t	ms_group_node;	/* node in metaslab group tree */
	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links */
};
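
/*
 * Illustrative sketch (hypothetical helper; the real checks live in
 * metaslab.c): allocations must skip a metaslab while it is being
 * condensed or while any of its leaf vdev ranges are still
 * initializing.
 */
static inline boolean_t
metaslab_usable_example(const metaslab_t *msp)
{
	return ((!msp->ms_condensing && msp->ms_initializing == 0) ?
	    B_TRUE : B_FALSE);
}
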

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_METASLAB_IMPL_H */