/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);
/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end;
	int error = 0;

	end = space_map_length(sm);

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}

	for (offset = 0; offset < end && error == 0; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end && error == 0;
		    entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			error = callback(SM_TYPE_DECODE(e), offset, size, arg);
		}
	}

	zio_buf_free(entry_map, bufsize);
	return (error);
}
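/*
 * Example (illustrative only; count_free_cb is not part of this file):
 * counting the free segments recorded in a space map with
 * space_map_iterate().
 *
 *	static int
 *	count_free_cb(maptype_t type, uint64_t offset, uint64_t size,
 *	    void *arg)
 *	{
 *		uint64_t *count = arg;
 *		if (type == SM_FREE)
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t nfree = 0;
 *	int error = space_map_iterate(sm, count_free_cb, &nfree);
 */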
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsize, len;
	uint64_t *entry_map;
	int error = 0;

	len = space_map_length(sm);
	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Since we can't move the starting offset of the space map
	 * (e.g. there are references on-disk pointing to it), we destroy
	 * its entries incrementally starting from the end.
	 *
	 * The logic that follows is basically the same as the one used
	 * in space_map_iterate() but it traverses the space map
	 * backwards:
	 *
	 * 1] We figure out the size of the buffer that we want to use
	 *    to read the on-disk space map entries.
	 * 2] We figure out the offset at the end of the space map where
	 *    we will start reading entries into our buffer.
	 * 3] We read the on-disk entries into the buffer.
	 * 4] We iterate over the entries from end to beginning calling
	 *    the callback function on each one. As we move from entry
	 *    to entry we decrease the size of the space map, effectively
	 *    deleting each entry.
	 * 5] If there are no more entries in the space map or the
	 *    callback returns a value other than 0, we stop iterating
	 *    over the space map. If there are entries remaining and
	 *    the callback returned zero we go back to step [1].
	 */
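	/*
	 * For illustration (an assumed example, not upstream text): with
	 * bufsize == 4096 and an initial len == 10240, and a callback
	 * that keeps returning 0, the loop below reads 4096 bytes at
	 * offset 6144, then 4096 bytes at offset 2048, then 2048 bytes
	 * at offset 0, destroying entries from the end until len == 0.
	 */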
	uint64_t offset = 0, size = 0;
	while (len > 0 && error == 0) {
		size = MIN(bufsize, len);

		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY3U(size, >, 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		offset = len - size;

		IMPLY(bufsize > len, offset == 0);
		IMPLY(bufsize == len, offset == 0);
		IMPLY(bufsize < len, offset > 0);

		EQUIV(size == len, offset == 0);
		IMPLY(size < len, bufsize < len);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm),
		    offset, size, entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		uint64_t num_entries = size / sizeof (uint64_t);

		ASSERT3U(num_entries, >, 0);

		while (num_entries > 0) {
			uint64_t e, entry_offset, entry_size;
			maptype_t type;

			e = entry_map[num_entries - 1];

			ASSERT3U(num_entries, >, 0);
			ASSERT0(error);

			if (SM_DEBUG_DECODE(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				len -= sizeof (uint64_t);
				num_entries--;
				continue;
			}

			type = SM_TYPE_DECODE(e);
			entry_offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			entry_size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_size, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset + entry_size, <=,
			    sm->sm_start + sm->sm_size);

			error = callback(type, entry_offset, entry_size, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_size;
			else
				sm->sm_phys->smp_alloc += entry_size;

			sm->sm_phys->smp_objsize -= sizeof (uint64_t);
			space_map_update(sm);
			len -= sizeof (uint64_t);
			num_entries--;
		}
		IMPLY(error == 0, num_entries == 0);
		EQUIV(offset == 0 && error == 0, len == 0 && num_entries == 0);
	}

	if (error == 0) {
		ASSERT0(offset);
		ASSERT0(len);
		ASSERT0(sm->sm_length);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(entry_map, bufsize);
	return (error);
}
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
    void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, offset, size);
	} else {
		range_tree_remove(smla->smla_rt, offset, size);
	}

	return (0);
}
/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
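/*
 * Example (illustrative only, assuming the two-argument
 * range_tree_create() of this codebase): loading the free segments of
 * a space map into a fresh range tree and then discarding them.
 *
 *	range_tree_t *rt = range_tree_create(NULL, NULL);
 *	if (space_map_load(sm, rt, SM_FREE) == 0) {
 *		... consume rt ...
 *		range_tree_vacate(rt, NULL, NULL);
 *	}
 *	range_tree_destroy(rt);
 */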
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}
boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);
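		/*
		 * (Illustrative example: with sm_shift == 9, a segment
		 * counted in rt_histogram[12], i.e. a 4K-8K range, is
		 * added unscaled to smp_histogram[3], since idx tracks
		 * i - sm_shift until the last bucket is reached.)
		 */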
		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space_maps always have a debug entry so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}
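/*
 * For illustration (an assumed example): with sm_shift == 9, a 40MB
 * segment spans 40MB >> 9 == 81920 units; assuming the one-word entry
 * format's 15-bit run field (SM_RUN_MAX == 32768 units), it requires
 * howmany(81920, 32768) == 3 entries.
 */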
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}
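/*
 * For illustration (an assumed example): with sm_shift == 9 and
 * sm_start == 0, a 2KB allocation at offset 0x2000 is written by
 * space_map_write() as the single word SM_OFFSET_ENCODE(0x10) |
 * SM_TYPE_ENCODE(SM_ALLOC) | SM_RUN_ENCODE(4), since
 * 0x2000 >> 9 == 0x10 and 0x800 >> 9 == 4.
 */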
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}
int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}
void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
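/*
 * Example (illustrative; smobj, start, size, and shift are
 * placeholders): the typical open/load/close pattern.
 *
 *	space_map_t *sm = NULL;
 *	if (space_map_open(&sm, os, smobj, start, size, shift) == 0) {
 *		error = space_map_load(sm, rt, SM_FREE);
 *		space_map_close(sm);
 *	}
 */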
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}
/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}
uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os, DMU_OT_SPACE_MAP, blocksize,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}
void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}
void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}