/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);
/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more I/O bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;
/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes). This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K).
 */
int space_map_ibs = 14;

SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
    &space_map_ibs, 0, "Space map indirect block shift");
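
/*
 * Worked example (derived from the comment above): appending one 4K data
 * block to a spacemap also dirties the indirect blocks above it. With the
 * default 128K indirect block size, a 4K append can dirty 4K + 128K = 132K,
 * roughly 33x inflation. With space_map_ibs = 14 (2^14 = 16K), the same
 * append dirties 4K + 16K-32K = 20K-36K, roughly 5-9x inflation.
 */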
static boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

static boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

static boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
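
/*
 * Illustrative sketch (not part of the upstream code): every raw word in a
 * space map block falls into exactly one of the three classes tested above.
 * The SM_ENTRY_KIND_EXAMPLE guard is hypothetical and only keeps this
 * sketch out of the build. Bit layouts are defined in sys/space_map.h.
 */
#ifdef SM_ENTRY_KIND_EXAMPLE
static const char *
sm_entry_kind(uint64_t e)
{
	if (sm_entry_is_debug(e))
		return ("debug");	/* txg/sync-pass annotation */
	if (sm_entry_is_single_word(e))
		return ("one-word");	/* offset, type and run in one word */
	ASSERT(sm_entry_is_double_word(e));
	return ("two-word");		/* first word of a two-word entry */
}
#endif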
/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t sm_len = space_map_length(sm);
	ASSERT3U(sm->sm_blksz, !=, 0);

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, sm_len,
	    ZIO_PRIORITY_SYNC_READ);

	uint64_t blksz = sm->sm_blksz;
	int error = 0;
	for (uint64_t block_base = 0; block_base < sm_len && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(sm_len - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e))	/* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}

		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
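
/*
 * Illustrative sketch (not part of the upstream code): a minimal sm_cb_t
 * callback that tallies the net allocated space described by a space map,
 * showing how space_map_iterate() presents decoded entries. The name
 * sm_net_alloc_cb and the SM_ITERATE_EXAMPLE guard are hypothetical.
 */
#ifdef SM_ITERATE_EXAMPLE
static int
sm_net_alloc_cb(space_map_entry_t *sme, void *arg)
{
	int64_t *net = arg;

	if (sme->sme_type == SM_ALLOC)
		*net += sme->sme_run;
	else
		*net -= sme->sme_run;
	return (0);	/* a non-zero return stops the iteration */
}
#endif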
/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_objsize - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy its entries incrementally starting from
	 * the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry we
	 *    decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3U(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_objsize -= words * sizeof (uint64_t);
			space_map_update(sm);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
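
/*
 * Illustrative sketch (not part of the upstream code): a callback suitable
 * for space_map_incremental_destroy(). Each entry is handed back to a
 * caller-supplied range tree before it is deleted from the end of the
 * space map. The name sm_destroy_add_cb and the guard are hypothetical.
 */
#ifdef SM_DESTROY_EXAMPLE
static int
sm_destroy_add_cb(space_map_entry_t *sme, void *arg)
{
	range_tree_t *rt = arg;

	if (sme->sme_type == SM_FREE)
		range_tree_add(rt, sme->sme_offset, sme->sme_run);
	return (0);
}
#endif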
typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
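
/*
 * Illustrative sketch (not part of the upstream code): loading the free
 * segments of a space map into an empty range tree. In the SM_FREE case
 * the tree is seeded with the entire [sm_start, sm_start + sm_size) range
 * and ALLOC entries are then carved out by the callback, so the result
 * reflects the synced on-disk free space. The name sm_load_free_example
 * and the guard are hypothetical.
 */
#ifdef SM_LOAD_EXAMPLE
static int
sm_load_free_example(space_map_t *sm, range_tree_t *rt)
{
	/* rt must be empty; space_map_load() verifies this */
	return (space_map_load(sm, rt, SM_FREE));
}
#endif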
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

static boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 (16).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
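
/*
 * Worked example of the bucket math above, assuming sm_shift = 9:
 * range tree bucket i = 9 (512-byte segments) maps to space map bucket
 * idx = 0 with no scaling (9 - 0 - 9 = 0). Once idx has saturated at
 * SPACE_MAP_HISTOGRAM_SIZE - 1 = 31, bucket i = 44 adds its count scaled
 * by 2^(44 - 31 - 9) = 2^4, which matches the 5 * 2^4 example in the
 * comment above.
 */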
static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_objsize,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_objsize += sizeof (dentry);
}
/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, range_seg_t *rs, maptype_t maptype,
    uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp, void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * if this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_objsize - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
	uint64_t start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rs->rs_start, >=, sm->sm_start);
	ASSERT3U(rs->rs_start, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rs->rs_end - rs->rs_start, <=, sm->sm_size);
	ASSERT3U(rs->rs_end, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_objsize;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_objsize += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
		}

		sm->sm_phys->smp_objsize += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
}
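
/*
 * Worked example (derived from the loop above): runs are expressed in
 * units of 2^sm_shift bytes, and a single entry can hold at most run_max
 * such units. A segment longer than run_max << sm_shift bytes is therefore
 * written as several maximal entries plus a remainder. Independently, when
 * a two-word entry would start on the last word of a block, an all-zero
 * debug entry pads that word so that the two words of an entry never
 * straddle a block boundary.
 */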
/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_objsize;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_objsize;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	avl_tree_t *t = &rt->rt_root;
	for (range_seg_t *rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t offset = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
		uint64_t length = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs, maptype, vdev_id, words,
		    &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be equal or less
	 * than whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_objsize);
}
/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = avl_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);
	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
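
/*
 * Illustrative sketch (not part of the upstream code): a typical syncing-
 * context caller condenses a range tree into the space map and then empties
 * the tree. The name sm_sync_example and the guard are hypothetical.
 */
#ifdef SM_WRITE_EXAMPLE
static void
sm_sync_example(space_map_t *sm, range_tree_t *allocs, dmu_tx_t *tx)
{
	/* the caller provides synchronization; see the note above */
	space_map_write(sm, allocs, SM_ALLOC, SM_NO_VDEVID, tx);
	range_tree_vacate(allocs, NULL, NULL);
}
#endif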
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error != 0)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);
	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}
/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}
uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}
void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}
/*
 * Given a range tree, it makes a worst-case estimate of how much space
 * the tree's segments would take if they were written to the given
 * space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: Buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries enabled, but it can easily be applied when
	 * the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
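
/*
 * Worked example of the bucket-group math above (assuming SM_RUN_BITS = 15
 * and SM2_RUN_BITS = 36, as defined in sys/space_map.h) with sm_shift = 9:
 * a one-word run tops out at 2^(15+9) bytes, so buckets 0 through 23 form
 * the one-word group. A two-word run tops out at 2^(36+9) bytes, so buckets
 * 24 through 44 form the two-word group; each segment in bucket 45 then
 * needs 2 two-word entries, bucket 46 needs 4, and so on, doubling per
 * bucket.
 */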
uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}