/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * This value controls how the space map's block size is allowed to grow.
 * If the value is set to the same size as SPACE_MAP_INITIAL_BLOCKSIZE then
 * the space map block size will remain fixed. Setting this value to something
 * greater than SPACE_MAP_INITIAL_BLOCKSIZE will allow the space map to
 * increase its block size as needed. To maintain backwards compatibility the
 * space map's block size must be a power of 2 and SPACE_MAP_INITIAL_BLOCKSIZE
 * must be less than or equal to space_map_max_blksz.
 */
int space_map_max_blksz = (1 << 12);
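
/*
 * Note: 1 << 12 is 4K which, assuming SPACE_MAP_BLOCKSHIFT is 12, equals
 * SPACE_MAP_INITIAL_BLOCKSIZE; with that default the block size never
 * actually grows unless this tunable is raised.
 */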

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree; other segment types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));
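
	/*
	 * A FREE space map is loaded by starting with the entire range
	 * marked free in the tree and removing each ALLOC segment read
	 * below, so the space we expect to end up with is sm_size minus
	 * the allocated space recorded on disk.
	 */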
	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(sm->sm_os, space_map_object(sm), bufsize,
		    end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
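
		/*
		 * Each entry is one 64-bit word. Assuming the standard
		 * SM_* encoding macros from sys/space_map.h: the high bit
		 * is a debug flag, followed by a 47-bit offset in units
		 * of 1 << sm_shift, a 1-bit maptype (alloc vs. free), and
		 * a 15-bit run length stored as (length - 1), so a single
		 * entry can describe runs of 1 through SM_RUN_MAX units.
		 */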
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}
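
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * with sm_lock held, populate an empty range tree with the map's free
 * segments:
 *
 *	mutex_enter(sm->sm_lock);
 *	error = space_map_load(sm, rt, SM_FREE);
 *	mutex_exit(sm->sm_lock);
 */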

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * between 2^sm_shift and 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. If the
		 * range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4
		 * (i.e. 80) in the last bucket, since a 2^44 range is 2^4
		 * times the size of a 2^40 range.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE(sm) - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE(sm));
		}
	}
}
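
/*
 * For example (assuming SM_RUN_MAX is 2^15 units, the largest run one
 * entry can encode): with sm_shift = 9, a 32 MB segment spans 2^16
 * sectors and costs howmany(2^16, 2^15) = 2 entries in the function
 * below, while any segment of SM_RUN_MAX sectors or fewer costs 1.
 */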
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space maps always have a debug entry, so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

void
space_map_set_blocksize(space_map_t *sm, uint64_t size, dmu_tx_t *tx)
{
	uint32_t blksz;
	u_longlong_t blocks;

	ASSERT3U(sm->sm_blksz, !=, 0);
	ASSERT3U(space_map_object(sm), !=, 0);
	ASSERT(sm->sm_dbuf != NULL);
	VERIFY(ISP2(space_map_max_blksz));

	if (sm->sm_blksz >= space_map_max_blksz)
		return;

	/*
	 * The object contains more than one block so we can't adjust
	 * its size.
	 */
	if (sm->sm_phys->smp_objsize > sm->sm_blksz)
		return;

	if (size > sm->sm_blksz) {
		uint64_t newsz;

		/*
		 * Older software versions treat space map blocks as fixed
		 * entities. The DMU is capable of handling different block
		 * sizes, making it possible for us to increase the
		 * block size and maintain backwards compatibility. The
		 * caveat is that the new block size must be a
		 * power of 2 so that old software can append to the file,
		 * adding more blocks. The block size can grow until it
		 * reaches space_map_max_blksz.
		 */
		newsz = ISP2(size) ? size : 1ULL << highbit(size);
		if (newsz > space_map_max_blksz)
			newsz = space_map_max_blksz;

		VERIFY0(dmu_object_set_blocksize(sm->sm_os,
		    space_map_object(sm), newsz, 0, tx));
		dmu_object_size_from_db(sm->sm_dbuf, &blksz, &blocks);

		zfs_dbgmsg("txg %llu, spa %s, increasing blksz from %d to %d",
		    dmu_tx_get_txg(tx), spa_name(dmu_objset_spa(sm->sm_os)),
		    sm->sm_blksz, blksz);

		VERIFY3U(newsz, ==, blksz);
		VERIFY3U(sm->sm_blksz, <, blksz);
		sm->sm_blksz = blksz;
	}
}
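
/*
 * Example for space_map_set_blocksize() above (assuming highbit(x)
 * returns the 1-based index of the highest bit set, as in the illumos
 * bit routines): a requested size of 9000 bytes is not a power of 2,
 * so newsz becomes 1ULL << highbit(9000) = 16384; i.e. the size is
 * rounded up to the next power of 2 before being clamped to
 * space_map_max_blksz.
 */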

/*
 * Note: space_map_write() will drop the range tree lock (rt_lock) across
 * dmu_write() calls. The caller must be OK with this.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t newsz, expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	/*
	 * Calculate the new size for the space map on-disk and see if
	 * we can grow the block size to accommodate the new size.
	 */
	newsz = sm->sm_phys->smp_objsize + expected_entries * sizeof (uint64_t);
	space_map_set_blocksize(sm, newsz, tx);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
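
	/*
	 * actual_entries was initialized to 1 to account for this debug
	 * entry, which records the action, sync pass, and txg of the
	 * write and is skipped by space_map_load().
	 */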

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}
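
/*
 * Replace the space map's backing object with a freshly allocated one.
 * space_map_truncate() uses this below when the existing object's bonus
 * size or block size no longer matches what the pool's feature set
 * requires.
 */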
static void
space_map_reallocate(space_map_t *sm, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	space_map_free(sm, tx);
	dmu_buf_rele(sm->sm_dbuf, sm);

	sm->sm_object = space_map_alloc(sm->sm_os, tx);
	VERIFY0(space_map_open_impl(sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
	dmu_object_info_t doi;
	int bonuslen;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	if (bonuslen != doi.doi_bonus_size ||
	    doi.doi_data_block_size != SPACE_MAP_INITIAL_BLOCKSIZE) {
		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
		    "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);
		space_map_reallocate(sm, tx);
		VERIFY3U(sm->sm_blksz, ==, SPACE_MAP_INITIAL_BLOCKSIZE);
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}
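
/*
 * After space_map_update() the in-core sm_alloc and sm_length match what
 * was most recently synced to disk; the space_map_allocated() and
 * space_map_length() accessors below report these synced values.
 */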

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		spa_feature_incr(spa, space_map_histogram, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, SPACE_MAP_INITIAL_BLOCKSIZE,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}
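
/*
 * A hypothetical caller pairs space_map_alloc() with space_map_open()
 * (variable names here are illustrative only):
 *
 *	object = space_map_alloc(os, tx);
 *	VERIFY0(space_map_open(&sm, os, object, start, size, shift, lock));
 */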

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;
	zfeature_info_t *space_map_histogram =
	    &spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM];

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, space_map_histogram)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa, space_map_histogram));
			spa_feature_decr(spa, space_map_histogram, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
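
/*
 * In other words, smp_alloc also reflects the allocations and frees of
 * the currently syncing txg that space_map_update() has not yet copied
 * into sm_alloc.
 */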