/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	/*
	 * Overlapping segments compare equal, so avl_find() returns
	 * any segment that overlaps the search segment.
	 */
	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
	kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY3U(sm->sm_space, ==, 0);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}
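
/*
 * Illustrative sketch (not part of the original file): a minimal
 * create/add/remove/destroy cycle honoring the locking contract noted
 * above.  The guard macro, function name, and lock are hypothetical.
 */
#ifdef SPACE_MAP_EXAMPLE
static void
space_map_example_basic(void)
{
	kmutex_t lock;
	space_map_t sm;

	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_enter(&lock);

	/* Map covers [0, 1M) with byte granularity (shift 0). */
	space_map_create(&sm, 0, 1ULL << 20, 0, &lock);
	space_map_add(&sm, 0, 4096);		/* segment [0, 4096) */
	ASSERT(space_map_contains(&sm, 0, 4096));
	space_map_remove(&sm, 0, 4096);
	space_map_destroy(&sm);		/* requires sm_space == 0 */

	mutex_exit(&lock);
	mutex_destroy(&lock);
}
#endif	/* SPACE_MAP_EXAMPLE */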

void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		/* New segment bridges both neighbors; coalesce all three. */
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_free(ss_before, sizeof (*ss_before));
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}

void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, NULL);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size < sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		/* Removal splits the segment in two. */
		newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}

boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_free(ss, sizeof (*ss));
	}
	sm->sm_space = 0;
}

void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY3U(sm->sm_space, ==, 0);

	if (maptype == SM_FREE) {
		/* Start with everything free, then subtract allocations. */
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}
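
/*
 * Illustrative caller sketch (not part of the original file): take
 * sm_lock, wait out any loader already in flight, then load.  This
 * mirrors how metaslab activation drives the interface; the guard
 * macro and function name are hypothetical, and ops is passed as NULL
 * for simplicity.
 */
#ifdef SPACE_MAP_EXAMPLE
static int
space_map_example_load(space_map_t *sm, space_map_obj_t *smo, objset_t *os)
{
	int error = 0;

	mutex_enter(sm->sm_lock);
	space_map_load_wait(sm);	/* wait for concurrent loader, if any */
	if (!sm->sm_loaded)
		error = space_map_load(sm, NULL, SM_FREE, smo, os);
	mutex_exit(sm->sm_lock);

	return (error);
}
#endif	/* SPACE_MAP_EXAMPLE */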

void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}

uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
void
space_map_sync(space_map_t *sm, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				/* Buffer full; flush it and start over. */
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	VERIFY3U(sm->sm_space, ==, 0);
}
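
/*
 * Illustrative caller sketch (not part of the original file), modeled
 * on metaslab sync: allocated and freed segments are appended to the
 * same on-disk object as 'A' and 'F' entries within one transaction.
 * The guard macro and function name are hypothetical.
 */
#ifdef SPACE_MAP_EXAMPLE
static void
space_map_example_sync(space_map_t *allocmap, space_map_t *freemap,
    space_map_obj_t *smo, objset_t *mos, dmu_tx_t *tx)
{
	ASSERT(MUTEX_HELD(allocmap->sm_lock));

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);
}
#endif	/* SPACE_MAP_EXAMPLE */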

void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}

/*
 * Space map reference trees.
 *
 * A space map is a collection of integers.  Every integer is either
 * in the map, or it's not.  A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps.  For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform.  Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
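
/*
 * A small worked example (illustrative, not from the original source):
 * adding map A = [0, 10) and map B = [5, 15) to an empty reference tree
 * yields the nodes (+1 @ 0), (+1 @ 5), (-1 @ 10), (-1 @ 15).  Walking
 * the tree in offset order, the running refcnt is >= 1 over [0, 15),
 * the union of A and B, and >= 2 over [5, 10), their intersection.
 */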
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	/* Break ties on node address so equal offsets can coexist. */
	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}

void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}

void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}

static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}

void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
	int64_t refcnt)
{
	/* A segment is +refcnt at its start and -refcnt at its end. */
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}

/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}

/*
 * Convert a reference tree into a space map.  The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	/*
	 * Walk the tree in offset order, keeping a running refcnt, and
	 * emit a segment for each maximal run with refcnt >= minref.
	 */
	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL) {
				start = sr->sr_offset;
			}
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}
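
/*
 * Illustrative sketch (not part of the original file): computing the
 * union of two space maps via a reference tree; passing minref = 2
 * would compute their intersection instead.  Assumes the caller holds
 * each map's sm_lock.  The guard macro and function name are
 * hypothetical.
 */
#ifdef SPACE_MAP_EXAMPLE
static void
space_map_example_union(space_map_t *a, space_map_t *b, space_map_t *dest)
{
	avl_tree_t reftree;

	space_map_ref_create(&reftree);
	space_map_ref_add_map(&reftree, a, 1);
	space_map_ref_add_map(&reftree, b, 1);
	space_map_ref_generate_map(&reftree, dest, 1);	/* refcnt >= 1 */
	space_map_ref_destroy(&reftree);
}
#endif	/* SPACE_MAP_EXAMPLE */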