/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>
SYSCTL_DECL(_vfs_zfs);
static int space_map_last_hope;
TUNABLE_INT("vfs.zfs.space_map_last_hope", &space_map_last_hope);
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_last_hope, CTLFLAG_RDTUN,
    &space_map_last_hope, 0,
    "If the kernel panics in space_map code on pool import, import the pool "
    "in read-only mode and back up all your data before trying this option.");
static kmem_cache_t *space_seg_cache;
void
space_map_init(void)
{
	ASSERT(space_seg_cache == NULL);
	space_seg_cache = kmem_cache_create("space_seg_cache",
	    sizeof (space_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
space_map_fini(void)
{
	kmem_cache_destroy(space_seg_cache);
	space_seg_cache = NULL;
}
/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */
/*
 * Segments are ordered by start offset, but any two overlapping segments
 * compare equal; avl_find() on a search segment therefore returns whichever
 * existing segment overlaps the search range, if one exists.
 */
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}
void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
    kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}
void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY0(sm->sm_space);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(!sm->sm_condensing);
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);
again:
	ss = space_map_find(sm, start, size, &where);
	if (ss != NULL) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
	}

	if (ss != NULL && space_map_last_hope) {
		uint64_t sstart, ssize;

		if (ss->ss_start > start)
			sstart = ss->ss_start;
		else
			sstart = start;
		if (ss->ss_end > end)
			ssize = end - sstart;
		else
			ssize = ss->ss_end - sstart;
		ZFS_LOG(0,
		    "Removing colliding space_map range (start=%ju end=%ju). Good luck!",
		    (uintmax_t)sstart, (uintmax_t)(sstart + ssize));
		space_map_remove(sm, sstart, ssize);
		goto again;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		if (sm->sm_pp_root) {
			avl_remove(sm->sm_pp_root, ss_before);
			avl_remove(sm->sm_pp_root, ss_after);
		}
		ss_after->ss_start = ss_before->ss_start;
		kmem_cache_free(space_seg_cache, ss_before);
		ss = ss_after;
	} else if (merge_before) {
		ss_before->ss_end = end;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_before);
		ss = ss_before;
	} else if (merge_after) {
		ss_after->ss_start = start;
		if (sm->sm_pp_root)
			avl_remove(sm->sm_pp_root, ss_after);
		ss = ss_after;
	} else {
		ss = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	if (sm->sm_pp_root)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space += size;
}
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(!sm->sm_condensing);

	ss = space_map_find(sm, start, size, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size < sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (sm->sm_pp_root)
		avl_remove(sm->sm_pp_root, ss);

	if (left_over && right_over) {
		newseg = kmem_cache_alloc(space_seg_cache, KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
		if (sm->sm_pp_root)
			avl_add(sm->sm_pp_root, newseg);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_cache_free(space_seg_cache, ss);
		ss = NULL;
	}

	if (sm->sm_pp_root && ss != NULL)
		avl_add(sm->sm_pp_root, ss);

	sm->sm_space -= size;
}
static space_seg_t *
space_map_find(space_map_t *sm, uint64_t start, uint64_t size,
    avl_index_t *wherep)
{
	space_seg_t ssearch, *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = start + size;
	ss = avl_find(&sm->sm_root, &ssearch, wherep);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= start + size)
		return (ss);
	return (NULL);
}
boolean_t
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;

	return (space_map_find(sm, start, size, &where) != 0);
}
void
space_map_swap(space_map_t **msrc, space_map_t **mdst)
{
	space_map_t *sm;

	ASSERT(MUTEX_HELD((*msrc)->sm_lock));
	ASSERT0((*mdst)->sm_space);
	ASSERT0(avl_numnodes(&(*mdst)->sm_root));

	sm = *msrc;
	*msrc = *mdst;
	*mdst = sm;
}
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_cache_free(space_seg_cache, ss);
	}
	sm->sm_space = 0;
}
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}
/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}
/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
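/*
 * Typical caller pattern (a sketch only; the metaslab code is the real
 * consumer, and the variable names here are illustrative):
 *
 *	mutex_enter(sm->sm_lock);
 *	space_map_load_wait(sm);
 *	if (!sm->sm_loaded)
 *		error = space_map_load(sm, ops, SM_FREE, smo, os);
 *	mutex_exit(sm->sm_lock);
 *
 * Because sm_lock is dropped and reacquired inside space_map_load(),
 * any state observed before the call must be revalidated afterwards.
 */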
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT(!sm->sm_loaded);
	ASSERT(!sm->sm_loading);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY0(sm->sm_space);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map,
		    DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}
void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}
uint64_t
space_map_maxsize(space_map_t *sm)
{
	ASSERT(sm->sm_ops != NULL);
	return (sm->sm_ops->smop_max(sm));
}
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}
void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}
void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}
/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
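/*
 * For orientation (paraphrasing the SM_*_ENCODE macros in
 * sys/space_map.h, not new behavior): each map entry written below is a
 * single 64-bit word holding a 1-bit debug flag, a 47-bit offset in
 * units of 1 << sm_shift, a 1-bit alloc/free type and a 15-bit run
 * length.  Debug entries instead record the action, the sync pass and
 * the txg, and are skipped by space_map_load().
 */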
void
space_map_sync(space_map_t *sm, uint8_t maptype,
    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &sm->sm_root;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len, total, sm_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&sm->sm_root);
	sm_space = sm->sm_space;
	for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss)) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		total += size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&sm->sm_root));
	VERIFY3U(sm->sm_space, ==, sm_space);
	VERIFY3U(sm->sm_space, ==, total);

	zio_buf_free(entry_map, bufsize);
}
void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}
/*
 * Space map reference trees.
 *
 * A space map is a collection of integers.  Every integer is either
 * in the map, or it's not.  A space map reference tree generalizes
 * the idea: it allows its members to have arbitrary reference counts,
 * as opposed to the implicit reference count of 0 or 1 in a space map.
 * This representation comes in handy when computing the union or
 * intersection of multiple space maps.  For example, the union of
 * N space maps is the subset of the reference tree with refcnt >= 1.
 * The intersection of N space maps is the subset with refcnt >= N.
 *
 * [It's very much like a Fourier transform.  Unions and intersections
 * are hard to perform in the 'space map domain', so we convert the maps
 * into the 'reference count domain', where it's trivial, then invert.]
 *
 * vdev_dtl_reassess() uses computations of this form to determine
 * DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
 * has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
 * has an outage wherever refcnt >= vdev_children.
 */
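/*
 * Worked example (illustrative): for maps A = [0,10) and B = [5,15),
 * space_map_ref_add_map() posts +1 at offset 0, +1 at 5, -1 at 10 and
 * -1 at 15.  Walking the tree left to right, the running refcnt is
 * 1 on [0,5), 2 on [5,10) and 1 on [10,15), so generating a map with
 * minref = 1 yields the union [0,15), while minref = 2 yields the
 * intersection [5,10).
 */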
static int
space_map_ref_compare(const void *x1, const void *x2)
{
	const space_ref_t *sr1 = x1;
	const space_ref_t *sr2 = x2;

	if (sr1->sr_offset < sr2->sr_offset)
		return (-1);
	if (sr1->sr_offset > sr2->sr_offset)
		return (1);

	if (sr1 < sr2)
		return (-1);
	if (sr1 > sr2)
		return (1);

	return (0);
}
void
space_map_ref_create(avl_tree_t *t)
{
	avl_create(t, space_map_ref_compare,
	    sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}
void
space_map_ref_destroy(avl_tree_t *t)
{
	space_ref_t *sr;
	void *cookie = NULL;

	while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(sr, sizeof (*sr));

	avl_destroy(t);
}
static void
space_map_ref_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
	space_ref_t *sr;

	sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
	sr->sr_offset = offset;
	sr->sr_refcnt = refcnt;

	avl_add(t, sr);
}
void
space_map_ref_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
    int64_t refcnt)
{
	space_map_ref_add_node(t, start, refcnt);
	space_map_ref_add_node(t, end, -refcnt);
}
/*
 * Convert (or add) a space map into a reference tree.
 */
void
space_map_ref_add_map(avl_tree_t *t, space_map_t *sm, int64_t refcnt)
{
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt);
}
/*
 * Convert a reference tree into a space map.  The space map will contain
 * all members of the reference tree for which refcnt >= minref.
 */
void
space_map_ref_generate_map(avl_tree_t *t, space_map_t *sm, int64_t minref)
{
	uint64_t start = -1ULL;
	int64_t refcnt = 0;
	space_ref_t *sr;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_vacate(sm, NULL, NULL);

	for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
		refcnt += sr->sr_refcnt;
		if (refcnt >= minref) {
			if (start == -1ULL) {
				start = sr->sr_offset;
			}
		} else {
			if (start != -1ULL) {
				uint64_t end = sr->sr_offset;
				ASSERT(start <= end);
				if (end > start)
					space_map_add(sm, start, end - start);
				start = -1ULL;
			}
		}
	}
	ASSERT(refcnt == 0);
	ASSERT(start == -1ULL);
}