/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zio.h>
#include <sys/space_map.h>

/*
 * Space map routines.
 * NOTE: caller is responsible for all locking.
 */

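/*
 * Comparator for the segment AVL tree: segments are ordered by ss_start,
 * and any two overlapping segments compare equal, so an avl_find() on a
 * range locates a segment that contains it.
 */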
static int
space_map_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;

	if (s1->ss_start < s2->ss_start) {
		if (s1->ss_end > s2->ss_start)
			return (0);
		return (-1);
	}
	if (s1->ss_start > s2->ss_start) {
		if (s1->ss_start < s2->ss_end)
			return (0);
		return (1);
	}
	return (0);
}

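/*
 * Initialize an empty in-core space map covering [start, start + size).
 * All segment offsets and sizes must be multiples of 1 << shift, and the
 * map is protected by the caller-supplied lock lp.
 */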
void
space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
	kmutex_t *lp)
{
	bzero(sm, sizeof (*sm));

	cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&sm->sm_root, space_map_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_node));

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
}

void
space_map_destroy(space_map_t *sm)
{
	ASSERT(!sm->sm_loaded && !sm->sm_loading);
	VERIFY3U(sm->sm_space, ==, 0);
	avl_destroy(&sm->sm_root);
	cv_destroy(&sm->sm_load_cv);
}

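/*
 * Add the range [start, start + size) to the space map, coalescing it with
 * any adjacent segments.  The range must lie within the map, be aligned to
 * 1 << sm_shift, and must not overlap an existing segment.
 */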
void
space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss_before, *ss_after, *ss;
	uint64_t end = start + size;
	int merge_before, merge_after;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY3U(start, >=, sm->sm_start);
	VERIFY3U(end, <=, sm->sm_start + sm->sm_size);
	VERIFY(sm->sm_space + size <= sm->sm_size);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	if (ss != NULL && ss->ss_start <= start && ss->ss_end >= end) {
		zfs_panic_recover("zfs: allocating allocated segment "
		    "(offset=%llu size=%llu)\n",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/* Make sure we don't overlap with either of our neighbors */
	VERIFY(ss == NULL);

	ss_before = avl_nearest(&sm->sm_root, where, AVL_BEFORE);
	ss_after = avl_nearest(&sm->sm_root, where, AVL_AFTER);

	merge_before = (ss_before != NULL && ss_before->ss_end == start);
	merge_after = (ss_after != NULL && ss_after->ss_start == end);

	if (merge_before && merge_after) {
		avl_remove(&sm->sm_root, ss_before);
		ss_after->ss_start = ss_before->ss_start;
		kmem_free(ss_before, sizeof (*ss_before));
	} else if (merge_before) {
		ss_before->ss_end = end;
	} else if (merge_after) {
		ss_after->ss_start = start;
	} else {
		ss = kmem_alloc(sizeof (*ss), KM_SLEEP);
		ss->ss_start = start;
		ss->ss_end = end;
		avl_insert(&sm->sm_root, ss, where);
	}

	sm->sm_space += size;
}

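/*
 * Remove the range [start, start + size) from the space map.  The range must
 * be entirely contained within a single existing segment, which is shrunk,
 * split, or freed as appropriate.
 */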
void
space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss, *newseg;
	uint64_t end = start + size;
	int left_over, right_over;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	/* Make sure we completely overlap with someone */
	if (ss == NULL) {
		zfs_panic_recover("zfs: freeing free segment "
		    "(offset=%llu size=%llu)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}
	VERIFY3U(ss->ss_start, <=, start);
	VERIFY3U(ss->ss_end, >=, end);
	VERIFY(sm->sm_space - size <= sm->sm_size);

	left_over = (ss->ss_start != start);
	right_over = (ss->ss_end != end);

	if (left_over && right_over) {
		newseg = kmem_alloc(sizeof (*newseg), KM_SLEEP);
		newseg->ss_start = end;
		newseg->ss_end = ss->ss_end;
		ss->ss_end = start;
		avl_insert_here(&sm->sm_root, newseg, ss, AVL_AFTER);
	} else if (left_over) {
		ss->ss_end = start;
	} else if (right_over) {
		ss->ss_start = end;
	} else {
		avl_remove(&sm->sm_root, ss);
		kmem_free(ss, sizeof (*ss));
	}

	sm->sm_space -= size;
}

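/*
 * Return nonzero if the range [start, start + size) is entirely contained
 * within a single segment of the space map.
 */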
int
space_map_contains(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_index_t where;
	space_seg_t ssearch, *ss;
	uint64_t end = start + size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	VERIFY(size != 0);
	VERIFY(P2PHASE(start, 1ULL << sm->sm_shift) == 0);
	VERIFY(P2PHASE(size, 1ULL << sm->sm_shift) == 0);

	ssearch.ss_start = start;
	ssearch.ss_end = end;
	ss = avl_find(&sm->sm_root, &ssearch, &where);

	return (ss != NULL && ss->ss_start <= start && ss->ss_end >= end);
}

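/*
 * Remove all segments from the space map, invoking func (if non-NULL) on
 * each one before it is freed, and reset sm_space to zero.
 */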
void
space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		if (func != NULL)
			func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
		kmem_free(ss, sizeof (*ss));
	}
	sm->sm_space = 0;
}

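/*
 * Invoke func on every segment in the space map without modifying it.
 */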
void
space_map_walk(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
{
	space_seg_t *ss;

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
}

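/*
 * Remove any portion of the space map that intersects
 * the range [start, start + size).
 */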
void
space_map_excise(space_map_t *sm, uint64_t start, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, search;
	uint64_t end = start + size;
	uint64_t rm_start, rm_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	search.ss_start = start;
	search.ss_end = start;

	for (;;) {
		ss = avl_find(t, &search, &where);

		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);

		if (ss == NULL || ss->ss_start >= end)
			break;

		rm_start = MAX(ss->ss_start, start);
		rm_end = MIN(ss->ss_end, end);

		space_map_remove(sm, rm_start, rm_end - rm_start);
	}
}

/*
 * Replace smd with the union of smd and sms.
 */
void
space_map_union(space_map_t *smd, space_map_t *sms)
{
	avl_tree_t *t = &sms->sm_root;
	space_seg_t *ss;

	ASSERT(MUTEX_HELD(smd->sm_lock));

	/*
	 * For each source segment, remove any intersections with the
	 * destination, then add the source segment to the destination.
	 */
	for (ss = avl_first(t); ss != NULL; ss = AVL_NEXT(t, ss)) {
		space_map_excise(smd, ss->ss_start, ss->ss_end - ss->ss_start);
		space_map_add(smd, ss->ss_start, ss->ss_end - ss->ss_start);
	}
}

/*
 * Wait for any in-progress space_map_load() to complete.
 */
void
space_map_load_wait(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading)
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
}

/*
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, space_map_ops_t *ops, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	uint64_t mapstart = sm->sm_start;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	space_map_load_wait(sm);

	if (sm->sm_loaded)
		return (0);

	sm->sm_loading = B_TRUE;
	end = smo->smo_objsize;
	space = smo->smo_alloc;

	ASSERT(sm->sm_ops == NULL);
	VERIFY3U(sm->sm_space, ==, 0);

	if (maptype == SM_FREE) {
		space_map_add(sm, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = 1ULL << SPACE_MAP_BLOCKSHIFT;
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize)
		dmu_prefetch(os, smo->smo_object, bufsize, end - bufsize);
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    smo->smo_object, offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(os, smo->smo_object, offset, size, entry_map);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			(SM_TYPE_DECODE(e) == maptype ?
			    space_map_add : space_map_remove)(sm,
			    (SM_OFFSET_DECODE(e) << sm->sm_shift) + mapstart,
			    SM_RUN_DECODE(e) << sm->sm_shift);
		}
	}

	if (error == 0) {
		VERIFY3U(sm->sm_space, ==, space);

		sm->sm_loaded = B_TRUE;
		sm->sm_ops = ops;
		if (ops != NULL)
			ops->smop_load(sm);
	} else {
		space_map_vacate(sm, NULL, NULL);
	}

	zio_buf_free(entry_map, bufsize);

	sm->sm_loading = B_FALSE;

	cv_broadcast(&sm->sm_load_cv);

	return (error);
}

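/*
 * Unload the space map: let the ops vector release its state, clear the
 * loaded flag, and discard all in-core segments.
 */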
void
space_map_unload(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_loaded && sm->sm_ops != NULL)
		sm->sm_ops->smop_unload(sm);

	sm->sm_loaded = B_FALSE;
	sm->sm_ops = NULL;

	space_map_vacate(sm, NULL, NULL);
}

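/*
 * Allocate size bytes through the ops vector; the range is removed from the
 * space map only when the ops allocator returns a valid offset (not -1ULL).
 */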
uint64_t
space_map_alloc(space_map_t *sm, uint64_t size)
{
	uint64_t start;

	start = sm->sm_ops->smop_alloc(sm, size);
	if (start != -1ULL)
		space_map_remove(sm, start, size);
	return (start);
}

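/*
 * Claim a specific range through the ops vector and remove it from the
 * space map.
 */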
void
space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	sm->sm_ops->smop_claim(sm, start, size);
	space_map_remove(sm, start, size);
}

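/*
 * Return a range to the space map and notify the ops vector that it has
 * been freed.
 */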
void
space_map_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	space_map_add(sm, start, size);
	sm->sm_ops->smop_free(sm, start, size);
}

/*
 * Note: space_map_sync() will drop sm_lock across dmu_write() calls.
 */
void
space_map_sync(space_map_t *sm, uint8_t maptype,
	space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	void *cookie = NULL;
	space_seg_t *ss;
	uint64_t bufsize, start, size, run_len;
	uint64_t *entry, *entry_map, *entry_map_end;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	if (sm->sm_space == 0)
		return;

	dprintf("object %4llu, txg %llu, pass %d, %c, count %lu, space %llx\n",
	    smo->smo_object, dmu_tx_get_txg(tx), spa_sync_pass(spa),
	    maptype == SM_ALLOC ? 'A' : 'F', avl_numnodes(&sm->sm_root),
	    sm->sm_space);

	if (maptype == SM_ALLOC)
		smo->smo_alloc += sm->sm_space;
	else
		smo->smo_alloc -= sm->sm_space;

	bufsize = (8 + avl_numnodes(&sm->sm_root)) * sizeof (uint64_t);
	bufsize = MIN(bufsize, 1ULL << SPACE_MAP_BLOCKSHIFT);
	entry_map = zio_buf_alloc(bufsize);
	entry_map_end = entry_map + (bufsize / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
		size = ss->ss_end - ss->ss_start;
		start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;

		sm->sm_space -= size;
		size >>= sm->sm_shift;

		while (size) {
			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				mutex_exit(sm->sm_lock);
				dmu_write(os, smo->smo_object, smo->smo_objsize,
				    bufsize, entry_map, tx);
				mutex_enter(sm->sm_lock);
				smo->smo_objsize += bufsize;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
		}
		kmem_free(ss, sizeof (*ss));
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(sm->sm_lock);
		dmu_write(os, smo->smo_object, smo->smo_objsize,
		    size, entry_map, tx);
		mutex_enter(sm->sm_lock);
		smo->smo_objsize += size;
	}

	zio_buf_free(entry_map, bufsize);

	VERIFY3U(sm->sm_space, ==, 0);
}

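/*
 * Free the entire on-disk space map object and reset its size and
 * allocated-space accounting.
 */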
void
space_map_truncate(space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx)
{
	VERIFY(dmu_free_range(os, smo->smo_object, 0, -1ULL, tx) == 0);

	smo->smo_objsize = 0;
	smo->smo_alloc = 0;
}