/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>

/*
 * Range trees are tree-based data structures that can be used to
 * track free space or generally any space allocation information.
 * A range tree keeps track of individual segments and automatically
 * provides facilities such as adjacent extent merging and extent
 * splitting in response to range add/remove requests.
 *
 * A range tree starts out completely empty, with no segments in it.
 * Adding an allocation via range_tree_add to the range tree can:
 * 1) create a new extent
 * 2) extend an adjacent extent
 * 3) merge two adjacent extents
 * Conversely, removing an allocation via range_tree_remove can:
 * 1) completely remove an extent
 * 2) shorten an extent (if the allocation was near one of its ends)
 * 3) split an extent into two extents, in effect punching a hole
 *
 * A range tree is also capable of 'bridging' gaps when adding
 * allocations. This is useful for cases when close proximity of
 * allocations is an important detail that needs to be represented
 * in the range tree. See range_tree_set_gap(). The default behavior
 * is not to bridge gaps (i.e. the maximum allowed gap size is 0).
 *
 * In order to traverse a range tree, use either the range_tree_walk()
 * or range_tree_vacate() functions.
 *
 * To obtain more accurate information on individual segment
 * operations that the range tree performs "under the hood", you can
 * specify a set of callbacks by passing a range_tree_ops_t structure
 * to the range_tree_create function. Any callbacks that are non-NULL
 * are then called at the appropriate times.
 *
 * The range tree code also supports a special variant of range trees
 * that can bridge small gaps between segments. This kind of tree is used
 * by the dsl scanning code to group I/Os into mostly sequential chunks to
 * optimize disk performance. The code here attempts to do this with as
 * little memory and computational overhead as possible. One limitation of
 * this implementation is that segments of range trees with gaps can only
 * support removing complete segments.
 */
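
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file; the offsets and sizes are arbitrary, and locking and error
 * handling are omitted):
 *
 *	range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
 *	range_tree_add(rt, 0x1000, 0x2000);    (new segment [0x1000, 0x3000))
 *	range_tree_add(rt, 0x3000, 0x1000);    (extends it to [0x1000, 0x4000))
 *	range_tree_remove(rt, 0x2000, 0x800);  (punches a hole: two segments)
 *	range_tree_vacate(rt, NULL, NULL);     (empties the tree)
 *	range_tree_destroy(rt);
 */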

static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
	ASSERT3U(rt->rt_type, <, RANGE_SEG_NUM_TYPES);

	size_t size = 0;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		__builtin_unreachable();
	}

	memcpy(dest, src, size);
}

void
range_tree_stat_verify(range_tree_t *rt)
{
	range_seg_t *rs;
	zfs_btree_index_t where;
	uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
	int i;

	for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
		int idx = highbit64(size) - 1;

		hist[idx]++;
		ASSERT3U(hist[idx], !=, 0);
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		if (hist[i] != rt->rt_histogram[i]) {
			zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
			    i, hist, (u_longlong_t)hist[i],
			    (u_longlong_t)rt->rt_histogram[i]);
		}
		VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
	}
}

static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	rt->rt_histogram[idx]++;
	ASSERT3U(rt->rt_histogram[idx], !=, 0);
}

static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
	uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
	int idx = highbit64(size) - 1;

	ASSERT(size != 0);
	ASSERT3U(idx, <,
	    sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));

	ASSERT3U(rt->rt_histogram[idx], !=, 0);
	rt->rt_histogram[idx]--;
}
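
/*
 * Segment comparators. Two segments compare as "equal" iff they overlap,
 * so a zfs_btree_find() for a searched-for range locates any segment the
 * range touches. As an illustration (values not from the source):
 * [10, 20) vs. [20, 30) compares as -1 (strictly before), while
 * [10, 20) vs. [15, 30) compares as 0 (overlap).
 */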
static int
range_tree_seg32_compare(const void *x1, const void *x2)
{
	const range_seg32_t *r1 = x1;
	const range_seg32_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg64_compare(const void *x1, const void *x2)
{
	const range_seg64_t *r1 = x1;
	const range_seg64_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

static int
range_tree_seg_gap_compare(const void *x1, const void *x2)
{
	const range_seg_gap_t *r1 = x1;
	const range_seg_gap_t *r2 = x2;

	ASSERT3U(r1->rs_start, <=, r1->rs_end);
	ASSERT3U(r2->rs_start, <=, r2->rs_end);

	return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}

range_tree_t *
range_tree_create_impl(const range_tree_ops_t *ops, range_seg_type_t type,
    void *arg, uint64_t start, uint64_t shift,
    int (*zfs_btree_compare) (const void *, const void *),
    uint64_t gap)
{
	range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);

	ASSERT3U(shift, <, 64);
	ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES);
	size_t size;
	int (*compare) (const void *, const void *);
	switch (type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		compare = range_tree_seg32_compare;
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		compare = range_tree_seg64_compare;
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		compare = range_tree_seg_gap_compare;
		break;
	default:
		panic("Invalid range seg type %d", type);
	}
	zfs_btree_create(&rt->rt_root, compare, size);

	rt->rt_ops = ops;
	rt->rt_gap = gap;
	rt->rt_arg = arg;
	rt->rt_type = type;
	rt->rt_start = start;
	rt->rt_shift = shift;
	rt->rt_btree_compare = zfs_btree_compare;

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
		rt->rt_ops->rtop_create(rt, rt->rt_arg);

	return (rt);
}

range_tree_t *
range_tree_create(const range_tree_ops_t *ops, range_seg_type_t type,
    void *arg, uint64_t start, uint64_t shift)
{
	return (range_tree_create_impl(ops, type, arg, start, shift, NULL, 0));
}

void
range_tree_destroy(range_tree_t *rt)
{
	VERIFY0(rt->rt_space);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
		rt->rt_ops->rtop_destroy(rt, rt->rt_arg);

	zfs_btree_destroy(&rt->rt_root);
	kmem_free(rt, sizeof (*rt));
}

void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
	if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) {
		zfs_panic_recover("zfs: attempting to decrease fill to or "
		    "below 0; probable double remove in segment [%llx:%llx]",
		    (longlong_t)rs_get_start(rs, rt),
		    (longlong_t)rs_get_end(rs, rt));
	}
	if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) -
	    rs_get_start(rs, rt)) {
		zfs_panic_recover("zfs: attempting to increase fill beyond "
		    "max; probable double add in segment [%llx:%llx]",
		    (longlong_t)rs_get_start(rs, rt),
		    (longlong_t)rs_get_end(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
	rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}

static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
	range_tree_t *rt = arg;
	zfs_btree_index_t where;
	range_seg_t *rs_before, *rs_after, *rs;
	range_seg_max_t tmp, rsearch;
	uint64_t end = start + size, gap = rt->rt_gap;
	uint64_t bridge_size = 0;
	boolean_t merge_before, merge_after;

	ASSERT3U(size, !=, 0);
	ASSERT3U(fill, <=, size);
	ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/*
	 * If this is a gap-supporting range tree, it is possible that we
	 * are inserting into an existing segment. In this case simply
	 * bump the fill count and call the remove / add callbacks. If the
	 * new range will extend an existing segment, we remove the
	 * existing one, apply the new extent to it and re-insert it using
	 * the normal code paths.
	 */
	if (rs != NULL) {
		if (gap == 0) {
			zfs_panic_recover("zfs: adding existent segment to "
			    "range tree (offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size);
			return;
		}
		uint64_t rstart = rs_get_start(rs, rt);
		uint64_t rend = rs_get_end(rs, rt);
		if (rstart <= start && rend >= end) {
			range_tree_adjust_fill(rt, rs, fill);
			return;
		}

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

		range_tree_stat_decr(rt, rs);
		rt->rt_space -= rend - rstart;

		fill += rs_get_fill(rs, rt);
		start = MIN(start, rstart);
		end = MAX(end, rend);
		size = end - start;

		zfs_btree_remove(&rt->rt_root, rs);
		range_tree_add_impl(rt, start, size, fill);
		return;
	}

	ASSERT3P(rs, ==, NULL);

	/*
	 * Determine whether or not we will have to merge with our neighbors.
	 * If gap != 0, we might need to merge with our neighbors even if we
	 * aren't directly touching.
	 */
	zfs_btree_index_t where_before, where_after;
	rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
	rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);

	merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
	    start - gap);
	merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
	    gap);

	if (merge_before && gap != 0)
		bridge_size += start - rs_get_end(rs_before, rt);
	if (merge_after && gap != 0)
		bridge_size += rs_get_start(rs_after, rt) - end;

	if (merge_before && merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
		}

		range_tree_stat_decr(rt, rs_before);
		range_tree_stat_decr(rt, rs_after);

		rs_copy(rs_after, &tmp, rt);
		uint64_t before_start = rs_get_start_raw(rs_before, rt);
		uint64_t before_fill = rs_get_fill(rs_before, rt);
		uint64_t after_fill = rs_get_fill(rs_after, rt);
		zfs_btree_remove_idx(&rt->rt_root, &where_before);

		/*
		 * We have to re-find the node because our old reference is
		 * invalid as soon as we do any mutating btree operations.
		 */
		rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
		rs_set_start_raw(rs_after, rt, before_start);
		rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
		rs = rs_after;
	} else if (merge_before) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);

		range_tree_stat_decr(rt, rs_before);

		uint64_t before_fill = rs_get_fill(rs_before, rt);
		rs_set_end(rs_before, rt, end);
		rs_set_fill(rs_before, rt, before_fill + fill);
		rs = rs_before;
	} else if (merge_after) {
		if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
			rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);

		range_tree_stat_decr(rt, rs_after);

		uint64_t after_fill = rs_get_fill(rs_after, rt);
		rs_set_start(rs_after, rt, start);
		rs_set_fill(rs_after, rt, after_fill + fill);
		rs = rs_after;
	} else {
		rs = &tmp;

		rs_set_start(rs, rt, start);
		rs_set_end(rs, rt, end);
		rs_set_fill(rs, rt, fill);
		zfs_btree_add_idx(&rt->rt_root, rs, &where);
	}

	if (gap != 0) {
		ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	} else {
		ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	range_tree_stat_incr(rt, rs);
	rt->rt_space += size + bridge_size;
}

void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
	range_tree_add_impl(arg, start, size, size);
}

static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
    boolean_t do_fill)
{
	zfs_btree_index_t where;
	range_seg_t *rs;
	range_seg_max_t rsearch, rs_tmp;
	uint64_t end = start + size;
	boolean_t left_over, right_over;

	VERIFY3U(size, !=, 0);
	VERIFY3U(size, <=, rt->rt_space);
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);

	/* Make sure we completely overlap with someone */
	if (rs == NULL) {
		zfs_panic_recover("zfs: removing nonexistent segment from "
		    "range tree (offset=%llx size=%llx)",
		    (longlong_t)start, (longlong_t)size);
		return;
	}

	/*
	 * Range trees with gap support must only remove complete segments
	 * from the tree. This allows us to maintain accurate fill accounting
	 * and to ensure that bridged sections are not leaked. If we need to
	 * remove less than the full segment, we can only adjust the fill count.
	 */
	if (rt->rt_gap != 0) {
		if (do_fill) {
			if (rs_get_fill(rs, rt) == size) {
				start = rs_get_start(rs, rt);
				end = rs_get_end(rs, rt);
				size = end - start;
			} else {
				range_tree_adjust_fill(rt, rs, -size);
				return;
			}
		} else if (rs_get_start(rs, rt) != start ||
		    rs_get_end(rs, rt) != end) {
			zfs_panic_recover("zfs: freeing partial segment of "
			    "gap tree (offset=%llx size=%llx) of "
			    "(offset=%llx size=%llx)",
			    (longlong_t)start, (longlong_t)size,
			    (longlong_t)rs_get_start(rs, rt),
			    (longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
			    rt));
			return;
		}
	}

	VERIFY3U(rs_get_start(rs, rt), <=, start);
	VERIFY3U(rs_get_end(rs, rt), >=, end);

	left_over = (rs_get_start(rs, rt) != start);
	right_over = (rs_get_end(rs, rt) != end);

	range_tree_stat_decr(rt, rs);

	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	if (left_over && right_over) {
		range_seg_max_t newseg;
		rs_set_start(&newseg, rt, end);
		rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
		rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
		range_tree_stat_incr(rt, &newseg);

		// This modifies the buffer already inside the range tree
		rs_set_end(rs, rt, start);

		rs_copy(rs, &rs_tmp, rt);
		if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
			zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
		else
			zfs_btree_add(&rt->rt_root, &newseg);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
	} else if (left_over) {
		// This modifies the buffer already inside the range tree
		rs_set_end(rs, rt, start);
		rs_copy(rs, &rs_tmp, rt);
	} else if (right_over) {
		// This modifies the buffer already inside the range tree
		rs_set_start(rs, rt, end);
		rs_copy(rs, &rs_tmp, rt);
	} else {
		zfs_btree_remove_idx(&rt->rt_root, &where);
		rs = NULL;
	}

	if (rs != NULL) {
		/*
		 * The fill of the leftover segment will always be equal to
		 * the size, since we do not support removing partial segments
		 * of range trees with gaps.
		 */
		rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
		    rs_get_start_raw(rs, rt));
		range_tree_stat_incr(rt, &rs_tmp);

		if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
			rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
	}

	rt->rt_space -= size;
}

void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(arg, start, size, B_FALSE);
}

void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_tree_remove_impl(rt, start, size, B_TRUE);
}

void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
    uint64_t newstart, uint64_t newsize)
{
	int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));

	range_tree_stat_decr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
		rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);

	rs_set_start(rs, rt, newstart);
	rs_set_end(rs, rt, newstart + newsize);

	range_tree_stat_incr(rt, rs);
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
		rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);

	rt->rt_space += delta;
}

static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_max_t rsearch;
	uint64_t end = start + size;

	VERIFY(size != 0);

	rs_set_start(&rsearch, rt, start);
	rs_set_end(&rsearch, rt, end);
	return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}

range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_t *rs = range_tree_find_impl(rt, start, size);
	if (rs != NULL && rs_get_start(rs, rt) <= start &&
	    rs_get_end(rs, rt) >= start + size) {
		return (rs);
	}
	return (NULL);
}

void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
	range_seg_t *rs = range_tree_find(rt, off, size);
	if (rs != NULL)
		panic("segment already in tree; rs=%p", (void *)rs);
}

boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
	return (range_tree_find(rt, start, size) != NULL);
}

/*
 * Returns the first subset of the given range which overlaps with the range
 * tree. Returns true if there is a segment in the range, and false if there
 * isn't.
 */
boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
    uint64_t *ostart, uint64_t *osize)
{
	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	range_seg_max_t rsearch;
	rs_set_start(&rsearch, rt, start);
	rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);

	zfs_btree_index_t where;
	range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
	if (rs != NULL) {
		*ostart = start;
		*osize = MIN(size, rs_get_end(rs, rt) - start);
		return (B_TRUE);
	}

	rs = zfs_btree_next(&rt->rt_root, &where, &where);
	if (rs == NULL || rs_get_start(rs, rt) > start + size)
		return (B_FALSE);

	*ostart = rs_get_start(rs, rt);
	*osize = MIN(start + size, rs_get_end(rs, rt)) -
	    rs_get_start(rs, rt);
	return (B_TRUE);
}
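
/*
 * For example (illustrative values, not from the source): if the tree
 * holds the single segment [100, 200), then
 * range_tree_find_in(rt, 50, 100, &o, &s) returns B_TRUE with o = 100
 * and s = 50, i.e. the first overlapping subset of [50, 150).
 */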

/*
 * Ensure that this range is not in the tree, regardless of whether
 * it is currently in the tree.
 */
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
	range_seg_t *rs;

	if (size == 0)
		return;

	if (rt->rt_type == RANGE_SEG64)
		ASSERT3U(start + size, >, start);

	while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
		uint64_t free_start = MAX(rs_get_start(rs, rt), start);
		uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
		range_tree_remove(rt, free_start, free_end - free_start);
	}
}

void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
	range_tree_t *rt;

	ASSERT0(range_tree_space(*rtdst));
	ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));

	rt = *rtsrc;
	*rtsrc = *rtdst;
	*rtdst = rt;
}

void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
		rt->rt_ops->rtop_vacate(rt, rt->rt_arg);

	if (func != NULL) {
		range_seg_t *rs;
		zfs_btree_index_t *cookie = NULL;

		while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
		    NULL) {
			func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
			    rs_get_start(rs, rt));
		}
	} else {
		zfs_btree_clear(&rt->rt_root);
	}

	memset(rt->rt_histogram, 0, sizeof (rt->rt_histogram));
	rt->rt_space = 0;
}

void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
	    rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
		    rs_get_start(rs, rt));
	}
}

range_seg_t *
range_tree_first(range_tree_t *rt)
{
	return (zfs_btree_first(&rt->rt_root, NULL));
}

uint64_t
range_tree_space(range_tree_t *rt)
{
	return (rt->rt_space);
}

uint64_t
range_tree_numsegs(range_tree_t *rt)
{
	return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}

boolean_t
range_tree_is_empty(range_tree_t *rt)
{
	ASSERT(rt != NULL);
	return (range_tree_space(rt) == 0);
}

void
rt_btree_create(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;

	size_t size;
	switch (rt->rt_type) {
	case RANGE_SEG32:
		size = sizeof (range_seg32_t);
		break;
	case RANGE_SEG64:
		size = sizeof (range_seg64_t);
		break;
	case RANGE_SEG_GAP:
		size = sizeof (range_seg_gap_t);
		break;
	default:
		panic("Invalid range seg type %d", rt->rt_type);
	}
	zfs_btree_create(size_tree, rt->rt_btree_compare, size);
}

void
rt_btree_destroy(range_tree_t *rt, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;
	ASSERT0(zfs_btree_numnodes(size_tree));

	zfs_btree_destroy(size_tree);
}

void
rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_add(size_tree, rs);
}

void
rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
	(void) rt;
	zfs_btree_t *size_tree = arg;

	zfs_btree_remove(size_tree, rs);
}

void
rt_btree_vacate(range_tree_t *rt, void *arg)
{
	zfs_btree_t *size_tree = arg;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	rt_btree_create(rt, arg);
}

const range_tree_ops_t rt_btree_ops = {
	.rtop_create = rt_btree_create,
	.rtop_destroy = rt_btree_destroy,
	.rtop_add = rt_btree_add,
	.rtop_remove = rt_btree_remove,
	.rtop_vacate = rt_btree_vacate
};
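
/*
 * Sketch of how rt_btree_ops can be consumed (hypothetical comparator
 * and call site, not code from this file): the callbacks above mirror
 * every segment into a second btree kept in a caller-supplied order,
 * e.g. sorted by segment size:
 *
 *	static int
 *	size_compare(const void *x1, const void *x2)
 *	{
 *		const range_seg64_t *r1 = x1, *r2 = x2;
 *		uint64_t s1 = r1->rs_end - r1->rs_start;
 *		uint64_t s2 = r2->rs_end - r2->rs_start;
 *		if (s1 != s2)
 *			return (TREE_CMP(s1, s2));
 *		return (TREE_CMP(r1->rs_start, r2->rs_start));
 *	}
 *
 *	zfs_btree_t size_tree;
 *	range_tree_t *rt = range_tree_create_impl(&rt_btree_ops,
 *	    RANGE_SEG64, &size_tree, 0, 0, size_compare, 0);
 */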

/*
 * Remove any overlapping ranges between the given segment [start, end)
 * from removefrom. Add non-overlapping leftovers to addto.
 */
void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
    range_tree_t *removefrom, range_tree_t *addto)
{
	zfs_btree_index_t where;
	range_seg_max_t starting_rs;
	rs_set_start(&starting_rs, removefrom, start);
	rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
	    removefrom) + 1);

	range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
	    &starting_rs, &where);

	if (curr == NULL)
		curr = zfs_btree_next(&removefrom->rt_root, &where, &where);

	range_seg_t *next;
	for (; curr != NULL; curr = next) {
		if (start == end)
			return;
		VERIFY3U(start, <, end);

		/* there is no overlap */
		if (end <= rs_get_start(curr, removefrom)) {
			range_tree_add(addto, start, end - start);
			return;
		}

		uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
		    start);
		uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
		    end);
		uint64_t overlap_size = overlap_end - overlap_start;
		ASSERT3S(overlap_size, >, 0);
		range_seg_max_t rs;
		rs_copy(curr, &rs, removefrom);

		range_tree_remove(removefrom, overlap_start, overlap_size);

		if (start < overlap_start)
			range_tree_add(addto, start, overlap_start - start);

		start = overlap_end;

		next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
		/*
		 * If we find something here, we only removed part of the
		 * curr segment. Either there's some left at the end
		 * because we've reached the end of the range we're removing,
		 * or there's some left at the start because we started
		 * partway through the range. Either way, we continue with
		 * the loop. If it's the former, we'll return at the start of
		 * the loop, and if it's the latter we'll see if there is more
		 * area to process.
		 */
		if (next != NULL) {
			ASSERT(start == end || start == rs_get_end(&rs,
			    removefrom));
		}

		next = zfs_btree_next(&removefrom->rt_root, &where, &where);
	}
	VERIFY3P(curr, ==, NULL);

	if (start != end) {
		VERIFY3U(start, <, end);
		range_tree_add(addto, start, end - start);
	} else {
		VERIFY3U(start, ==, end);
	}
}

/*
 * For each entry in rt, if it exists in removefrom, remove it
 * from removefrom. Otherwise, add it to addto.
 */
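
/*
 * Worked example (illustrative values, not from the source): with
 * rt = { [0, 10) } and removefrom = { [5, 15) }, the overlap [5, 10)
 * is removed from removefrom (leaving { [10, 15) }) and the leftover
 * [0, 5) is added to addto.
 */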

void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
    range_tree_t *addto)
{
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
	    rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
		range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
		    rs_get_end(rs, rt), removefrom, addto);
	}
}

uint64_t
range_tree_min(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_start(rs, rt) : 0);
}

uint64_t
range_tree_max(range_tree_t *rt)
{
	range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
	return (rs != NULL ? rs_get_end(rs, rt) : 0);
}

uint64_t
range_tree_span(range_tree_t *rt)
{
	return (range_tree_max(rt) - range_tree_min(rt));
}