/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>

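/*
 * A bpobj ("block pointer object") is an on-disk array of block pointers
 * along with accounting for the space they reference.  A bpobj may also
 * hold a list of sub-bpobj object numbers, so large lists (such as dataset
 * deadlists) can be built up cheaply by concatenating existing bpobjs.
 */
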
/*
 * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
 */
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
			ASSERT0(dp->dp_empty_bpobj);
			dp->dp_empty_bpobj =
			    bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx);
			VERIFY(zap_add(os,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
			    &dp->dp_empty_bpobj, tx) == 0);
		}
		spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
		ASSERT(dp->dp_empty_bpobj != 0);
		return (dp->dp_empty_bpobj);
	} else {
		return (bpobj_alloc(os, blocksize, tx));
	}
}

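/*
 * Drop a reference on the empty bpobj.  When the last reference goes away,
 * the feature is deactivated, the pool directory ZAP entry is removed, and
 * the on-disk object is freed.
 */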
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_objset_pool(os);

	spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
	if (!spa_feature_is_active(dmu_objset_spa(os),
	    SPA_FEATURE_EMPTY_BPOBJ)) {
		VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, tx));
		VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
		dp->dp_empty_bpobj = 0;
	}
}

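/*
 * Allocate a new bpobj object.  The bonus buffer size depends on the pool
 * version: V0 carries only the block pointer and byte counts, V1 adds
 * compressed/uncompressed accounting, and newer pools use the full
 * bpobj_phys_t, which also supports subobj lists.
 */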
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	int size;

	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
		size = BPOBJ_SIZE_V0;
	else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		size = BPOBJ_SIZE_V1;
	else
		size = sizeof (bpobj_phys_t);

	return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
	    DMU_OT_BPOBJ_HDR, size, tx));
}

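/*
 * Free the on-disk bpobj, first recursively freeing any subobjs and the
 * object that holds the subobj array.  The dummy empty bpobj must never be
 * freed through this path.
 */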
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;
	dmu_buf_t *dbuf = NULL;

	ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
	VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY3U(0, ==, dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}

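/*
 * Open a bpobj: hold its bonus buffer and fill in the in-core bpobj_t.
 * Returns an error if the object's info cannot be read or the bonus buffer
 * cannot be held; on success the caller must eventually call bpobj_close().
 */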
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	err = dmu_object_info(os, object, &doi);
	if (err)
		return (err);

	bzero(bpo, sizeof (*bpo));
	mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);

	ASSERT(bpo->bpo_dbuf == NULL);
	ASSERT(bpo->bpo_phys == NULL);
	ASSERT(object != 0);
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);

	err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
	if (err)
		return (err);

	bpo->bpo_os = os;
	bpo->bpo_object = object;
	bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
	bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
	bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
	bpo->bpo_phys = bpo->bpo_dbuf->db_data;
	return (0);
}

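/*
 * Release the holds taken by bpobj_open() (and any cached data buffer)
 * and tear down the in-core state.
 */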
void
bpobj_close(bpobj_t *bpo)
{
	/* Lame workaround for closing a bpobj that was never opened. */
	if (bpo->bpo_object == 0)
		return;

	dmu_buf_rele(bpo->bpo_dbuf, bpo);
	if (bpo->bpo_cached_dbuf != NULL)
		dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
	bpo->bpo_dbuf = NULL;
	bpo->bpo_phys = NULL;
	bpo->bpo_cached_dbuf = NULL;
	bpo->bpo_object = 0;

	mutex_destroy(&bpo->bpo_lock);
}

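/*
 * Walk the bpobj from newest entry to oldest, calling func on each block
 * pointer and then recursing into each subobj.  If "free" is set, entries
 * are removed as they are visited and the space accounting in the bonus
 * buffer is adjusted.  Iteration stops at the first entry for which func
 * returns nonzero.
 */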
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		i++;
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    i * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			if (err)
				break;
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			VERIFY3U(0, ==, bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	ASSERT(bpo->bpo_phys->bpo_num_blkptrs > 0 ||
	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs > 0) ||
	    bpo->bpo_phys->bpo_bytes == 0);

	mutex_exit(&bpo->bpo_lock);
	return (err);
}

/*
 * Iterate and remove the entries.  If func returns nonzero, iteration
 * will stop and that entry will not be removed.
 */
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}

/*
 * Iterate the entries.  If func returns nonzero, iteration will stop.
 */
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}

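/*
 * Add an entire bpobj (and the space it accounts for) to bpo's subobj list.
 * Empty subobjs are freed rather than enqueued, and references on the dummy
 * empty bpobj are simply dropped.  If the subobj itself has a single block
 * of sub-subobjs, those are folded directly into bpo's list to limit
 * recursion depth in bpobj_iterate().
 */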
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
		bpobj_decr_empty(bpo->bpo_os, tx);
		return;
	}

	VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (used == 0) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
	}

	dmu_object_info_t doi;
	ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);

	mutex_enter(&bpo->bpo_lock);
	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/*
			 * Make sure that we are not asking dmu_write()
			 * to write more data than we have in our buffer.
			 */
			VERIFY3U(subdb->db_size, >=,
			    numsubsub * sizeof (subobj));
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}

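/*
 * Append a single block pointer to the bpobj.  The fill count is never
 * needed, and for non-dedup blocks the checksum is zeroed so the bpobj
 * data compresses better.
 */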
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	/* The bpobj will compress better if we can leave off the checksum */
	if (!BP_GET_DEDUP(bp))
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));

	mutex_enter(&bpo->bpo_lock);

	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}

struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;
	uint64_t maxtxg;
	uint64_t used;
	uint64_t comp;
	uint64_t uncomp;
};

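/*
 * Iteration callback that accumulates the space referenced by block
 * pointers born in (mintxg, maxtxg].
 */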
/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct space_range_arg *sra = arg;

	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
		if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
			sra->used += bp_get_dsize_sync(sra->spa, bp);
		else
			sra->used += bp_get_dsize(sra->spa, bp);
		sra->comp += BP_GET_PSIZE(bp);
		sra->uncomp += BP_GET_UCSIZE(bp);
	}
	return (0);
}

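/*
 * Return the total amount of space referenced by the bpobj.  If the bpobj
 * predates compressed/uncompressed accounting, fall back to iterating over
 * all of its block pointers.
 */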
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	mutex_enter(&bpo->bpo_lock);

	*usedp = bpo->bpo_phys->bpo_bytes;
	if (bpo->bpo_havecomp) {
		*compp = bpo->bpo_phys->bpo_comp;
		*uncompp = bpo->bpo_phys->bpo_uncomp;
		mutex_exit(&bpo->bpo_lock);
		return (0);
	} else {
		mutex_exit(&bpo->bpo_lock);
		return (bpobj_space_range(bpo, 0, UINT64_MAX,
		    usedp, compp, uncompp));
	}
}

/*
 * Return the amount of space in the bpobj which is:
 * mintxg < blk_birth <= maxtxg
 */
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	struct space_range_arg sra = { 0 };
	int err;

	/*
	 * As an optimization, if they want the whole txg range, just
	 * get bpo_bytes rather than iterating over the bps.
	 */
	if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
		return (bpobj_space(bpo, usedp, compp, uncompp));

	sra.spa = dmu_objset_spa(bpo->bpo_os);
	sra.mintxg = mintxg;
	sra.maxtxg = maxtxg;

	err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
	*usedp = sra.used;
	*compp = sra.comp;
	*uncompp = sra.uncomp;
	return (err);
}