/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>

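/*
 * A bpobj ("block pointer object") is an on-disk log of block pointers,
 * stored as a plain array of blkptr_t in a DMU object, with summary
 * space accounting kept in its bonus buffer (bpobj_phys_t).  A large
 * bpobj may also reference a list of sub-bpobjs (see
 * bpobj_enqueue_subobj() below).  ZFS uses bpobjs for, e.g., deadlists
 * of blocks freed by dataset deletion.
 */
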
/*
 * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
 */
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
			ASSERT0(dp->dp_empty_bpobj);
			dp->dp_empty_bpobj =
			    bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx);
			VERIFY(zap_add(os,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
			    &dp->dp_empty_bpobj, tx) == 0);
		}
		spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
		ASSERT(dp->dp_empty_bpobj != 0);
		return (dp->dp_empty_bpobj);
	} else {
		return (bpobj_alloc(os, blocksize, tx));
	}
}

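/*
 * Release one hold on the shared empty bpobj.  When the last hold is
 * released, deactivate the feature, remove the ZAP entry, and destroy
 * the on-disk object.
 */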
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_objset_pool(os);

	spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
	if (!spa_feature_is_active(dmu_objset_spa(os),
	    SPA_FEATURE_EMPTY_BPOBJ)) {
		VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, tx));
		VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
		dp->dp_empty_bpobj = 0;
	}
}

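/*
 * Allocate a new bpobj object.  The bonus-buffer (header) size depends
 * on the pool version, so pools that predate the newer accounting
 * fields keep the older, smaller on-disk format.
 */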
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	int size;

	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
		size = BPOBJ_SIZE_V0;
	else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		size = BPOBJ_SIZE_V1;
	else
		size = sizeof (bpobj_phys_t);

	return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
	    DMU_OT_BPOBJ_HDR, size, tx));
}

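/*
 * Free the given bpobj: recursively free any sub-bpobjs first, then the
 * subobj array, and finally the bpobj object itself.
 */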
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;
	dmu_buf_t *dbuf = NULL;

	ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
	VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY3U(0, ==, dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}

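/*
 * Open a bpobj: hold its bonus buffer and fill in the in-core bpobj_t.
 * Returns 0 on success, or an error from the DMU.
 */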
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	err = dmu_object_info(os, object, &doi);
	if (err)
		return (err);

	bzero(bpo, sizeof (*bpo));
	mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);

	ASSERT(bpo->bpo_dbuf == NULL);
	ASSERT(bpo->bpo_phys == NULL);
	ASSERT(object != 0);
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);

	err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
	if (err)
		return (err);

	bpo->bpo_os = os;
	bpo->bpo_object = object;
	bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
	bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
	bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
	bpo->bpo_phys = bpo->bpo_dbuf->db_data;
	return (0);
}

void
bpobj_close(bpobj_t *bpo)
{
	/* Lame workaround for closing a bpobj that was never opened. */
	if (bpo->bpo_object == 0)
		return;

	dmu_buf_rele(bpo->bpo_dbuf, bpo);
	if (bpo->bpo_cached_dbuf != NULL)
		dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
	bpo->bpo_dbuf = NULL;
	bpo->bpo_phys = NULL;
	bpo->bpo_cached_dbuf = NULL;
	bpo->bpo_object = 0;

	mutex_destroy(&bpo->bpo_lock);
}

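/*
 * Walk every entry in the bpobj, calling func on each blkptr_t; entries
 * are visited newest-first.  If free is B_TRUE, each visited entry is
 * removed and the space accounting is adjusted.  Entries held in
 * sub-bpobjs are visited by recursing on each sub-bpobj in turn.
 */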
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		i++;
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    i * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			if (err)
				break;
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			VERIFY3U(0, ==, bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	ASSERT(bpo->bpo_phys->bpo_num_blkptrs > 0 ||
	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs > 0) ||
	    bpo->bpo_phys->bpo_bytes == 0);

	mutex_exit(&bpo->bpo_lock);
	return (err);
}

/*
 * Iterate and remove the entries.  If func returns nonzero, iteration
 * will stop and that entry will not be removed.
 */
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}

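/*
 * Example (an illustrative sketch only; count_cb is hypothetical and
 * not part of this file): a minimal bpobj_itor_t callback that counts
 * entries without removing them.
 *
 *	static int
 *	count_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) bpobj_iterate_nofree(bpo, count_cb, &count, NULL);
 */
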
/*
 * Iterate the entries.  If func returns nonzero, iteration will stop.
 */
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}

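/*
 * Add the contents of the bpobj "subobj" to this bpobj by reference:
 * record subobj in our subobj array and fold its space accounting into
 * ours.  The caller transfers ownership of subobj to this bpobj.
 */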
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
		bpobj_decr_empty(bpo->bpo_os, tx);
		return;
	}

	VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (used == 0) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
	}

	dmu_object_info_t doi;
	ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);

	mutex_enter(&bpo->bpo_lock);
	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/*
			 * Make sure that we are not asking dmu_write()
			 * to write more data than we have in our buffer.
			 */
			VERIFY3U(subdb->db_size, >=,
			    numsubsub * sizeof (subobj));
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}

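/*
 * Append a single block pointer to the bpobj, growing the on-disk array
 * and updating the space accounting.
 */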
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	/* The bpobj will compress better if we can leave off the checksum */
	if (!BP_GET_DEDUP(bp))
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));

	mutex_enter(&bpo->bpo_lock);

	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}

struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;
	uint64_t maxtxg;
	uint64_t used;
	uint64_t comp;
	uint64_t uncomp;
};

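/*
 * bpobj_space_range() callback: accumulate the space used by block
 * pointers born within (mintxg, maxtxg].
 */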
/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct space_range_arg *sra = arg;

	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
		if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
			sra->used += bp_get_dsize_sync(sra->spa, bp);
		else
			sra->used += bp_get_dsize(sra->spa, bp);
		sra->comp += BP_GET_PSIZE(bp);
		sra->uncomp += BP_GET_UCSIZE(bp);
	}
	return (0);
}

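/*
 * Return the total space used, compressed, and uncompressed by this
 * bpobj.  Uses the totals cached in the bonus buffer when available;
 * old-format (V0) bpobjs lack comp/uncomp totals and fall back to
 * iterating over the entries.
 */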
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	mutex_enter(&bpo->bpo_lock);

	*usedp = bpo->bpo_phys->bpo_bytes;
	if (bpo->bpo_havecomp) {
		*compp = bpo->bpo_phys->bpo_comp;
		*uncompp = bpo->bpo_phys->bpo_uncomp;
		mutex_exit(&bpo->bpo_lock);
		return (0);
	} else {
		mutex_exit(&bpo->bpo_lock);
		return (bpobj_space_range(bpo, 0, UINT64_MAX,
		    usedp, compp, uncompp));
	}
}

/*
 * Return the amount of space in the bpobj which is:
 * mintxg < blk_birth <= maxtxg
 */
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	struct space_range_arg sra = { 0 };
	int err;

	/*
	 * As an optimization, if they want the whole txg range, just
	 * get bpo_bytes rather than iterating over the bps.
	 */
	if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
		return (bpobj_space(bpo, usedp, compp, uncompp));

	sra.spa = dmu_objset_spa(bpo->bpo_os);
	sra.mintxg = mintxg;
	sra.maxtxg = maxtxg;

	err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
	*usedp = sra.used;
	*compp = sra.comp;
	*uncompp = sra.uncomp;
	return (err);
}