/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/zfs_context.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;
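
/*
 * Editor's note (not from the original source): on a live illumos system
 * this counter can be inspected from the kernel debugger, e.g. with
 * something like
 *
 *	echo 'zfs_free_range_recv_miss/E' | mdb -k
 *
 * (assuming mdb is available; /E prints an 8-byte decimal value).
 */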
static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func, dmu_buf_t **clear_on_evict_dbufp);
/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;
static taskq_t *dbu_evict_taskq;
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);

	return (0);
}
86 dbuf_dest(void *vdb, void *unused)
88 dmu_buf_impl_t *db = vdb;
89 mutex_destroy(&db->db_mtx);
90 cv_destroy(&db->db_changed);
91 refcount_destroy(&db->db_holds);
/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);

	return (crc);
}
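
/*
 * Editor's sketch (not part of the original source): the table size is
 * always a power of two, so callers derive a bucket index by masking the
 * hash, exactly as dbuf_find() does below:
 *
 *	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
 *	uint64_t idx = hv & h->hash_table_mask;
 *	mutex_enter(DBUF_HASH_MUTEX(h, idx));
 */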
#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}
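
/*
 * Editor's illustration (not part of the original source): callers treat
 * dbuf_hash_insert() as an atomic insert-or-find, as dbuf_create() does
 * later in this file:
 *
 *	if ((odb = dbuf_hash_insert(db)) != NULL) {
 *		(someone else inserted it first -- discard db, use odb)
 *	}
 */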
/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}
static arc_evict_func_t dbuf_do_evict;

typedef enum dbvu_verify_type {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;

	/*
	 * Invoke the callback from a taskq to avoid lock order reversals
	 * and limit stack depth.
	 */
	taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func, dbu, 0,
	    &dbu->dbu_tqent);
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}
void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}
void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
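	/*
	 * Editor's worked example (not from the original source): with 8GB
	 * of physical memory the loop below stops at hsize = 2^21
	 * (2^21 * 4096 = 8GB), so with 8-byte pointers the bucket array
	 * occupies 2^21 * 8 = 16MB, matching the 2MB/GB figure above.
	 */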
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}
	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);
}
void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
	taskq_destroy(dbu_evict_taskq);
}
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}
	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	db->db_buf = NULL;
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
	if (!arc_released(buf))
		arc_set_callback(buf, dbuf_do_evict, db);
}
/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}
/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
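
/*
 * Editor's worked example (not from the original source): for a file with
 * 128K data blocks (datablkshift = 17) and 16K indirect blocks
 * (indblkshift = 14, so 2^(14 - 7) = 128 block pointers per indirect),
 * the level-1 blkid covering byte offset 1GB is
 * 2^30 >> (17 + 1 * (14 - 7)) = 2^30 >> 24 = 64.
 */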
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}
	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}
	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_FLAG_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);
	DB_DNODE_EXIT(db);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
				    db, zio_t *, zio);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}
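
/*
 * Editor's illustration (not from the original source): dmu_buf_will_dirty()
 * later in this file shows the typical caller pattern:
 *
 *	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
 *	(void) dbuf_read(db, NULL, rf);
 */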
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_clear_data(db);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}
/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group, before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put a
 * hold on the buffer, we just traverse the active dbuf list for the
 * dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_clear_data(db);
	}
}
static void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}
/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t db_search;
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;
	avl_index_t where;
	boolean_t freespill =
	    (start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID);

	if (end_blkid > dn->dn_maxblkid && !freespill)
		end_blkid = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);

	db_search.db_level = 0;
	db_search.db_blkid = start_blkid;
	db_search.db_state = DB_SEARCH;

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start_blkid >= dn->dn_unlisted_l0_blkid && !freespill) {
		/* There can't be any dbufs in this range; no need to search. */
#ifdef DEBUG
		db = avl_find(&dn->dn_dbufs, &db_search, &where);
		ASSERT3P(db, ==, NULL);
		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
		ASSERT(db == NULL || db->db_level > 0);
#endif
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order.  If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	db = avl_find(&dn->dn_dbufs, &db_search, &where);
	ASSERT3P(db, ==, NULL);
	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
	for (; db != NULL; db = db_next) {
		db_next = AVL_NEXT(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0 || db->db_blkid > end_blkid) {
			break;
		}
		ASSERT3U(db->db_blkid, >=, start_blkid);

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * the data on disk.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg != 0)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}
/*
 * We already have a dirty record for this TXG, and we are being
 * dirtied again.
 */
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this buffer has already been written out,
		 * we now need to reset its state.
		 */
		dbuf_unoverride(dr);
		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
		    db->db_state != DB_NOFILL) {
			/* Already released on initial dirty, so just thaw. */
			ASSERT(arc_released(db->db_buf));
			arc_buf_thaw(db->db_buf);
		}
	}
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		dbuf_redirty(dr);
		mutex_exit(&db->db_mtx);
		return (dr);
	}
	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;
	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);
	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}
	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}
/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);

	/*
	 * Due to our use of dn_nlevels below, this can only be called
	 * in open context, unless we are operating on the MOS.
	 * From syncing context, dn_nlevels may be different from the
	 * dn_nlevels used when dbuf was dirtied.
	 */
	ASSERT(db->db_objset ==
	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
	    dr->dr_accounted, txg);
	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level + 1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}

	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_clear_data(db);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	/*
	 * Quick check for dirtiness.  For already dirty blocks, this
	 * reduces runtime of this function by >90%, and overall performance
	 * by 50% for some workloads (e.g. file deletion with indirect blocks
	 * cached).
	 */
	mutex_enter(&db->db_mtx);
	dbuf_dirty_record_t *dr;
	for (dr = db->db_last_dirty;
	    dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
		/*
		 * It's possible that it is already dirty but not cached,
		 * because there are some calls to dbuf_dirty() that don't
		 * go through dmu_buf_will_dirty().
		 */
		if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
			/* This dbuf is already dirty and cached. */
			dbuf_redirty(dr);
			mutex_exit(&db->db_mtx);
			return;
		}
	}
	mutex_exit(&db->db_mtx);

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}
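
/*
 * Editor's illustration (not from the original source): the canonical DMU
 * consumer sequence for modifying an existing block is hold, declare the
 * dirty intent, modify, release, e.g.:
 *
 *	dmu_buf_t *db;
 *	VERIFY0(dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_buf_will_dirty(db, tx);
 *	bcopy(&new_contents, db->db_data, sizeof (new_contents));
 *	dmu_buf_rele(db, FTAG);
 *
 * (new_contents is a hypothetical name; tx must already be assigned.)
 */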
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}
#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}
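
/*
 * Editor's illustration (not from the original source): the fill protocol
 * pairs dmu_buf_will_fill() with dmu_buf_fill_done() around a wholesale
 * rewrite of the buffer, roughly as dmu_write() does for full blocks:
 *
 *	dmu_buf_will_fill(db, tx);
 *	bcopy(src, db->db_data, db->db_size);
 *	dmu_buf_fill_done(db, tx);
 */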
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	if (etype == BP_EMBEDDED_TYPE_DATA) {
		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
		    SPA_FEATURE_EMBEDDED_DATA));
	}

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}
/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}
	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}
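
/*
 * Editor's illustration (not from the original source): a zero-copy write
 * path loans a buffer from the ARC, fills it, and assigns it here
 * (essentially what dmu_assign_arcbuf() does):
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, db->db.db_size);
 *	bcopy(src, buf->b_data, db->db.db_size);
 *	dbuf_assign_arcbuf(db, buf, tx);
 */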
1762 * "Clear" the contents of this dbuf. This will mark the dbuf
1763 * EVICTING and clear *most* of its references. Unfortunately,
1764 * when we are not holding the dn_dbufs_mtx, we can't clear the
1765 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1766 * in this case. For callers from the DMU we will usually see:
1767 * dbuf_clear()->arc_clear_callback()->dbuf_do_evict()->dbuf_destroy()
1768 * For the arc callback, we will usually see:
1769 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1770 * Sometimes, though, we will get a mix of these two:
1771 * DMU: dbuf_clear()->arc_clear_callback()
1772 * ARC: dbuf_do_evict()->dbuf_destroy()
1774 * This routine will dissociate the dbuf from the arc, by calling
1775 * arc_clear_callback(), but will not evict the data from the ARC.
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	boolean_t dbuf_gone = B_FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		avl_remove(&dn->dn_dbufs, db);
		atomic_dec_32(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold
		 * corresponding to the removed dbuf is no longer discounted
		 * in dnode_move(), so the dnode cannot be moved until after
		 * we release the hold.  The membar_producer() ensures
		 * visibility of the decremented value in dnode_move(), since
		 * DB_DNODE_EXIT doesn't actually release any lock.
		 */
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_clear_callback(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}
/*
 * Note: While bpp will always be updated if the function returns success,
 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
 * this happens when the dnode is the meta-dnode, or a userused or groupused
 * object.
 */
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user = NULL;
	db->db_user_immediate_evict = FALSE;
	db->db_freed_in_flight = FALSE;
	db->db_pending_evict = FALSE;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	avl_add(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	atomic_inc_32(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}
static void
dbuf_do_evict(void *private)
{
	dmu_buf_impl_t *db = private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
}
static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			avl_remove(&dn->dn_dbufs, db);
			atomic_dec_32(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}
typedef struct dbuf_prefetch_arg {
	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
	int dpa_curlevel; /* The current level that we're reading */
	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
} dbuf_prefetch_arg_t;
/*
 * Actually issue the prefetch read for the block given.
 */
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return;

	arc_flags_t aflags =
	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
	ASSERT(dpa->dpa_zio != NULL);
	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
	    dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
	    &aflags, &dpa->dpa_zb);
}
2095 * Called when an indirect block above our prefetch target is read in. This
2096 * will either read in the next indirect block down the tree or issue the actual
2097 * prefetch if the next block down is our target.
2100 dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
2102 dbuf_prefetch_arg_t *dpa = private;
2104 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
2105 ASSERT3S(dpa->dpa_curlevel, >, 0);
2107 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
2108 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
2109 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
2112 dpa->dpa_curlevel--;
2114 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
2115 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
2116 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
2117 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
2118 if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) {
2119 kmem_free(dpa, sizeof (*dpa));
2120 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
2121 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
2122 dbuf_issue_final_prefetch(dpa, bp);
2123 kmem_free(dpa, sizeof (*dpa));
2124 } else {
2125 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2126 zbookmark_phys_t zb;
2128 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
2130 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
2131 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
2133 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2134 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
2135 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2136 &iter_aflags, &zb);
2137 }
2138 (void) arc_buf_remove_ref(abuf, private);
2142 * Issue prefetch reads for the given block on the given level. If the indirect
2143 * blocks above that block are not in memory, we will read them in
2144 * asynchronously. As a result, this call never blocks waiting for a read to
2145 * complete.
2148 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
2152 int epbs, nlevels, curlevel;
2155 ASSERT(blkid != DMU_BONUS_BLKID);
2156 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2158 if (blkid > dn->dn_maxblkid)
2159 return;
2161 if (dnode_block_freed(dn, blkid))
2162 return;
2164 /*
2165 * This dnode hasn't been written to disk yet, so there's nothing to
2166 * prefetch.
2167 */
2168 nlevels = dn->dn_phys->dn_nlevels;
2169 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
2170 return;
2172 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2173 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
2174 return;
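/*
 * The check above rules out prefetching past the end of the object;
 * e.g. (assuming epbs == 7) a level-1 block at blkid 3 covers level-0
 * blkids 384 and up, so if dn_maxblkid is below 3 << 7 == 384 there
 * is nothing on disk to prefetch.
 */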
2176 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
2179 mutex_exit(&db->db_mtx);
2181 * This dbuf already exists. It is either CACHED, or
2182 * (we assume) about to be read or filled.
2188 * Find the closest ancestor (indirect block) of the target block
2189 * that is present in the cache. In this indirect block, we will
2190 * find the bp that is at curlevel, curblkid.
2194 while (curlevel < nlevels - 1) {
2195 int parent_level = curlevel + 1;
2196 uint64_t parent_blkid = curblkid >> epbs;
2199 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
2200 FALSE, TRUE, FTAG, &db) == 0) {
2201 blkptr_t *bpp = db->db_buf->b_data;
2202 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
2203 dbuf_rele(db, FTAG);
2207 curlevel = parent_level;
2208 curblkid = parent_blkid;
2211 if (curlevel == nlevels - 1) {
2212 /* No cached indirect blocks found. */
2213 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
2214 bp = dn->dn_phys->dn_blkptr[curblkid];
2215 }
2216 if (BP_IS_HOLE(&bp))
2217 return;
2219 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
2221 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
2224 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
2225 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
2226 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2227 dn->dn_object, level, blkid);
2228 dpa->dpa_curlevel = curlevel;
2229 dpa->dpa_prio = prio;
2230 dpa->dpa_aflags = aflags;
2231 dpa->dpa_spa = dn->dn_objset->os_spa;
2232 dpa->dpa_epbs = epbs;
2236 * If we have the indirect just above us, no need to do the asynchronous
2237 * prefetch chain; we'll just run the last step ourselves. If we're at
2238 * a higher level, though, we want to issue the prefetches for all the
2239 * indirect blocks asynchronously, so we can go on with whatever we were
2240 * doing.
2242 if (curlevel == level) {
2243 ASSERT3U(curblkid, ==, blkid);
2244 dbuf_issue_final_prefetch(dpa, &bp);
2245 kmem_free(dpa, sizeof (*dpa));
2246 } else {
2247 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
2248 zbookmark_phys_t zb;
2250 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
2251 dn->dn_object, curlevel, curblkid);
2252 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
2253 &bp, dbuf_prefetch_indirect_done, dpa, prio,
2254 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
2255 &iter_aflags, &zb);
2256 }
2258 * We use pio here instead of dpa_zio since it's possible that
2259 * dpa may have already been freed.
2260 */
2261 zio_nowait(pio);
2262 }
2265 * Returns with db_holds incremented, and db_mtx not held.
2266 * Note: dn_struct_rwlock must be held.
2269 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
2270 boolean_t fail_sparse, boolean_t fail_uncached,
2271 void *tag, dmu_buf_impl_t **dbp)
2273 dmu_buf_impl_t *db, *parent = NULL;
2275 ASSERT(blkid != DMU_BONUS_BLKID);
2276 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2277 ASSERT3U(dn->dn_nlevels, >, level);
2281 /* dbuf_find() returns with db_mtx held */
2282 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2285 blkptr_t *bp = NULL;
2289 return (SET_ERROR(ENOENT));
2291 ASSERT3P(parent, ==, NULL);
2292 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2294 if (err == 0 && bp && BP_IS_HOLE(bp))
2295 err = SET_ERROR(ENOENT);
2298 dbuf_rele(parent, NULL);
2302 if (err && err != ENOENT)
2304 db = dbuf_create(dn, level, blkid, parent, bp);
2307 if (fail_uncached && db->db_state != DB_CACHED) {
2308 mutex_exit(&db->db_mtx);
2309 return (SET_ERROR(ENOENT));
2312 if (db->db_buf && refcount_is_zero(&db->db_holds)) {
2313 arc_buf_add_ref(db->db_buf, db);
2314 if (db->db_buf->b_data == NULL) {
2317 dbuf_rele(parent, NULL);
2322 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2325 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2328 * If this buffer is currently syncing out, and we are
2329 * still referencing it from db_data, we need to make a copy
2330 * of it in case we decide we want to dirty it again in this txg.
2332 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2333 dn->dn_object != DMU_META_DNODE_OBJECT &&
2334 db->db_state == DB_CACHED && db->db_data_pending) {
2335 dbuf_dirty_record_t *dr = db->db_data_pending;
2337 if (dr->dt.dl.dr_data == db->db_buf) {
2338 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2340 dbuf_set_data(db,
2341 arc_buf_alloc(dn->dn_objset->os_spa,
2342 db->db.db_size, db, type));
2343 bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
2344 db->db.db_size);
2348 (void) refcount_add(&db->db_holds, tag);
2350 mutex_exit(&db->db_mtx);
2352 /* NOTE: we can't rele the parent until after we drop the db_mtx */
2354 dbuf_rele(parent, NULL);
2356 ASSERT3P(DB_DNODE(db), ==, dn);
2357 ASSERT3U(db->db_blkid, ==, blkid);
2358 ASSERT3U(db->db_level, ==, level);
2365 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
2367 return (dbuf_hold_level(dn, 0, blkid, tag));
2371 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
2374 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
2375 return (err ? NULL : db);
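/*
 * A minimal usage sketch (not from this file): every hold taken here
 * must be dropped with dbuf_rele() under the same tag, with
 * dn_struct_rwlock held across the hold itself, e.g.:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */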
2379 dbuf_create_bonus(dnode_t *dn)
2381 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
2383 ASSERT(dn->dn_bonus == NULL);
2384 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
2388 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
2390 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2393 if (db->db_blkid != DMU_SPILL_BLKID)
2394 return (SET_ERROR(ENOTSUP));
2395 if (blksz == 0)
2396 blksz = SPA_MINBLOCKSIZE;
2397 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
2398 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2402 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2403 dbuf_new_size(db, blksz, tx);
2404 rw_exit(&dn->dn_struct_rwlock);
2411 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2413 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2416 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2418 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2420 int64_t holds = refcount_add(&db->db_holds, tag);
2421 ASSERT(holds > 1);
2424 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
2426 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
2429 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2430 dmu_buf_impl_t *found_db;
2431 boolean_t result = B_FALSE;
2433 if (db->db_blkid == DMU_BONUS_BLKID)
2434 found_db = dbuf_find_bonus(os, obj);
2436 found_db = dbuf_find(os, obj, 0, blkid);
2438 if (found_db != NULL) {
2439 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
2440 (void) refcount_add(&db->db_holds, tag);
2441 result = B_TRUE;
2442 }
2443 mutex_exit(&db->db_mtx);
2449 * If you call dbuf_rele() you had better not be referencing the dnode handle
2450 * unless you have some other direct or indirect hold on the dnode. (An indirect
2451 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2452 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2453 * dnode's parent dbuf evicting its dnode handles.
2456 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2458 mutex_enter(&db->db_mtx);
2459 dbuf_rele_and_unlock(db, tag);
2463 dmu_buf_rele(dmu_buf_t *db, void *tag)
2465 dbuf_rele((dmu_buf_impl_t *)db, tag);
2469 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
2470 * db_dirtycnt and db_holds to be updated atomically.
2473 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2477 ASSERT(MUTEX_HELD(&db->db_mtx));
2481 * Remove the reference to the dbuf before removing its hold on the
2482 * dnode so we can guarantee in dnode_move() that a referenced bonus
2483 * buffer has a corresponding dnode hold.
2485 holds = refcount_remove(&db->db_holds, tag);
2489 * We can't freeze indirects if there is a possibility that they
2490 * may be modified in the current syncing context.
2492 if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2493 arc_buf_freeze(db->db_buf);
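/*
 * To spell the condition out (a sketch of the reasoning): for a
 * level-0 dbuf, the remaining holds equal db_dirtycnt exactly when
 * every hold left was taken by dbuf_dirty(), so no open-context
 * consumer can still write to the buffer and it is safe to freeze
 * its contents for ARC checksum verification.
 */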
2495 if (holds == db->db_dirtycnt &&
2496 db->db_level == 0 && db->db_user_immediate_evict)
2497 dbuf_evict_user(db);
2500 if (db->db_blkid == DMU_BONUS_BLKID) {
2502 boolean_t evict_dbuf = db->db_pending_evict;
2505 * If the dnode moves here, we cannot cross this
2506 * barrier until the move completes.
2511 atomic_dec_32(&dn->dn_dbufs_count);
2514 * Decrementing the dbuf count means that the bonus
2515 * buffer's dnode hold is no longer discounted in
2516 * dnode_move(). The dnode cannot move until after
2517 * the dnode_rele() below.
2522 * Do not reference db after its lock is dropped.
2523 * Another thread may evict it.
2525 mutex_exit(&db->db_mtx);
2528 dnode_evict_bonus(dn);
2531 } else if (db->db_buf == NULL) {
2533 * This is a special case: we never associated this
2534 * dbuf with any data allocated from the ARC.
2536 ASSERT(db->db_state == DB_UNCACHED ||
2537 db->db_state == DB_NOFILL);
2539 } else if (arc_released(db->db_buf)) {
2540 arc_buf_t *buf = db->db_buf;
2542 * This dbuf has anonymous data associated with it.
2544 dbuf_clear_data(db);
2545 VERIFY(arc_buf_remove_ref(buf, db));
2548 VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2551 * A dbuf will be eligible for eviction if either the
2552 * 'primarycache' property is set or a duplicate
2553 * copy of this buffer is already cached in the arc.
2555 * In the case of the 'primarycache' property, a buffer
2556 * is considered for eviction if it matches the
2557 * criteria set in the property.
2559 * To decide if our buffer is considered a
2560 * duplicate, we must call into the arc to determine
2561 * if multiple buffers are referencing the same
2562 * block on-disk. If so, then we simply evict ourselves.
2565 if (!DBUF_IS_CACHEABLE(db)) {
2566 if (db->db_blkptr != NULL &&
2567 !BP_IS_HOLE(db->db_blkptr) &&
2568 !BP_IS_EMBEDDED(db->db_blkptr)) {
2569 spa_t *spa =
2570 dmu_objset_spa(db->db_objset);
2571 blkptr_t bp = *db->db_blkptr;
2573 arc_freed(spa, &bp);
2577 } else if (db->db_pending_evict ||
2578 arc_buf_eviction_needed(db->db_buf)) {
2581 mutex_exit(&db->db_mtx);
2585 mutex_exit(&db->db_mtx);
2589 #pragma weak dmu_buf_refcount = dbuf_refcount
2591 dbuf_refcount(dmu_buf_impl_t *db)
2593 return (refcount_count(&db->db_holds));
2597 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
2598 dmu_buf_user_t *new_user)
2600 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2602 mutex_enter(&db->db_mtx);
2603 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2604 if (db->db_user == old_user)
2605 db->db_user = new_user;
2607 old_user = db->db_user;
2608 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2609 mutex_exit(&db->db_mtx);
2615 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2617 return (dmu_buf_replace_user(db_fake, NULL, user));
2621 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2623 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2625 db->db_user_immediate_evict = TRUE;
2626 return (dmu_buf_set_user(db_fake, user));
2630 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2632 return (dmu_buf_replace_user(db_fake, user, NULL));
2636 dmu_buf_get_user(dmu_buf_t *db_fake)
2638 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2640 dbuf_verify_user(db, DBVU_NOT_EVICTING);
2641 return (db->db_user);
2645 dmu_buf_user_evict_wait()
2647 taskq_wait(dbu_evict_taskq);
2651 dmu_buf_freeable(dmu_buf_t *dbuf)
2653 boolean_t res = B_FALSE;
2654 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2656 if (db->db_blkptr)
2657 res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2658 db->db_blkptr, db->db_blkptr->blk_birth);
2664 dmu_buf_get_blkptr(dmu_buf_t *db)
2666 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2667 return (dbi->db_blkptr);
2671 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2673 /* ASSERT(dmu_tx_is_syncing(tx)); */
2674 ASSERT(MUTEX_HELD(&db->db_mtx));
2676 if (db->db_blkptr != NULL)
2679 if (db->db_blkid == DMU_SPILL_BLKID) {
2680 db->db_blkptr = &dn->dn_phys->dn_spill;
2681 BP_ZERO(db->db_blkptr);
2684 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2686 * This buffer was allocated at a time when there were
2687 * no blkptrs available in the dnode, or it was
2688 * inappropriate to hook it in (i.e., nlevels mismatch).
2690 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2691 ASSERT(db->db_parent == NULL);
2692 db->db_parent = dn->dn_dbuf;
2693 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2696 dmu_buf_impl_t *parent = db->db_parent;
2697 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2699 ASSERT(dn->dn_phys->dn_nlevels > 1);
2700 if (parent == NULL) {
2701 mutex_exit(&db->db_mtx);
2702 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2703 parent = dbuf_hold_level(dn, db->db_level + 1,
2704 db->db_blkid >> epbs, db);
2705 rw_exit(&dn->dn_struct_rwlock);
2706 mutex_enter(&db->db_mtx);
2707 db->db_parent = parent;
2709 db->db_blkptr = (blkptr_t *)parent->db.db_data +
2710 (db->db_blkid & ((1ULL << epbs) - 1));
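/*
 * Worked example for the parent case (a sketch, assuming epbs == 7):
 * a level-0 dbuf with db_blkid == 300 hangs off the level-1 indirect
 * at blkid 300 >> 7 == 2, and its blkptr is entry
 * 300 & 127 == 44 of that parent's db_data array.
 */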
2716 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2718 dmu_buf_impl_t *db = dr->dr_dbuf;
2722 ASSERT(dmu_tx_is_syncing(tx));
2724 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2726 mutex_enter(&db->db_mtx);
2728 ASSERT(db->db_level > 0);
2731 /* Read the block if it hasn't been read yet. */
2732 if (db->db_buf == NULL) {
2733 mutex_exit(&db->db_mtx);
2734 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2735 mutex_enter(&db->db_mtx);
2737 ASSERT3U(db->db_state, ==, DB_CACHED);
2738 ASSERT(db->db_buf != NULL);
2742 /* Indirect block size must match what the dnode thinks it is. */
2743 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2744 dbuf_check_blkptr(dn, db);
2747 /* Provide the pending dirty record to child dbufs */
2748 db->db_data_pending = dr;
2750 mutex_exit(&db->db_mtx);
2751 dbuf_write(dr, db->db_buf, tx);
2754 mutex_enter(&dr->dt.di.dr_mtx);
2755 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
2756 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2757 mutex_exit(&dr->dt.di.dr_mtx);
2762 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2764 arc_buf_t **datap = &dr->dt.dl.dr_data;
2765 dmu_buf_impl_t *db = dr->dr_dbuf;
2768 uint64_t txg = tx->tx_txg;
2770 ASSERT(dmu_tx_is_syncing(tx));
2772 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2774 mutex_enter(&db->db_mtx);
2776 * To be synced, we must be dirtied. But we
2777 * might have been freed after being dirtied.
2779 if (db->db_state == DB_UNCACHED) {
2780 /* This buffer has been freed since it was dirtied */
2781 ASSERT(db->db.db_data == NULL);
2782 } else if (db->db_state == DB_FILL) {
2783 /* This buffer was freed and is now being re-filled */
2784 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2786 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2793 if (db->db_blkid == DMU_SPILL_BLKID) {
2794 mutex_enter(&dn->dn_mtx);
2795 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2796 mutex_exit(&dn->dn_mtx);
2800 * If this is a bonus buffer, simply copy the bonus data into the
2801 * dnode. It will be written out when the dnode is synced (and it
2802 * will be synced, since it must have been dirty for dbuf_sync to be called).
2805 if (db->db_blkid == DMU_BONUS_BLKID) {
2806 dbuf_dirty_record_t **drp;
2808 ASSERT(*datap != NULL);
2809 ASSERT0(db->db_level);
2810 ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2811 bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
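/*
 * For example, a ZPL file's attributes live in its dnode's bonus
 * area; the bcopy() above is the only "write" a bonus buffer ever
 * needs, since those dn_bonuslen bytes travel inside the dnode_phys_t
 * rather than as a separately allocated block.
 */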
2814 if (*datap != db->db.db_data) {
2815 zio_buf_free(*datap, DN_MAX_BONUSLEN);
2816 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2818 db->db_data_pending = NULL;
2819 drp = &db->db_last_dirty;
2820 while (*drp != dr)
2821 drp = &(*drp)->dr_next;
2822 ASSERT(dr->dr_next == NULL);
2823 ASSERT(dr->dr_dbuf == db);
2824 *drp = dr->dr_next;
2825 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2826 ASSERT(db->db_dirtycnt > 0);
2827 db->db_dirtycnt -= 1;
2828 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2835 * This function may have dropped the db_mtx lock allowing a dmu_sync
2836 * operation to sneak in. As a result, we need to ensure that we
2837 * don't check the dr_override_state until we have returned from
2838 * dbuf_check_blkptr.
2840 dbuf_check_blkptr(dn, db);
2843 * If this buffer is in the middle of an immediate write,
2844 * wait for the synchronous IO to complete.
2846 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2847 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2848 cv_wait(&db->db_changed, &db->db_mtx);
2849 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2852 if (db->db_state != DB_NOFILL &&
2853 dn->dn_object != DMU_META_DNODE_OBJECT &&
2854 refcount_count(&db->db_holds) > 1 &&
2855 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2856 *datap == db->db_buf) {
2858 * If this buffer is currently "in use" (i.e., there
2859 * are active holds and db_data still references it),
2860 * then make a copy before we start the write so that
2861 * any modifications from the open txg will not leak into this txg.
2864 * NOTE: this copy does not need to be made for
2865 * objects only modified in the syncing context (e.g.
2866 * DMU_OT_DNODE blocks).
2868 int blksz = arc_buf_size(*datap);
2869 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2870 *datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2871 bcopy(db->db.db_data, (*datap)->b_data, blksz);
2873 db->db_data_pending = dr;
2875 mutex_exit(&db->db_mtx);
2877 dbuf_write(dr, *datap, tx);
2879 ASSERT(!list_link_active(&dr->dr_dirty_node));
2880 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2881 list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2885 * Although zio_nowait() does not "wait for an IO", it does
2886 * initiate the IO. If this is an empty write it seems plausible
2887 * that the IO could actually be completed before the nowait
2888 * returns. We need to DB_DNODE_EXIT() first in case
2889 * zio_nowait() invalidates the dbuf.
2892 zio_nowait(dr->dr_zio);
2897 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
2899 dbuf_dirty_record_t *dr;
2901 while ((dr = list_head(list)) != NULL) {
2902 if (dr->dr_zio != NULL) {
2904 * If we find an already initialized zio then we
2905 * are processing the meta-dnode, and we have finished.
2906 * The dbufs for all dnodes are put back on the list
2907 * during processing, so that we can zio_wait()
2908 * these IOs after initiating all child IOs.
2910 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2911 DMU_META_DNODE_OBJECT);
2912 break;
2913 }
2914 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
2915 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
2916 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
2918 list_remove(list, dr);
2919 if (dr->dr_dbuf->db_level > 0)
2920 dbuf_sync_indirect(dr, tx);
2921 else
2922 dbuf_sync_leaf(dr, tx);
2928 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2930 dmu_buf_impl_t *db = vdb;
2932 blkptr_t *bp = zio->io_bp;
2933 blkptr_t *bp_orig = &zio->io_bp_orig;
2934 spa_t *spa = zio->io_spa;
2939 ASSERT3P(db->db_blkptr, !=, NULL);
2940 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
2944 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2945 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2946 zio->io_prev_space_delta = delta;
2948 if (bp->blk_birth != 0) {
2949 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2950 BP_GET_TYPE(bp) == dn->dn_type) ||
2951 (db->db_blkid == DMU_SPILL_BLKID &&
2952 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2953 BP_IS_EMBEDDED(bp));
2954 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2957 mutex_enter(&db->db_mtx);
2960 if (db->db_blkid == DMU_SPILL_BLKID) {
2961 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2962 ASSERT(!(BP_IS_HOLE(bp)) &&
2963 db->db_blkptr == &dn->dn_phys->dn_spill);
2967 if (db->db_level == 0) {
2968 mutex_enter(&dn->dn_mtx);
2969 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2970 db->db_blkid != DMU_SPILL_BLKID)
2971 dn->dn_phys->dn_maxblkid = db->db_blkid;
2972 mutex_exit(&dn->dn_mtx);
2974 if (dn->dn_type == DMU_OT_DNODE) {
2975 dnode_phys_t *dnp = db->db.db_data;
2976 for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2977 i--, dnp++) {
2978 if (dnp->dn_type != DMU_OT_NONE)
2979 fill++;
2982 if (BP_IS_HOLE(bp)) {
2989 blkptr_t *ibp = db->db.db_data;
2990 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2991 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2992 if (BP_IS_HOLE(ibp))
2994 fill += BP_GET_FILL(ibp);
2999 if (!BP_IS_EMBEDDED(bp))
3000 bp->blk_fill = fill;
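/*
 * Example of the fill accounting above (a sketch): a level-1 indirect
 * whose children carry fills {4, 0 (hole), 1} gets blk_fill == 5, the
 * number of non-hole level-0 blocks beneath this bp; dnode_next_offset()
 * depends on these counts to skip holes efficiently.
 */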
3002 mutex_exit(&db->db_mtx);
3004 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
3005 *db->db_blkptr = *bp;
3006 rw_exit(&dn->dn_struct_rwlock);
3011 * This function gets called just prior to running through the compression
3012 * stage of the zio pipeline. If we're an indirect block composed of only
3013 * holes, then we want this indirect to be compressed away to a hole. In
3014 * order to do that we must zero out any information about the holes that
3015 * this indirect points to before we try to compress it.
3018 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
3020 dmu_buf_impl_t *db = vdb;
3026 ASSERT3U(db->db_level, >, 0);
3029 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3031 /* Determine if all our children are holes */
3032 for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
3033 if (!BP_IS_HOLE(bp))
3038 * If all the children are holes, then zero them all out so that
3039 * we may get compressed away.
3041 if (i == 1 << epbs) {
3042 /* didn't find any non-holes */
3043 bzero(db->db.db_data, db->db.db_size);
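/*
 * Worked example (assuming epbs == 7): if all 128 child blkptrs of a
 * 16K indirect are holes, the bzero() above hands the compression
 * stage an all-zero buffer, which compresses away to a hole instead
 * of writing 16K of zeroed blkptrs to disk.
 */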
3049 * The SPA will call this callback several times for each zio - once
3050 * for every physical child i/o (zio->io_phys_children times). This
3051 * allows the DMU to monitor the progress of each logical i/o. For example,
3052 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
3053 * block. There may be a long delay before all copies/fragments are completed,
3054 * so this callback allows us to retire dirty space gradually, as the physical
3059 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
3061 dmu_buf_impl_t *db = arg;
3062 objset_t *os = db->db_objset;
3063 dsl_pool_t *dp = dmu_objset_pool(os);
3064 dbuf_dirty_record_t *dr;
3067 dr = db->db_data_pending;
3068 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
3071 * The callback will be called io_phys_children times. Retire one
3072 * portion of our dirty space each time we are called. Any rounding
3073 * error will be cleaned up by dsl_pool_sync()'s call to
3074 * dsl_pool_undirty_space().
3076 delta = dr->dr_accounted / zio->io_phys_children;
3077 dsl_pool_undirty_space(dp, delta, zio->io_txg);
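/*
 * Worked arithmetic (a sketch): a dirty record accounting for 128K
 * written with io_phys_children == 2 (e.g. two copies) retires
 * 131072 / 2 == 65536 bytes on each of the two callbacks; any
 * remainder from the integer division is swept up by dsl_pool_sync().
 */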
3082 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
3084 dmu_buf_impl_t *db = vdb;
3085 blkptr_t *bp_orig = &zio->io_bp_orig;
3086 blkptr_t *bp = db->db_blkptr;
3087 objset_t *os = db->db_objset;
3088 dmu_tx_t *tx = os->os_synctx;
3089 dbuf_dirty_record_t **drp, *dr;
3091 ASSERT0(zio->io_error);
3092 ASSERT(db->db_blkptr == bp);
3095 * For nopwrites and rewrites we ensure that the bp matches our
3096 * original and bypass all the accounting.
3098 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
3099 ASSERT(BP_EQUAL(bp, bp_orig));
3101 dsl_dataset_t *ds = os->os_dsl_dataset;
3102 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
3103 dsl_dataset_block_born(ds, bp, tx);
3106 mutex_enter(&db->db_mtx);
3110 drp = &db->db_last_dirty;
3111 while ((dr = *drp) != db->db_data_pending)
3112 drp = &dr->dr_next;
3113 ASSERT(!list_link_active(&dr->dr_dirty_node));
3114 ASSERT(dr->dr_dbuf == db);
3115 ASSERT(dr->dr_next == NULL);
3116 *drp = dr->dr_next;
3119 if (db->db_blkid == DMU_SPILL_BLKID) {
3124 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
3125 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
3126 db->db_blkptr == &dn->dn_phys->dn_spill);
3131 if (db->db_level == 0) {
3132 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3133 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
3134 if (db->db_state != DB_NOFILL) {
3135 if (dr->dt.dl.dr_data != db->db_buf)
3136 VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
3137 db));
3138 else if (!arc_released(db->db_buf))
3139 arc_set_callback(db->db_buf, dbuf_do_evict, db);
3146 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3147 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3148 if (!BP_IS_HOLE(db->db_blkptr)) {
3149 int epbs =
3150 dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3151 ASSERT3U(db->db_blkid, <=,
3152 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
3153 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
3154 db->db.db_size);
3155 if (!arc_released(db->db_buf))
3156 arc_set_callback(db->db_buf, dbuf_do_evict, db);
3159 mutex_destroy(&dr->dt.di.dr_mtx);
3160 list_destroy(&dr->dt.di.dr_children);
3162 kmem_free(dr, sizeof (dbuf_dirty_record_t));
3164 cv_broadcast(&db->db_changed);
3165 ASSERT(db->db_dirtycnt > 0);
3166 db->db_dirtycnt -= 1;
3167 db->db_data_pending = NULL;
3168 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
3172 dbuf_write_nofill_ready(zio_t *zio)
3174 dbuf_write_ready(zio, NULL, zio->io_private);
3178 dbuf_write_nofill_done(zio_t *zio)
3180 dbuf_write_done(zio, NULL, zio->io_private);
3184 dbuf_write_override_ready(zio_t *zio)
3186 dbuf_dirty_record_t *dr = zio->io_private;
3187 dmu_buf_impl_t *db = dr->dr_dbuf;
3189 dbuf_write_ready(zio, NULL, db);
3193 dbuf_write_override_done(zio_t *zio)
3195 dbuf_dirty_record_t *dr = zio->io_private;
3196 dmu_buf_impl_t *db = dr->dr_dbuf;
3197 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
3199 mutex_enter(&db->db_mtx);
3200 if (!BP_EQUAL(zio->io_bp, obp)) {
3201 if (!BP_IS_HOLE(obp))
3202 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
3203 arc_release(dr->dt.dl.dr_data, db);
3205 mutex_exit(&db->db_mtx);
3207 dbuf_write_done(zio, NULL, db);
3210 /* Issue I/O to commit a dirty buffer to disk. */
3212 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
3214 dmu_buf_impl_t *db = dr->dr_dbuf;
3217 dmu_buf_impl_t *parent = db->db_parent;
3218 uint64_t txg = tx->tx_txg;
3219 zbookmark_phys_t zb;
3224 ASSERT(dmu_tx_is_syncing(tx));
3230 if (db->db_state != DB_NOFILL) {
3231 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
3233 * Private object buffers are released here rather
3234 * than in dbuf_dirty() since they are only modified
3235 * in the syncing context and we don't want the
3236 * overhead of making multiple copies of the data.
3238 if (BP_IS_HOLE(db->db_blkptr)) {
3241 dbuf_release_bp(db);
3246 if (parent != dn->dn_dbuf) {
3247 /* Our parent is an indirect block. */
3248 /* We have a dirty parent that has been scheduled for write. */
3249 ASSERT(parent && parent->db_data_pending);
3250 /* Our parent's buffer is one level closer to the dnode. */
3251 ASSERT(db->db_level == parent->db_level-1);
3253 * We're about to modify our parent's db_data by modifying
3254 * our block pointer, so the parent must be released.
3256 ASSERT(arc_released(parent->db_buf));
3257 zio = parent->db_data_pending->dr_zio;
3259 /* Our parent is the dnode itself. */
3260 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
3261 db->db_blkid != DMU_SPILL_BLKID) ||
3262 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
3263 if (db->db_blkid != DMU_SPILL_BLKID)
3264 ASSERT3P(db->db_blkptr, ==,
3265 &dn->dn_phys->dn_blkptr[db->db_blkid]);
3269 ASSERT(db->db_level == 0 || data == db->db_buf);
3270 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
3273 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
3274 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
3275 db->db.db_object, db->db_level, db->db_blkid);
3277 if (db->db_blkid == DMU_SPILL_BLKID)
3279 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
3281 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
3285 * We copy the blkptr now (rather than when we instantiate the dirty
3286 * record), because its value can change between open context and
3287 * syncing context. We do not need to hold dn_struct_rwlock to read
3288 * db_blkptr because we are in syncing context.
3290 dr->dr_bp_copy = *db->db_blkptr;
3292 if (db->db_level == 0 &&
3293 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
3295 * The BP for this block has been provided by open context
3296 * (by dmu_sync() or dmu_buf_write_embedded()).
3298 void *contents = (data != NULL) ? data->b_data : NULL;
3300 dr->dr_zio = zio_write(zio, os->os_spa, txg,
3301 &dr->dr_bp_copy, contents, db->db.db_size, &zp,
3302 dbuf_write_override_ready, NULL, NULL,
3303 dbuf_write_override_done,
3304 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3305 mutex_enter(&db->db_mtx);
3306 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
3307 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
3308 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
3309 mutex_exit(&db->db_mtx);
3310 } else if (db->db_state == DB_NOFILL) {
3311 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
3312 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
3313 dr->dr_zio = zio_write(zio, os->os_spa, txg,
3314 &dr->dr_bp_copy, NULL, db->db.db_size, &zp,
3315 dbuf_write_nofill_ready, NULL, NULL,
3316 dbuf_write_nofill_done, db,
3317 ZIO_PRIORITY_ASYNC_WRITE,
3318 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
3320 ASSERT(arc_released(data));
3323 * For indirect blocks, we want to set up the children-ready
3324 * callback so that we can properly handle an indirect
3325 * block that only contains holes.
3327 arc_done_func_t *children_ready_cb = NULL;
3328 if (db->db_level != 0)
3329 children_ready_cb = dbuf_write_children_ready;
3331 dr->dr_zio = arc_write(zio, os->os_spa, txg,
3332 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
3333 DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
3335 dbuf_write_physdone, dbuf_write_done, db,
3336 ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);