 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa_impl.h>
#include <sys/range_tree.h>
struct dbuf_hold_impl_data {
	/* Function arguments */
	dmu_buf_impl_t **dh_dbp;

	/* Local variables */
	dmu_buf_impl_t *dh_db;
	dmu_buf_impl_t *dh_parent;
	dbuf_dirty_record_t *dh_dr;
	arc_buf_contents_t dh_type;
	int dh_depth;
};

static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive. A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	list_link_init(&db->db_link);
	return (0);
}

dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}
/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;
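
/*
 * Descriptive sketch of the hash below: it folds the objset pointer,
 * object number, level and blkid through the ZFS CRC-64 table one byte
 * at a time, then XORs in the higher-order bits; callers mask the
 * result with hash_table_mask to select a bucket (see DBUF_HASH()).
 */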
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);
#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv, idx;
	dmu_buf_impl_t *db;

	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
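 *
 * A typical caller handles a lost insertion race by freeing its newly
 * created dbuf and using the returned winner instead (see dbuf_create()
 * below).
 */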
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;

	blkid = db->db_blkid;
	hv = DBUF_HASH(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}
/*
 * Remove an entry from the hash table. This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}
static arc_evict_func_t dbuf_do_evict;

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);

		return (is_metadata);
	}
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
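	 *
	 * As an illustrative example (sizes assumed, not measured): with
	 * 16GB of physical memory the loop below doubles hsize until
	 * hsize * 4096 >= 16GB, i.e. hsize == 4M buckets, or 32MB of
	 * table with 8-byte pointers.
	 */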
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_cache);
}
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			ASSERTV(int epb = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. Safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			ASSERTV(uint64_t *buf = db->db.db_data);
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++)
				ASSERT(buf[i] == 0);
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
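 *
 * If the buffer is shared (already released to the ARC or held more
 * than once), the loan is satisfied with a private copy; otherwise the
 * dbuf's own ARC buffer is handed out and the dbuf is left with no
 * associated data.
 */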
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}
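
/*
 * Illustrative example (values assumed): with a 128K data block size,
 * dn_datablkshift == 17, so dbuf_whichblock() below maps byte offset
 * 0x60000 (384K) to block 0x60000 >> 17 == 3. A dnode whose block size
 * is not a power of two has dn_datablkshift == 0 and only a single
 * level-0 block, so any in-range offset maps to block 0.
 */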
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift)
		return (offset >> dn->dn_datablkshift);
	ASSERT3U(offset, <, dn->dn_datablksz);
	return (0);
}

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		DB_DNODE_EXIT(db);
		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED. Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED. Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a buffer
 * for the first time in a txg, and when we are freeing a range in a
 * dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do not put a
 * hold on the buffer, we just traverse the active dbuf list for the
 * dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 * reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 * just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state. Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release(). Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;

	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID))
		end = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
		/* There can't be any dbufs in this range; no need to search. */
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order. If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	for (db = list_head(&dn->dn_dbufs); db != NULL; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0)
			continue;
		if (db->db_blkid < start || db->db_blkid > end)
			continue;

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in use"; re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	return (B_FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be in progress.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size - osize, tx);
	DB_DNODE_EXIT(db);
}

void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	ASSERTV(objset_t *os = db->db_objset);

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context. Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty. They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too? The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context. Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx. This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
	list_link_init(&dr->dr_dirty_node);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block. Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty. We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet. We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction. Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/*
	 * Any space we accounted for in dp_dirty_* will be cleaned up by
	 * dsl_pool_sync(). This is relatively rare so the discrepancy
	 * is not a big deal.
	 */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}
1559 * "Clear" the contents of this dbuf. This will mark the dbuf
1560 * EVICTING and clear *most* of its references. Unfortunately,
1561 * when we are not holding the dn_dbufs_mtx, we can't clear the
1562 * entry in the dn_dbufs list. We have to wait until dbuf_destroy()
1563 * in this case. For callers from the DMU we will usually see:
1564 * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1565 * For the arc callback, we will usually see:
1566 * dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1567 * Sometimes, though, we will get a mix of these two:
1568 * DMU: dbuf_clear()->arc_buf_evict()
1569 * ARC: dbuf_do_evict()->dbuf_destroy()
1572 dbuf_clear(dmu_buf_impl_t *db)
1575 dmu_buf_impl_t *parent = db->db_parent;
1576 dmu_buf_impl_t *dndb;
1577 int dbuf_gone = FALSE;
1579 ASSERT(MUTEX_HELD(&db->db_mtx));
1580 ASSERT(refcount_is_zero(&db->db_holds));
1582 dbuf_evict_user(db);
1584 if (db->db_state == DB_CACHED) {
1585 ASSERT(db->db.db_data != NULL);
1586 if (db->db_blkid == DMU_BONUS_BLKID) {
1587 zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1588 arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1590 db->db.db_data = NULL;
1591 db->db_state = DB_UNCACHED;
1594 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1595 ASSERT(db->db_data_pending == NULL);
1597 db->db_state = DB_EVICTING;
1598 db->db_blkptr = NULL;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);
	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
{
	int nlevels, epbs;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
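	/*
	 * Illustrative numbers: with 128K indirect blocks
	 * (dn_indblkshift == 17) and 128-byte block pointers
	 * (SPA_BLKPTRSHIFT == 7), epbs == 10, so each indirect block
	 * maps 1024 children and the parent of blkid b at level n is
	 * blkid b >> 10 at level n + 1.
	 */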

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err;
		if (dh == NULL) {
			err = dbuf_hold_impl(dn, level+1, blkid >> epbs,
			    fail_sparse, NULL, parentp);
		} else {
			__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
			    blkid >> epbs, fail_sparse, NULL,
			    parentp, dh->dh_depth + 1);
			err = __dbuf_hold_impl(dh + 1);
		}
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}

static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_PUSHPAGE);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
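		/*
		 * Illustrative sizes: DN_MAX_BONUSLEN is 320 bytes and a
		 * blkptr_t is 128 bytes, so a dnode with dn_nblkptr == 3
		 * keeps 320 - 2 * 128 == 64 bytes of bonus space, while
		 * the common dn_nblkptr == 1 case keeps all 320.
		 */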
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * If this dbuf is still on the dn_dbufs list,
		 * remove it from that list.
		 */
		if (db->db_dnode_handle != NULL) {
			dnode_t *dn;

			DB_DNODE_ENTER(db);
			dn = DB_DNODE(db);
			mutex_enter(&dn->dn_dbufs_mtx);
			list_remove(&dn->dn_dbufs, db);
			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
			mutex_exit(&dn->dn_dbufs_mtx);
			DB_DNODE_EXIT(db);
			/*
			 * Decrementing the dbuf count means that the hold
			 * corresponding to the removed dbuf is no longer
			 * discounted in dnode_move(), so the dnode cannot be
			 * moved until after we release the hold.
			 */
			dnode_rele(dn, db);
			db->db_dnode_handle = NULL;
		}
		dbuf_hash_remove(db);
	}
	db->db_parent = NULL;
	db->db_buf = NULL;

	ASSERT(!list_link_active(&db->db_link));
	ASSERT(db->db.db_data == NULL);
	ASSERT(db->db_hash_next == NULL);
	ASSERT(db->db_blkptr == NULL);
	ASSERT(db->db_data_pending == NULL);

	kmem_cache_free(dbuf_cache, db);
	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}

void
dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
{
	dmu_buf_impl_t *db = NULL;
	blkptr_t *bp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	if (dnode_block_freed(dn, blkid))
		return;

	/* dbuf_find() returns with db_mtx held */
	if ((db = dbuf_find(dn, 0, blkid))) {
		/*
		 * This dbuf is already in the cache. We assume that
		 * it is already CACHED, or else about to be either
		 * read or filled.
		 */
		mutex_exit(&db->db_mtx);
		return;
	}

	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp, NULL) == 0) {
		if (bp && !BP_IS_HOLE(bp)) {
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
			    dn->dn_object, 0, blkid);

			(void) arc_read(NULL, dn->dn_objset->os_spa,
			    bp, NULL, NULL, prio,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		if (db)
			dbuf_rele(db, NULL);
	}
}

#define	DBUF_HOLD_IMPL_MAX_DEPTH	20

/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
static int
__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
{
	ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
	dh->dh_parent = NULL;

	ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
	ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);

	*(dh->dh_dbp) = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	dh->dh_db = dbuf_find(dh->dh_dn, dh->dh_level, dh->dh_blkid);

	if (dh->dh_db == NULL) {
		dh->dh_bp = NULL;

		ASSERT3P(dh->dh_parent, ==, NULL);
		dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh);
		if (dh->dh_fail_sparse) {
			if (dh->dh_err == 0 &&
			    dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
				dh->dh_err = SET_ERROR(ENOENT);
			if (dh->dh_err) {
				if (dh->dh_parent)
					dbuf_rele(dh->dh_parent, NULL);
				return (dh->dh_err);
			}
		}
		if (dh->dh_err && dh->dh_err != ENOENT)
			return (dh->dh_err);
		dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
		    dh->dh_parent, dh->dh_bp);
	}

	if (dh->dh_db->db_buf && refcount_is_zero(&dh->dh_db->db_holds)) {
		arc_buf_add_ref(dh->dh_db->db_buf, dh->dh_db);
		if (dh->dh_db->db_buf->b_data == NULL) {
			dbuf_clear(dh->dh_db);
			if (dh->dh_parent) {
				dbuf_rele(dh->dh_parent, NULL);
				dh->dh_parent = NULL;
			}
			goto top;
		}
		ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
	}

	ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (dh->dh_db->db_level == 0 &&
	    dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
	    dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
	    dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
		dh->dh_dr = dh->dh_db->db_data_pending;

		if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf) {
			dh->dh_type = DBUF_GET_BUFC_TYPE(dh->dh_db);

			dbuf_set_data(dh->dh_db,
			    arc_buf_alloc(dh->dh_dn->dn_objset->os_spa,
			    dh->dh_db->db.db_size, dh->dh_db, dh->dh_type));
			bcopy(dh->dh_dr->dt.dl.dr_data->b_data,
			    dh->dh_db->db.db_data, dh->dh_db->db.db_size);
		}
	}

	(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
	dbuf_update_data(dh->dh_db);
	DBUF_VERIFY(dh->dh_db);
	mutex_exit(&dh->dh_db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (dh->dh_parent)
		dbuf_rele(dh->dh_parent, NULL);

	ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
	ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
	ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
	*(dh->dh_dbp) = dh->dh_db;

	return (0);
}

/*
 * The following code preserves the recursive function dbuf_hold_impl()
 * but moves the local variables AND function arguments to the heap to
 * minimize the stack frame size. Enough space is allocated up front
 * for 20 levels of recursion.
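 *
 * An illustrative trade-off (sizes are platform-dependent and assumed
 * here): a single kmem_zalloc() of 20 * sizeof (struct
 * dbuf_hold_impl_data) replaces up to 20 recursive stack frames, so the
 * hold path's stack usage stays constant regardless of indirection
 * depth.
 */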
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	struct dbuf_hold_impl_data *dh;
	int error;

	dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE);
	__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0);

	error = __dbuf_hold_impl(dh);

	kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
	    DBUF_HOLD_IMPL_MAX_DEPTH);

	return (error);
}

static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
    dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp, int depth)
{
	dh->dh_dn = dn;
	dh->dh_level = level;
	dh->dh_blkid = blkid;
	dh->dh_fail_sparse = fail_sparse;
	dh->dh_tag = tag;
	dh->dh_dbp = dbp;
	dh->dh_depth = depth;
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
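	/*
	 * Example (illustrative values): a requested blksz of 3000 is
	 * rounded up to the next 512-byte multiple, 3072, since
	 * SPA_MINBLOCKSIZE is 512.
	 */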

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	VERIFY(refcount_add(&db->db_holds, tag) > 1);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode. (An indirect
 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
 * dnode's parent dbuf evicting its dnode handles.
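 *
 * A safe pattern (sketch): take an explicit dnode hold with
 * dnode_hold() before calling dbuf_rele(), and drop it with
 * dnode_rele() afterwards, so the handle cannot be evicted out from
 * under the caller.
 */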
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}

void
dmu_buf_rele(dmu_buf_t *db, void *tag)
{
	dbuf_rele((dmu_buf_impl_t *)db, tag);
}

/*
 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this barrier
			 * until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			membar_producer();
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer discounted
			 * in dnode_move(). The dnode cannot move until after
			 * we release the hold.
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db));
			dbuf_evict(db);
		} else {
			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2194 * A dbuf will be eligible for eviction if either the
2195 * 'primarycache' property is set or a duplicate
2196 * copy of this buffer is already cached in the arc.
2198 * In the case of the 'primarycache' a buffer
2199 * is considered for eviction if it matches the
2200 * criteria set in the property.
2202 * To decide if our buffer is considered a
2203 * duplicate, we must call into the arc to determine
2204 * if multiple buffers are referencing the same
2205 * block on-disk. If so, then we simply evict
2208 if (!DBUF_IS_CACHEABLE(db) ||
2209 arc_buf_eviction_needed(db->db_buf))
2212 mutex_exit(&db->db_mtx);
2215 mutex_exit(&db->db_mtx);
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}

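/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a hypothetical consumer attaching per-object state to a dbuf, with an
 * eviction callback that tears the state down. dmu_buf_set_user() returns
 * NULL if our state was installed, or the already-installed pointer if
 * another thread won the race:
 *
 *	static void
 *	my_evict_func(dmu_buf_t *db, void *user_ptr)
 *	{
 *		kmem_free(user_ptr, sizeof (my_state_t));
 *	}
 *	...
 *	winner = dmu_buf_set_user(db, state, NULL, my_evict_func);
 *	if (winner != NULL)
 *		kmem_free(state, sizeof (my_state_t));	(lost the race)
 *
 * my_state_t, my_evict_func, state, and winner are illustrative names.
 */
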
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}

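/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a caller holding the dbuf can test whether the buffer has an on-disk
 * identity yet:
 *
 *	blkptr_t *bp = dmu_buf_get_blkptr(&db->db);
 *	if (bp != NULL && !BP_IS_HOLE(bp))
 *		... the block has been allocated on disk ...
 */
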
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., nlevels mis-match).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

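/*
 * Worked example (editorial illustration, not part of the original
 * source): with a 16K indirect block (dn_indblkshift == 14) and 128-byte
 * block pointers (SPA_BLKPTRSHIFT == 7), epbs is 7, so each indirect
 * block maps 2^7 = 128 children. A level-0 dbuf with blkid 300 therefore
 * hangs off the level-1 block at blkid 300 >> 7 = 2, in slot
 * 300 & 127 = 44 of that block's blkptr array.
 */
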
/*
 * dbuf_sync_indirect() is called recursively from dbuf_sync_list(), so it
 * is critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}

/*
 * dbuf_sync_leaf() is called recursively from dbuf_sync_list(), so it is
 * critical that we not allow the compiler to inline this function into
 * dbuf_sync_list(), thereby drastically bloating the stack usage.
 */
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied. But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode. It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in. As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO. If this is an empty write it seems plausible
		 * that the IO could actually be completed before the nowait
		 * returns. We need to DB_DNODE_EXIT() first in case
		 * zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list))) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have finished.
			 * The dbufs for all dnodes are put back on the list
			 * during processing, so that we can zio_wait()
			 * these IOs after initiating all child IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}

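/*
 * Usage sketch (editorial illustration, not part of the original source):
 * dnode_sync() drives this loop by handing over its dirty-record list for
 * the syncing txg, roughly:
 *
 *	dbuf_sync_list(&dn->dn_dirty_records[txg & TXG_MASK], tx);
 */
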
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (bp->blk_birth != 0) {
		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_type) ||
		    (db->db_blkid == DMU_SPILL_BLKID &&
		    BP_GET_TYPE(bp) == dn->dn_bonustype));
		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
	}

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			if (BP_IS_HOLE(bp)) {
				fill = 0;
			} else {
				fill = 1;
			}
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}

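/*
 * Worked example (editorial illustration, not part of the original
 * source): blk_fill counts non-hole blocks beneath a block pointer. If a
 * level-1 indirect block's 128 child blkptrs reference 100 written L0
 * blocks and 28 holes, the loop above computes fill = 100; the level-2
 * parent then sums blk_fill values instead of re-walking the children.
 */
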
/*
 * The SPA will call this callback several times for each zio - once
 * for every physical child i/o (zio->io_phys_children times). This
 * allows the DMU to monitor the progress of each logical i/o. For example,
 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
 * block. There may be a long delay before all copies/fragments are completed,
 * so this callback allows us to retire dirty space gradually, as the physical
 * i/os complete.
 */
/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
	dmu_buf_impl_t *db = arg;
	objset_t *os = db->db_objset;
	dsl_pool_t *dp = dmu_objset_pool(os);
	dbuf_dirty_record_t *dr;
	int64_t delta;

	dr = db->db_data_pending;
	ASSERT3U(dr->dr_txg, ==, zio->io_txg);

	/*
	 * The callback will be called io_phys_children times. Retire one
	 * portion of our dirty space each time we are called. Any rounding
	 * error will be cleaned up by dsl_pool_sync()'s call to
	 * dsl_pool_undirty_space().
	 */
	delta = dr->dr_accounted / zio->io_phys_children;
	dsl_pool_undirty_space(dp, delta, zio->io_txg);
}

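/*
 * Arithmetic sketch (editorial illustration, not part of the original
 * source): if dr_accounted is 131072 bytes and the logical zio has three
 * physical children (e.g. a 3-way mirror), each callback retires
 * 131072 / 3 = 43690 bytes, and the 2-byte rounding remainder is cleaned
 * up later by dsl_pool_sync() via dsl_pool_undirty_space().
 */
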
/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	blkptr_t *bp = db->db_blkptr;
	objset_t *os = db->db_objset;
	dmu_tx_t *tx = os->os_synctx;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		dsl_dataset_t *ds = os->os_dsl_dataset;
		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db));
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
			    SPA_BLKPTRSHIFT);
			ASSERT3U(db->db_blkid, <=,
			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
		    dbuf_write_physdone, dbuf_write_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

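/*
 * Summary (editorial note, not part of the original source): dbuf_write()
 * issues one of three children of the parent zio:
 *
 *	1. zio_write() + zio_write_override() when dmu_sync() already
 *	   wrote the data (DR_OVERRIDDEN),
 *	2. zio_write() with a NULL buffer for DB_NOFILL dbufs, or
 *	3. arc_write() in the common case, wired to the ready/physdone/
 *	   done callbacks defined above.
 */
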
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_evict);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_clear);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_update_user);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_freeable);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
#endif