4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2019, Klara Inc.
28 * Copyright (c) 2019, Allan Jude
31 #include <sys/zfs_context.h>
34 #include <sys/dmu_send.h>
35 #include <sys/dmu_impl.h>
37 #include <sys/dmu_objset.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/dmu_tx.h>
43 #include <sys/dmu_zfetch.h>
45 #include <sys/sa_impl.h>
46 #include <sys/zfeature.h>
47 #include <sys/blkptr.h>
48 #include <sys/range_tree.h>
49 #include <sys/trace_zfs.h>
50 #include <sys/callb.h>
54 #include <sys/spa_impl.h>
55 #include <sys/wmsum.h>
56 #include <sys/vdev_impl.h>
58 static kstat_t *dbuf_ksp;
60 typedef struct dbuf_stats {
62 * Various statistics about the size of the dbuf cache.
64 kstat_named_t cache_count;
65 kstat_named_t cache_size_bytes;
66 kstat_named_t cache_size_bytes_max;
68 * Statistics regarding the bounds on the dbuf cache size.
70 kstat_named_t cache_target_bytes;
71 kstat_named_t cache_lowater_bytes;
72 kstat_named_t cache_hiwater_bytes;
74 * Total number of dbuf cache evictions that have occurred.
76 kstat_named_t cache_total_evicts;
78 * The distribution of dbuf levels in the dbuf cache and
79 * the total size of all dbufs at each level.
81 kstat_named_t cache_levels[DN_MAX_LEVELS];
82 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
84 * Statistics about the dbuf hash table.
86 kstat_named_t hash_hits;
87 kstat_named_t hash_misses;
88 kstat_named_t hash_collisions;
89 kstat_named_t hash_elements;
90 kstat_named_t hash_elements_max;
92 * Number of sublists containing more than one dbuf in the dbuf
93 * hash table. Keep track of the longest hash chain.
95 kstat_named_t hash_chains;
96 kstat_named_t hash_chain_max;
98 * Number of times a dbuf_create() discovers that a dbuf was
99 * already created and in the dbuf hash table.
101 kstat_named_t hash_insert_race;
103 * Number of entries in the hash table dbuf and mutex arrays.
105 kstat_named_t hash_table_count;
106 kstat_named_t hash_mutex_count;
108 * Statistics about the size of the metadata dbuf cache.
110 kstat_named_t metadata_cache_count;
111 kstat_named_t metadata_cache_size_bytes;
112 kstat_named_t metadata_cache_size_bytes_max;
114 * For diagnostic purposes, this is incremented whenever we can't add
115 * something to the metadata cache because it's full, and instead put
116 * the data in the regular dbuf cache.
118 kstat_named_t metadata_cache_overflow;
121 dbuf_stats_t dbuf_stats = {
122 { "cache_count", KSTAT_DATA_UINT64 },
123 { "cache_size_bytes", KSTAT_DATA_UINT64 },
124 { "cache_size_bytes_max", KSTAT_DATA_UINT64 },
125 { "cache_target_bytes", KSTAT_DATA_UINT64 },
126 { "cache_lowater_bytes", KSTAT_DATA_UINT64 },
127 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
128 { "cache_total_evicts", KSTAT_DATA_UINT64 },
129 { { "cache_levels_N", KSTAT_DATA_UINT64 } },
130 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
131 { "hash_hits", KSTAT_DATA_UINT64 },
132 { "hash_misses", KSTAT_DATA_UINT64 },
133 { "hash_collisions", KSTAT_DATA_UINT64 },
134 { "hash_elements", KSTAT_DATA_UINT64 },
135 { "hash_elements_max", KSTAT_DATA_UINT64 },
136 { "hash_chains", KSTAT_DATA_UINT64 },
137 { "hash_chain_max", KSTAT_DATA_UINT64 },
138 { "hash_insert_race", KSTAT_DATA_UINT64 },
139 { "hash_table_count", KSTAT_DATA_UINT64 },
140 { "hash_mutex_count", KSTAT_DATA_UINT64 },
141 { "metadata_cache_count", KSTAT_DATA_UINT64 },
142 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
143 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
144 { "metadata_cache_overflow", KSTAT_DATA_UINT64 }
149 wmsum_t cache_total_evicts;
150 wmsum_t cache_levels[DN_MAX_LEVELS];
151 wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 wmsum_t hash_collisions;
156 wmsum_t hash_insert_race;
157 wmsum_t metadata_cache_count;
158 wmsum_t metadata_cache_overflow;
161 #define DBUF_STAT_INCR(stat, val) \
162 wmsum_add(&dbuf_sums.stat, val);
163 #define DBUF_STAT_DECR(stat, val) \
164 DBUF_STAT_INCR(stat, -(val));
165 #define DBUF_STAT_BUMP(stat) \
166 DBUF_STAT_INCR(stat, 1);
167 #define DBUF_STAT_BUMPDOWN(stat) \
168 DBUF_STAT_INCR(stat, -1);
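/*
 * These counters accumulate in dbuf_sums (wmsum_t) and are folded into the
 * dbuf_stats kstat by dbuf_kstat_update(); e.g. DBUF_STAT_BUMP(hash_hits)
 * expands to wmsum_add(&dbuf_sums.hash_hits, 1).  DBUF_STAT_MAX below
 * instead updates dbuf_stats directly with an atomic compare-and-swap.
 */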
169 #define DBUF_STAT_MAX(stat, v) { \
170 uint64_t _m; \
171 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
172 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
173 continue; \
174 }
176 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
177 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
178 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
179 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
182 * Global data structures and functions for the dbuf cache.
184 static kmem_cache_t *dbuf_kmem_cache;
185 static taskq_t *dbu_evict_taskq;
187 static kthread_t *dbuf_cache_evict_thread;
188 static kmutex_t dbuf_evict_lock;
189 static kcondvar_t dbuf_evict_cv;
190 static boolean_t dbuf_evict_thread_exit;
193 * There are two dbuf caches; each dbuf can only be in one of them at a time.
195 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
196 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
197 * that represent the metadata that describes filesystems/snapshots/
198 * bookmarks/properties/etc. We only evict from this cache when we export a
199 * pool, to short-circuit as much I/O as possible for all administrative
200 * commands that need the metadata. There is no eviction policy for this
201 * cache, because we try to only include types in it which would occupy a
202 * very small amount of space per object but create a large impact on the
203 * performance of these commands. Instead, after it reaches a maximum size
204 * (which should only happen on very small memory systems with a very large
205 * number of filesystem objects), we stop taking new dbufs into the
206 * metadata cache, instead putting them in the normal dbuf cache.
208 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
209 * are not currently held but have been recently released. These dbufs
210 * are not eligible for arc eviction until they are aged out of the cache.
211 * Dbufs that are aged out of the cache will be immediately destroyed and
212 * become eligible for arc eviction.
214 * Dbufs are added to these caches once the last hold is released. If a dbuf is
215 * later accessed and still exists in the dbuf cache, then it will be removed
216 * from the cache and later re-added to the head of the cache.
218 * If a given dbuf meets the requirements for the metadata cache, it will go
219 * there, otherwise it will be considered for the generic LRU dbuf cache. The
220 * caches and the refcounts tracking their sizes are stored in an array indexed
221 * by those caches' matching enum values (from dbuf_cached_state_t).
223 typedef struct dbuf_cache {
225 zfs_refcount_t size ____cacheline_aligned;
227 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229 /* Size limits for the caches */
230 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
231 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
233 /* Set the default sizes of the caches to log2 fraction of arc size */
234 static uint_t dbuf_cache_shift = 5;
235 static uint_t dbuf_metadata_cache_shift = 6;
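/*
 * For example, with a 4 GiB ARC target the defaults above give a dbuf cache
 * target of 4 GiB >> 5 = 128 MiB and a metadata cache target of
 * 4 GiB >> 6 = 64 MiB, unless capped by the *_max_bytes limits above
 * (see dbuf_cache_target_bytes() and dbuf_metadata_cache_target_bytes()).
 */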
237 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
238 static uint_t dbuf_mutex_cache_shift = 0;
240 static unsigned long dbuf_cache_target_bytes(void);
241 static unsigned long dbuf_metadata_cache_target_bytes(void);
244 * The LRU dbuf cache uses a three-stage eviction policy:
245 * - A low water marker designates when the dbuf eviction thread
246 * should stop evicting from the dbuf cache.
247 * - When we reach the maximum size (aka mid water mark), we
248 * signal the eviction thread to run.
249 * - The high water mark indicates when the eviction thread
250 * is unable to keep up with the incoming load and eviction must
251 * happen in the context of the calling thread.
255 *                                   low water   mid water   hi water
256 * +----------------------------------------+----------+----------+
257 * |                                        |          |          |
258 * |                                        |          |          |
259 * |                                        |          |          |
260 * |                                        |          |          |
261 * +----------------------------------------+----------+----------+
262 *                                        stop        signal     evict
263 *                                        evicting    eviction   directly
264 *                                        thread
266 * The high and low water marks indicate the operating range for the eviction
267 * thread. The low water mark is, by default, 90% of the total size of the
268 * cache and the high water mark is at 110% (both of these percentages can be
269 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
270 * respectively). The eviction thread will try to ensure that the cache remains
271 * within this range by waking up every second and checking if the cache is
272 * above the low water mark. The thread can also be woken up by callers adding
273 * elements into the cache if the cache is larger than the mid water (i.e max
274 * cache size). Once the eviction thread is woken up and eviction is required,
275 * it will continue evicting buffers until it's able to reduce the cache size
276 * to the low water mark. If the cache size continues to grow and hits the high
277 * water mark, then callers adding elements to the cache will begin to evict
278 * directly from the cache until the cache is no longer above the high water
283 * The percentage above and below the maximum cache size.
285 static uint_t dbuf_cache_hiwater_pct = 10;
286 static uint_t dbuf_cache_lowater_pct = 10;
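/*
 * For example, with a 128 MiB cache target and the 10% defaults above, the
 * eviction thread operates between a low water mark of ~115 MiB and a high
 * water mark of ~141 MiB (illustrative arithmetic; see
 * dbuf_cache_hiwater_bytes() and dbuf_cache_lowater_bytes() below).
 */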
289 dbuf_cons(void *vdb, void *unused, int kmflag)
291 (void) unused, (void) kmflag;
292 dmu_buf_impl_t *db = vdb;
293 memset(db, 0, sizeof (dmu_buf_impl_t));
295 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
296 rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
297 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
298 multilist_link_init(&db->db_cache_link);
299 zfs_refcount_create(&db->db_holds);
305 dbuf_dest(void *vdb, void *unused)
308 dmu_buf_impl_t *db = vdb;
309 mutex_destroy(&db->db_mtx);
310 rw_destroy(&db->db_rwlock);
311 cv_destroy(&db->db_changed);
312 ASSERT(!multilist_link_active(&db->db_cache_link));
313 zfs_refcount_destroy(&db->db_holds);
317 * dbuf hash table routines
319 static dbuf_hash_table_t dbuf_hash_table;
322 * We use Cityhash for this. It's fast, and has good hash properties without
323 * requiring any large static buffers.
326 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
328 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
331 #define DTRACE_SET_STATE(db, why) \
332 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
335 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
336 ((dbuf)->db.db_object == (obj) && \
337 (dbuf)->db_objset == (os) && \
338 (dbuf)->db_level == (level) && \
339 (dbuf)->db_blkid == (blkid))
342 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
344 dbuf_hash_table_t *h = &dbuf_hash_table;
349 hv = dbuf_hash(os, obj, level, blkid);
350 idx = hv & h->hash_table_mask;
352 mutex_enter(DBUF_HASH_MUTEX(h, idx));
353 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
354 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
355 mutex_enter(&db->db_mtx);
356 if (db->db_state != DB_EVICTING) {
357 mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 mutex_exit(&db->db_mtx);
363 mutex_exit(DBUF_HASH_MUTEX(h, idx));
367 static dmu_buf_impl_t *
368 dbuf_find_bonus(objset_t *os, uint64_t object)
371 dmu_buf_impl_t *db = NULL;
373 if (dnode_hold(os, object, FTAG, &dn) == 0) {
374 rw_enter(&dn->dn_struct_rwlock, RW_READER);
375 if (dn->dn_bonus != NULL) {
377 mutex_enter(&db->db_mtx);
379 rw_exit(&dn->dn_struct_rwlock);
380 dnode_rele(dn, FTAG);
386 * Insert an entry into the hash table. If there is already an element
387 * equal to elem in the hash table, then the already existing element
388 * will be returned and the new element will not be inserted.
389 * Otherwise returns NULL.
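 *
 * (dbuf_create() uses a non-NULL return to detect that it lost the insertion
 * race to another thread; that event is counted by the hash_insert_race
 * kstat.)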
391 static dmu_buf_impl_t *
392 dbuf_hash_insert(dmu_buf_impl_t *db)
394 dbuf_hash_table_t *h = &dbuf_hash_table;
395 objset_t *os = db->db_objset;
396 uint64_t obj = db->db.db_object;
397 int level = db->db_level;
398 uint64_t blkid, hv, idx;
402 blkid = db->db_blkid;
403 hv = dbuf_hash(os, obj, level, blkid);
404 idx = hv & h->hash_table_mask;
406 mutex_enter(DBUF_HASH_MUTEX(h, idx));
407 for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
408 dbf = dbf->db_hash_next, i++) {
409 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
410 mutex_enter(&dbf->db_mtx);
411 if (dbf->db_state != DB_EVICTING) {
412 mutex_exit(DBUF_HASH_MUTEX(h, idx));
415 mutex_exit(&dbf->db_mtx);
420 DBUF_STAT_BUMP(hash_collisions);
422 DBUF_STAT_BUMP(hash_chains);
424 DBUF_STAT_MAX(hash_chain_max, i);
427 mutex_enter(&db->db_mtx);
428 db->db_hash_next = h->hash_table[idx];
429 h->hash_table[idx] = db;
430 mutex_exit(DBUF_HASH_MUTEX(h, idx));
431 uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
432 DBUF_STAT_MAX(hash_elements_max, he);
438 * This returns whether this dbuf should be stored in the metadata cache, which
439 * is based on whether it's from one of the dnode types that store data related
440 * to traversing dataset hierarchies.
443 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
446 dmu_object_type_t type = DB_DNODE(db)->dn_type;
449 /* Check if this dbuf is one of the types we care about */
450 if (DMU_OT_IS_METADATA_CACHED(type)) {
451 /* If we hit this, then we set something up wrong in dmu_ot */
452 ASSERT(DMU_OT_IS_METADATA(type));
455 * Sanity check for small-memory systems: don't allocate too
456 * much memory for this purpose.
458 if (zfs_refcount_count(
459 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
460 dbuf_metadata_cache_target_bytes()) {
461 DBUF_STAT_BUMP(metadata_cache_overflow);
472 * Remove an entry from the hash table. It must be in the EVICTING state.
475 dbuf_hash_remove(dmu_buf_impl_t *db)
477 dbuf_hash_table_t *h = &dbuf_hash_table;
479 dmu_buf_impl_t *dbf, **dbp;
481 hv = dbuf_hash(db->db_objset, db->db.db_object,
482 db->db_level, db->db_blkid);
483 idx = hv & h->hash_table_mask;
486 * We mustn't hold db_mtx to maintain lock ordering:
487 * DBUF_HASH_MUTEX > db_mtx.
489 ASSERT(zfs_refcount_is_zero(&db->db_holds));
490 ASSERT(db->db_state == DB_EVICTING);
491 ASSERT(!MUTEX_HELD(&db->db_mtx));
493 mutex_enter(DBUF_HASH_MUTEX(h, idx));
494 dbp = &h->hash_table[idx];
495 while ((dbf = *dbp) != db) {
496 dbp = &dbf->db_hash_next;
499 *dbp = db->db_hash_next;
500 db->db_hash_next = NULL;
501 if (h->hash_table[idx] &&
502 h->hash_table[idx]->db_hash_next == NULL)
503 DBUF_STAT_BUMPDOWN(hash_chains);
504 mutex_exit(DBUF_HASH_MUTEX(h, idx));
505 atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
511 } dbvu_verify_type_t;
514 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
519 if (db->db_user == NULL)
522 /* Only data blocks support the attachment of user data. */
523 ASSERT(db->db_level == 0);
525 /* Clients must resolve a dbuf before attaching user data. */
526 ASSERT(db->db.db_data != NULL);
527 ASSERT3U(db->db_state, ==, DB_CACHED);
529 holds = zfs_refcount_count(&db->db_holds);
530 if (verify_type == DBVU_EVICTING) {
532 * Immediate eviction occurs when holds == dirtycnt.
533 * For normal eviction buffers, holds is zero on
534 * eviction, except when dbuf_fix_old_data() calls
535 * dbuf_clear_data(). However, the hold count can grow
536 * during eviction even though db_mtx is held (see
537 * dmu_bonus_hold() for an example), so we can only
538 * test the generic invariant that holds >= dirtycnt.
540 ASSERT3U(holds, >=, db->db_dirtycnt);
542 if (db->db_user_immediate_evict == TRUE)
543 ASSERT3U(holds, >=, db->db_dirtycnt);
545 ASSERT3U(holds, >, 0);
551 dbuf_evict_user(dmu_buf_impl_t *db)
553 dmu_buf_user_t *dbu = db->db_user;
555 ASSERT(MUTEX_HELD(&db->db_mtx));
560 dbuf_verify_user(db, DBVU_EVICTING);
564 if (dbu->dbu_clear_on_evict_dbufp != NULL)
565 *dbu->dbu_clear_on_evict_dbufp = NULL;
569 * There are two eviction callbacks - one that we call synchronously
570 * and one that we invoke via a taskq. The async one is useful for
571 * avoiding lock order reversals and limiting stack depth.
573 * Note that if we have a sync callback but no async callback,
574 * it's likely that the sync callback will free the structure
575 * containing the dbu. In that case we need to take care to not
576 * dereference dbu after calling the sync evict func.
578 boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
580 if (dbu->dbu_evict_func_sync != NULL)
581 dbu->dbu_evict_func_sync(dbu);
584 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
585 dbu, 0, &dbu->dbu_tqent);
590 dbuf_is_metadata(dmu_buf_impl_t *db)
593 * Consider indirect blocks and spill blocks to be metadata.
595 if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
598 boolean_t is_metadata;
601 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
604 return (is_metadata);
609 * We want to exclude buffers that are on a special allocation class from
613 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
616 zfs_cache_type_t cache = db->db_objset->os_secondary_cache;
617 blkptr_t *bp = db->db_blkptr;
619 if (bp != NULL && !BP_IS_HOLE(bp)) {
620 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
621 vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
623 if (vdev < rvd->vdev_children)
624 vd = rvd->vdev_child[vdev];
626 if (cache == ZFS_CACHE_ALL ||
627 (dbuf_is_metadata(db) && cache == ZFS_CACHE_METADATA)) {
631 if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
632 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
633 l2arc_exclude_special == 0)
641 static inline boolean_t
642 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
645 zfs_cache_type_t cache = dn->dn_objset->os_secondary_cache;
647 if (bp != NULL && !BP_IS_HOLE(bp)) {
648 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
649 vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
651 if (vdev < rvd->vdev_children)
652 vd = rvd->vdev_child[vdev];
654 if (cache == ZFS_CACHE_ALL || ((level > 0 ||
655 DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)) &&
656 cache == ZFS_CACHE_METADATA)) {
660 if ((vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
661 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP) ||
662 l2arc_exclude_special == 0)
672 * This function *must* return indices evenly distributed between all
673 * sublists of the multilist. This is needed due to how the dbuf eviction
674 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
675 * distributed between all sublists and uses this assumption when
676 * deciding which sublist to evict from and how much to evict from it.
679 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
681 dmu_buf_impl_t *db = obj;
684 * The assumption here is that the hash value for a given
685 * dmu_buf_impl_t will remain constant throughout its lifetime
686 * (i.e. its objset, object, level and blkid fields don't change).
687 * Thus, we don't need to store the dbuf's sublist index
688 * on insertion, as this index can be recalculated on removal.
690 * Also, the low order bits of the hash value are thought to be
691 * distributed evenly. Otherwise, in the case that the multilist
692 * has a power of two number of sublists, each sublist's usage
693 * would not be evenly distributed. In this context full 64bit
694 * division would be a waste of time, so limit it to 32 bits.
696 return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
697 db->db_level, db->db_blkid) %
698 multilist_get_num_sublists(ml));
702 * The target size of the dbuf cache can grow with the ARC target,
703 * unless limited by the tunable dbuf_cache_max_bytes.
705 static inline unsigned long
706 dbuf_cache_target_bytes(void)
708 return (MIN(dbuf_cache_max_bytes,
709 arc_target_bytes() >> dbuf_cache_shift));
713 * The target size of the dbuf metadata cache can grow with the ARC target,
714 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
716 static inline unsigned long
717 dbuf_metadata_cache_target_bytes(void)
719 return (MIN(dbuf_metadata_cache_max_bytes,
720 arc_target_bytes() >> dbuf_metadata_cache_shift));
723 static inline uint64_t
724 dbuf_cache_hiwater_bytes(void)
726 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
727 return (dbuf_cache_target +
728 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
731 static inline uint64_t
732 dbuf_cache_lowater_bytes(void)
734 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
735 return (dbuf_cache_target -
736 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
739 static inline boolean_t
740 dbuf_cache_above_lowater(void)
742 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
743 dbuf_cache_lowater_bytes());
747 * Evict the oldest eligible dbuf from the dbuf cache.
752 int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
753 multilist_sublist_t *mls = multilist_sublist_lock(
754 &dbuf_caches[DB_DBUF_CACHE].cache, idx);
756 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
758 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
759 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
760 db = multilist_sublist_prev(mls, db);
763 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
764 multilist_sublist_t *, mls);
767 multilist_sublist_remove(mls, db);
768 multilist_sublist_unlock(mls);
769 (void) zfs_refcount_remove_many(
770 &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
771 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
772 DBUF_STAT_BUMPDOWN(cache_count);
773 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
775 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
776 db->db_caching_status = DB_NO_CACHE;
778 DBUF_STAT_BUMP(cache_total_evicts);
780 multilist_sublist_unlock(mls);
785 * The dbuf evict thread is responsible for aging out dbufs from the
786 * cache. Once the cache has reached its maximum size, dbufs are removed
787 * and destroyed. The eviction thread will continue running until the size
788 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
789 * out of the cache it is destroyed and becomes eligible for arc eviction.
791 static __attribute__((noreturn)) void
792 dbuf_evict_thread(void *unused)
797 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
799 mutex_enter(&dbuf_evict_lock);
800 while (!dbuf_evict_thread_exit) {
801 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
802 CALLB_CPR_SAFE_BEGIN(&cpr);
803 (void) cv_timedwait_idle_hires(&dbuf_evict_cv,
804 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
805 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
807 mutex_exit(&dbuf_evict_lock);
810 * Keep evicting as long as we're above the low water mark
811 * for the cache. We do this without holding the locks to
812 * minimize lock contention.
814 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
818 mutex_enter(&dbuf_evict_lock);
821 dbuf_evict_thread_exit = B_FALSE;
822 cv_broadcast(&dbuf_evict_cv);
823 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
828 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
829 * If the dbuf cache is at its high water mark, then evict a dbuf from the
830 * dbuf cache using the caller's context.
833 dbuf_evict_notify(uint64_t size)
836 * We check if we should evict without holding the dbuf_evict_lock,
837 * because it's OK to occasionally make the wrong decision here,
838 * and grabbing the lock results in massive lock contention.
840 if (size > dbuf_cache_target_bytes()) {
841 if (size > dbuf_cache_hiwater_bytes())
843 cv_signal(&dbuf_evict_cv);
848 dbuf_kstat_update(kstat_t *ksp, int rw)
850 dbuf_stats_t *ds = ksp->ks_data;
851 dbuf_hash_table_t *h = &dbuf_hash_table;
853 if (rw == KSTAT_WRITE)
854 return (SET_ERROR(EACCES));
856 ds->cache_count.value.ui64 =
857 wmsum_value(&dbuf_sums.cache_count);
858 ds->cache_size_bytes.value.ui64 =
859 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
860 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
861 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
862 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
863 ds->cache_total_evicts.value.ui64 =
864 wmsum_value(&dbuf_sums.cache_total_evicts);
865 for (int i = 0; i < DN_MAX_LEVELS; i++) {
866 ds->cache_levels[i].value.ui64 =
867 wmsum_value(&dbuf_sums.cache_levels[i]);
868 ds->cache_levels_bytes[i].value.ui64 =
869 wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
871 ds->hash_hits.value.ui64 =
872 wmsum_value(&dbuf_sums.hash_hits);
873 ds->hash_misses.value.ui64 =
874 wmsum_value(&dbuf_sums.hash_misses);
875 ds->hash_collisions.value.ui64 =
876 wmsum_value(&dbuf_sums.hash_collisions);
877 ds->hash_chains.value.ui64 =
878 wmsum_value(&dbuf_sums.hash_chains);
879 ds->hash_insert_race.value.ui64 =
880 wmsum_value(&dbuf_sums.hash_insert_race);
881 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
882 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
883 ds->metadata_cache_count.value.ui64 =
884 wmsum_value(&dbuf_sums.metadata_cache_count);
885 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
886 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
887 ds->metadata_cache_overflow.value.ui64 =
888 wmsum_value(&dbuf_sums.metadata_cache_overflow);
895 uint64_t hmsize, hsize = 1ULL << 16;
896 dbuf_hash_table_t *h = &dbuf_hash_table;
899 * The hash table is big enough to fill one eighth of physical memory
900 * with an average block size of zfs_arc_average_blocksize (default 8K).
901 * By default, the table will take up
902 * totalmem / 8 / 8K pointers (about 128KB per GB with 8-byte pointers).
904 while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
907 h->hash_table = NULL;
908 while (h->hash_table == NULL) {
909 h->hash_table_mask = hsize - 1;
911 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
912 if (h->hash_table == NULL)
915 ASSERT3U(hsize, >=, 1ULL << 10);
919 * The hash table buckets are protected by an array of mutexes where
920 * each mutex is responsible for protecting 128 buckets. A minimum
921 * array size of 8192 is targeted to avoid contention.
923 if (dbuf_mutex_cache_shift == 0)
924 hmsize = MAX(hsize >> 7, 1ULL << 13);
926 hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
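/*
 * For example, a table of 2^20 buckets gets 2^20 >> 7 = 8192 mutexes by
 * default, which is also the enforced minimum (illustrative arithmetic only).
 */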
928 h->hash_mutexes = NULL;
929 while (h->hash_mutexes == NULL) {
930 h->hash_mutex_mask = hmsize - 1;
932 h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
934 if (h->hash_mutexes == NULL)
938 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
939 sizeof (dmu_buf_impl_t),
940 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
942 for (int i = 0; i < hmsize; i++)
943 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
948 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
949 * configuration is not required.
951 dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
953 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
954 multilist_create(&dbuf_caches[dcs].cache,
955 sizeof (dmu_buf_impl_t),
956 offsetof(dmu_buf_impl_t, db_cache_link),
957 dbuf_cache_multilist_index_func);
958 zfs_refcount_create(&dbuf_caches[dcs].size);
961 dbuf_evict_thread_exit = B_FALSE;
962 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
963 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
964 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
965 NULL, 0, &p0, TS_RUN, minclsyspri);
967 wmsum_init(&dbuf_sums.cache_count, 0);
968 wmsum_init(&dbuf_sums.cache_total_evicts, 0);
969 for (int i = 0; i < DN_MAX_LEVELS; i++) {
970 wmsum_init(&dbuf_sums.cache_levels[i], 0);
971 wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
973 wmsum_init(&dbuf_sums.hash_hits, 0);
974 wmsum_init(&dbuf_sums.hash_misses, 0);
975 wmsum_init(&dbuf_sums.hash_collisions, 0);
976 wmsum_init(&dbuf_sums.hash_chains, 0);
977 wmsum_init(&dbuf_sums.hash_insert_race, 0);
978 wmsum_init(&dbuf_sums.metadata_cache_count, 0);
979 wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
981 dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
982 KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
984 if (dbuf_ksp != NULL) {
985 for (int i = 0; i < DN_MAX_LEVELS; i++) {
986 snprintf(dbuf_stats.cache_levels[i].name,
987 KSTAT_STRLEN, "cache_level_%d", i);
988 dbuf_stats.cache_levels[i].data_type =
990 snprintf(dbuf_stats.cache_levels_bytes[i].name,
991 KSTAT_STRLEN, "cache_level_%d_bytes", i);
992 dbuf_stats.cache_levels_bytes[i].data_type =
995 dbuf_ksp->ks_data = &dbuf_stats;
996 dbuf_ksp->ks_update = dbuf_kstat_update;
997 kstat_install(dbuf_ksp);
1004 dbuf_hash_table_t *h = &dbuf_hash_table;
1006 dbuf_stats_destroy();
1008 for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1009 mutex_destroy(&h->hash_mutexes[i]);
1011 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1012 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1015 kmem_cache_destroy(dbuf_kmem_cache);
1016 taskq_destroy(dbu_evict_taskq);
1018 mutex_enter(&dbuf_evict_lock);
1019 dbuf_evict_thread_exit = B_TRUE;
1020 while (dbuf_evict_thread_exit) {
1021 cv_signal(&dbuf_evict_cv);
1022 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1024 mutex_exit(&dbuf_evict_lock);
1026 mutex_destroy(&dbuf_evict_lock);
1027 cv_destroy(&dbuf_evict_cv);
1029 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1030 zfs_refcount_destroy(&dbuf_caches[dcs].size);
1031 multilist_destroy(&dbuf_caches[dcs].cache);
1034 if (dbuf_ksp != NULL) {
1035 kstat_delete(dbuf_ksp);
1039 wmsum_fini(&dbuf_sums.cache_count);
1040 wmsum_fini(&dbuf_sums.cache_total_evicts);
1041 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1042 wmsum_fini(&dbuf_sums.cache_levels[i]);
1043 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1045 wmsum_fini(&dbuf_sums.hash_hits);
1046 wmsum_fini(&dbuf_sums.hash_misses);
1047 wmsum_fini(&dbuf_sums.hash_collisions);
1048 wmsum_fini(&dbuf_sums.hash_chains);
1049 wmsum_fini(&dbuf_sums.hash_insert_race);
1050 wmsum_fini(&dbuf_sums.metadata_cache_count);
1051 wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1060 dbuf_verify(dmu_buf_impl_t *db)
1063 dbuf_dirty_record_t *dr;
1066 ASSERT(MUTEX_HELD(&db->db_mtx));
1068 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1071 ASSERT(db->db_objset != NULL);
1075 ASSERT(db->db_parent == NULL);
1076 ASSERT(db->db_blkptr == NULL);
1078 ASSERT3U(db->db.db_object, ==, dn->dn_object);
1079 ASSERT3P(db->db_objset, ==, dn->dn_objset);
1080 ASSERT3U(db->db_level, <, dn->dn_nlevels);
1081 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1082 db->db_blkid == DMU_SPILL_BLKID ||
1083 !avl_is_empty(&dn->dn_dbufs));
1085 if (db->db_blkid == DMU_BONUS_BLKID) {
1087 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1088 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1089 } else if (db->db_blkid == DMU_SPILL_BLKID) {
1091 ASSERT0(db->db.db_offset);
1093 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1096 if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1097 ASSERT(dr->dr_dbuf == db);
1098 txg_prev = dr->dr_txg;
1099 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1100 dr = list_next(&db->db_dirty_records, dr)) {
1101 ASSERT(dr->dr_dbuf == db);
1102 ASSERT(txg_prev > dr->dr_txg);
1103 txg_prev = dr->dr_txg;
1108 * We can't assert that db_size matches dn_datablksz because it
1109 * can be momentarily different when another thread is doing
1110 * dnode_set_blksz().
1112 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1113 dr = db->db_data_pending;
1115 * It should only be modified in syncing context, so
1116 * make sure we only have one copy of the data.
1118 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1121 /* verify db->db_blkptr */
1122 if (db->db_blkptr) {
1123 if (db->db_parent == dn->dn_dbuf) {
1124 /* db is pointed to by the dnode */
1125 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1126 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1127 ASSERT(db->db_parent == NULL);
1129 ASSERT(db->db_parent != NULL);
1130 if (db->db_blkid != DMU_SPILL_BLKID)
1131 ASSERT3P(db->db_blkptr, ==,
1132 &dn->dn_phys->dn_blkptr[db->db_blkid]);
1134 /* db is pointed to by an indirect block */
1135 int epb __maybe_unused = db->db_parent->db.db_size >>
1137 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1138 ASSERT3U(db->db_parent->db.db_object, ==,
1141 * dnode_grow_indblksz() can make this fail if we don't
1142 * have the parent's rwlock. XXX indblksz no longer
1143 * grows. safe to do this now?
1145 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1146 ASSERT3P(db->db_blkptr, ==,
1147 ((blkptr_t *)db->db_parent->db.db_data +
1148 db->db_blkid % epb));
1152 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1153 (db->db_buf == NULL || db->db_buf->b_data) &&
1154 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1155 db->db_state != DB_FILL && !dn->dn_free_txg) {
1157 * If the blkptr isn't set but the dbuf has nonzero data,
1158 * it had better be dirty, otherwise we'll lose that
1159 * data when we evict this buffer.
1161 * There is an exception to this rule for indirect blocks; in
1162 * this case, if the indirect block is a hole, we fill in a few
1163 * fields on each of the child blocks (importantly, birth time)
1164 * to prevent hole birth times from being lost when you
1165 * partially fill in a hole.
1167 if (db->db_dirtycnt == 0) {
1168 if (db->db_level == 0) {
1169 uint64_t *buf = db->db.db_data;
1172 for (i = 0; i < db->db.db_size >> 3; i++) {
1173 ASSERT(buf[i] == 0);
1176 blkptr_t *bps = db->db.db_data;
1177 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1180 * We want to verify that all the blkptrs in the
1181 * indirect block are holes, but we may have
1182 * automatically set up a few fields for them.
1183 * We iterate through each blkptr and verify
1184 * they only have those fields set.
1187 i < db->db.db_size / sizeof (blkptr_t);
1189 blkptr_t *bp = &bps[i];
1190 ASSERT(ZIO_CHECKSUM_IS_ZERO(
1193 DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1194 DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1195 DVA_IS_EMPTY(&bp->blk_dva[2]));
1196 ASSERT0(bp->blk_fill);
1197 ASSERT0(bp->blk_pad[0]);
1198 ASSERT0(bp->blk_pad[1]);
1199 ASSERT(!BP_IS_EMBEDDED(bp));
1200 ASSERT(BP_IS_HOLE(bp));
1201 ASSERT0(bp->blk_phys_birth);
1211 dbuf_clear_data(dmu_buf_impl_t *db)
1213 ASSERT(MUTEX_HELD(&db->db_mtx));
1214 dbuf_evict_user(db);
1215 ASSERT3P(db->db_buf, ==, NULL);
1216 db->db.db_data = NULL;
1217 if (db->db_state != DB_NOFILL) {
1218 db->db_state = DB_UNCACHED;
1219 DTRACE_SET_STATE(db, "clear data");
1224 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1226 ASSERT(MUTEX_HELD(&db->db_mtx));
1227 ASSERT(buf != NULL);
1230 ASSERT(buf->b_data != NULL);
1231 db->db.db_data = buf->b_data;
1235 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1237 spa_t *spa = db->db_objset->os_spa;
1239 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1243 * Loan out an arc_buf for read. Return the loaned arc_buf.
1246 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1250 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1251 mutex_enter(&db->db_mtx);
1252 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1253 int blksz = db->db.db_size;
1254 spa_t *spa = db->db_objset->os_spa;
1256 mutex_exit(&db->db_mtx);
1257 abuf = arc_loan_buf(spa, B_FALSE, blksz);
1258 memcpy(abuf->b_data, db->db.db_data, blksz);
1261 arc_loan_inuse_buf(abuf, db);
1263 dbuf_clear_data(db);
1264 mutex_exit(&db->db_mtx);
1270 * Calculate which level n block references the data at the level 0 offset
1274 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1276 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1278 * The level n blkid is equal to the level 0 blkid divided by
1279 * the number of level 0s in a level n block.
1281 * The level 0 blkid is offset >> datablkshift =
1282 * offset / 2^datablkshift.
1284 * The number of level 0s in a level n is the number of block
1285 * pointers in an indirect block, raised to the power of level.
1286 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1287 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1289 * Thus, the level n blkid is: offset /
1290 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1291 * = offset / 2^(datablkshift + level *
1292 * (indblkshift - SPA_BLKPTRSHIFT))
1293 * = offset >> (datablkshift + level *
1294 * (indblkshift - SPA_BLKPTRSHIFT))
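 *
 * As a concrete illustration (assuming 128K data blocks, datablkshift = 17,
 * and 128K indirect blocks, indblkshift = 17, so an indirect block holds
 * 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers): the level 1 blkid for an
 * offset is offset >> (17 + 1 * 10) = offset >> 27, so each level 1 block
 * covers 128 MiB of file data.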
1297 const unsigned exp = dn->dn_datablkshift +
1298 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1300 if (exp >= 8 * sizeof (offset)) {
1301 /* This only happens on the highest indirection level */
1302 ASSERT3U(level, ==, dn->dn_nlevels - 1);
1306 ASSERT3U(exp, <, 8 * sizeof (offset));
1308 return (offset >> exp);
1310 ASSERT3U(offset, <, dn->dn_datablksz);
1316 * This function is used to lock the parent of the provided dbuf. This should be
1317 * used when modifying or reading db_blkptr.
1320 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1322 enum db_lock_type ret = DLT_NONE;
1323 if (db->db_parent != NULL) {
1324 rw_enter(&db->db_parent->db_rwlock, rw);
1326 } else if (dmu_objset_ds(db->db_objset) != NULL) {
1327 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1332 * We only return a DLT_NONE lock when it's the top-most indirect block
1333 * of the meta-dnode of the MOS.
1339 * We need to pass the lock type in because it's possible that the block will
1340 * move from being the topmost indirect block in a dnode (and thus have no
1341 * parent) to no longer being the topmost, via an indirection increase. This
1342 * would cause a panic if we didn't pass the lock type in.
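 *
 * A typical caller therefore looks roughly like this (cf. dbuf_read()):
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	...read or update db->db_blkptr...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);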
1345 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1347 if (type == DLT_PARENT)
1348 rw_exit(&db->db_parent->db_rwlock);
1349 else if (type == DLT_OBJSET)
1350 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1354 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1355 arc_buf_t *buf, void *vdb)
1357 (void) zb, (void) bp;
1358 dmu_buf_impl_t *db = vdb;
1360 mutex_enter(&db->db_mtx);
1361 ASSERT3U(db->db_state, ==, DB_READ);
1363 * All reads are synchronous, so we must have a hold on the dbuf
1365 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1366 ASSERT(db->db_buf == NULL);
1367 ASSERT(db->db.db_data == NULL);
1370 ASSERT(zio == NULL || zio->io_error != 0);
1371 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1372 ASSERT3P(db->db_buf, ==, NULL);
1373 db->db_state = DB_UNCACHED;
1374 DTRACE_SET_STATE(db, "i/o error");
1375 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1376 /* freed in flight */
1377 ASSERT(zio == NULL || zio->io_error == 0);
1378 arc_release(buf, db);
1379 memset(buf->b_data, 0, db->db.db_size);
1380 arc_buf_freeze(buf);
1381 db->db_freed_in_flight = FALSE;
1382 dbuf_set_data(db, buf);
1383 db->db_state = DB_CACHED;
1384 DTRACE_SET_STATE(db, "freed in flight");
1387 ASSERT(zio == NULL || zio->io_error == 0);
1388 dbuf_set_data(db, buf);
1389 db->db_state = DB_CACHED;
1390 DTRACE_SET_STATE(db, "successful read");
1392 cv_broadcast(&db->db_changed);
1393 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1397 * Shortcut for performing reads on bonus dbufs. Returns
1398 * an error if we fail to verify the dnode associated with
1399 * a decrypted block. Otherwise success.
1402 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1404 int bonuslen, max_bonuslen, err;
1406 err = dbuf_read_verify_dnode_crypt(db, flags);
1410 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1411 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1412 ASSERT(MUTEX_HELD(&db->db_mtx));
1413 ASSERT(DB_DNODE_HELD(db));
1414 ASSERT3U(bonuslen, <=, db->db.db_size);
1415 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1416 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1417 if (bonuslen < max_bonuslen)
1418 memset(db->db.db_data, 0, max_bonuslen);
1420 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1421 db->db_state = DB_CACHED;
1422 DTRACE_SET_STATE(db, "bonus buffer filled");
1427 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
1429 blkptr_t *bps = db->db.db_data;
1430 uint32_t indbs = 1ULL << dn->dn_indblkshift;
1431 int n_bps = indbs >> SPA_BLKPTRSHIFT;
1433 for (int i = 0; i < n_bps; i++) {
1434 blkptr_t *bp = &bps[i];
1436 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
1437 BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
1438 dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
1439 BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
1440 BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
1441 BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
1446 * Handle reads on dbufs that are holes, if necessary. This function
1447 * requires that the dbuf's mutex is held. Returns success (0) if action
1448 * was taken, ENOENT if no action was taken.
1451 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
1453 ASSERT(MUTEX_HELD(&db->db_mtx));
1455 int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
1457 * For level 0 blocks only, if the above check fails:
1458 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1459 * processes the delete record and clears the bp while we are waiting
1460 * for the dn_mtx (resulting in a "no" from block_freed).
1462 if (!is_hole && db->db_level == 0) {
1463 is_hole = dnode_block_freed(dn, db->db_blkid) ||
1464 BP_IS_HOLE(db->db_blkptr);
1468 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1469 memset(db->db.db_data, 0, db->db.db_size);
1471 if (db->db_blkptr != NULL && db->db_level > 0 &&
1472 BP_IS_HOLE(db->db_blkptr) &&
1473 db->db_blkptr->blk_birth != 0) {
1474 dbuf_handle_indirect_hole(db, dn);
1476 db->db_state = DB_CACHED;
1477 DTRACE_SET_STATE(db, "hole read satisfied");
1484 * This function ensures that, when doing a decrypting read of a block,
1485 * we make sure we have decrypted the dnode associated with it. We must do
1486 * this so that we ensure we are fully authenticating the checksum-of-MACs
1487 * tree from the root of the objset down to this block. Indirect blocks are
1488 * always verified against their secure checksum-of-MACs assuming that the
1489 * dnode containing them is correct. Now that we are doing a decrypting read,
1490 * we can be sure that the key is loaded and verify that assumption. This is
1491 * especially important considering that we always read encrypted dnode
1492 * blocks as raw data (without verifying their MACs) to start, and
1493 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1496 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1499 objset_t *os = db->db_objset;
1500 arc_buf_t *dnode_abuf;
1502 zbookmark_phys_t zb;
1504 ASSERT(MUTEX_HELD(&db->db_mtx));
1506 if (!os->os_encrypted || os->os_raw_receive ||
1507 (flags & DB_RF_NO_DECRYPT) != 0)
1512 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1514 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1519 SET_BOOKMARK(&zb, dmu_objset_id(os),
1520 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1521 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1524 * An error code of EACCES tells us that the key is still not
1525 * available. This is ok if we are only reading authenticated
1526 * (and therefore non-encrypted) blocks.
1528 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1529 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1530 (db->db_blkid == DMU_BONUS_BLKID &&
1531 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1540 * Drops db_mtx and the parent lock specified by dblt and tag before
1544 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1545 db_lock_type_t dblt, const void *tag)
1548 zbookmark_phys_t zb;
1549 uint32_t aflags = ARC_FLAG_NOWAIT;
1554 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1555 ASSERT(MUTEX_HELD(&db->db_mtx));
1556 ASSERT(db->db_state == DB_UNCACHED);
1557 ASSERT(db->db_buf == NULL);
1558 ASSERT(db->db_parent == NULL ||
1559 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1561 if (db->db_blkid == DMU_BONUS_BLKID) {
1562 err = dbuf_read_bonus(db, dn, flags);
1566 err = dbuf_read_hole(db, dn);
1571 * Any attempt to read a redacted block should result in an error. This
1572 * will never happen under normal conditions, but can be useful for
1573 * debugging purposes.
1575 if (BP_IS_REDACTED(db->db_blkptr)) {
1576 ASSERT(dsl_dataset_feature_is_active(
1577 db->db_objset->os_dsl_dataset,
1578 SPA_FEATURE_REDACTED_DATASETS));
1579 err = SET_ERROR(EIO);
1583 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1584 db->db.db_object, db->db_level, db->db_blkid);
1587 * All bps of an encrypted os should have the encryption bit set.
1588 * If this is not true, it indicates tampering and we report an error.
1590 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
1591 spa_log_error(db->db_objset->os_spa, &zb);
1592 zfs_panic_recover("unencrypted block in encrypted "
1593 "object set %llu", dmu_objset_id(db->db_objset));
1594 err = SET_ERROR(EIO);
1598 err = dbuf_read_verify_dnode_crypt(db, flags);
1604 db->db_state = DB_READ;
1605 DTRACE_SET_STATE(db, "read issued");
1606 mutex_exit(&db->db_mtx);
1608 if (dbuf_is_l2cacheable(db))
1609 aflags |= ARC_FLAG_L2CACHE;
1611 dbuf_add_ref(db, NULL);
1613 zio_flags = (flags & DB_RF_CANFAIL) ?
1614 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1616 if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1617 zio_flags |= ZIO_FLAG_RAW;
1619 * The zio layer will copy the provided blkptr later, but we need to
1620 * do this now so that we can release the parent's rwlock. We have to
1621 * do that now so that if dbuf_read_done is called synchronously (on
1622 * an l1 cache hit) we don't acquire the db_mtx while holding the
1623 * parent's rwlock, which would be a lock ordering violation.
1625 blkptr_t bp = *db->db_blkptr;
1626 dmu_buf_unlock_parent(db, dblt, tag);
1627 (void) arc_read(zio, db->db_objset->os_spa, &bp,
1628 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1633 mutex_exit(&db->db_mtx);
1634 dmu_buf_unlock_parent(db, dblt, tag);
1639 * This is our just-in-time copy function. It makes a copy of buffers that
1640 * have been modified in a previous transaction group before we access them in
1641 * the current active group.
1643 * This function is used in three places: when we are dirtying a buffer for the
1644 * first time in a txg, when we are freeing a range in a dnode that includes
1645 * this buffer, and when we are accessing a buffer which was received compressed
1646 * and later referenced in a WRITE_BYREF record.
1648 * Note that when we are called from dbuf_free_range() we do not put a hold on
1649 * the buffer, we just traverse the active dbuf list for the dnode.
1652 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1654 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1656 ASSERT(MUTEX_HELD(&db->db_mtx));
1657 ASSERT(db->db.db_data != NULL);
1658 ASSERT(db->db_level == 0);
1659 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1662 (dr->dt.dl.dr_data !=
1663 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1667 * If the last dirty record for this dbuf has not yet synced
1668 * and it's referencing the dbuf data, either:
1669 * reset the reference to point to a new copy,
1670 * or (if there are no active holders)
1671 * just null out the current db_data pointer.
1673 ASSERT3U(dr->dr_txg, >=, txg - 2);
1674 if (db->db_blkid == DMU_BONUS_BLKID) {
1675 dnode_t *dn = DB_DNODE(db);
1676 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1677 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1678 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1679 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1680 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1681 dnode_t *dn = DB_DNODE(db);
1682 int size = arc_buf_size(db->db_buf);
1683 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1684 spa_t *spa = db->db_objset->os_spa;
1685 enum zio_compress compress_type =
1686 arc_get_compression(db->db_buf);
1687 uint8_t complevel = arc_get_complevel(db->db_buf);
1689 if (arc_is_encrypted(db->db_buf)) {
1690 boolean_t byteorder;
1691 uint8_t salt[ZIO_DATA_SALT_LEN];
1692 uint8_t iv[ZIO_DATA_IV_LEN];
1693 uint8_t mac[ZIO_DATA_MAC_LEN];
1695 arc_get_raw_params(db->db_buf, &byteorder, salt,
1697 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1698 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1699 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1700 compress_type, complevel);
1701 } else if (compress_type != ZIO_COMPRESS_OFF) {
1702 ASSERT3U(type, ==, ARC_BUFC_DATA);
1703 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1704 size, arc_buf_lsize(db->db_buf), compress_type,
1707 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1709 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1712 dbuf_clear_data(db);
1717 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1724 * We don't have to hold the mutex to check db_state because it
1725 * can't be freed while we have a hold on the buffer.
1727 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1729 if (db->db_state == DB_NOFILL)
1730 return (SET_ERROR(EIO));
1735 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1736 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
1737 DBUF_IS_CACHEABLE(db);
1739 mutex_enter(&db->db_mtx);
1740 if (db->db_state == DB_CACHED) {
1741 spa_t *spa = dn->dn_objset->os_spa;
1744 * Ensure that this block's dnode has been decrypted if
1745 * the caller has requested decrypted data.
1747 err = dbuf_read_verify_dnode_crypt(db, flags);
1750 * If the arc buf is compressed or encrypted and the caller
1751 * requested uncompressed data, we need to untransform it
1752 * before returning. We also call arc_untransform() on any
1753 * unauthenticated blocks, which will verify their MAC if
1754 * the key is now available.
1756 if (err == 0 && db->db_buf != NULL &&
1757 (flags & DB_RF_NO_DECRYPT) == 0 &&
1758 (arc_is_encrypted(db->db_buf) ||
1759 arc_is_unauthenticated(db->db_buf) ||
1760 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1761 zbookmark_phys_t zb;
1763 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1764 db->db.db_object, db->db_level, db->db_blkid);
1765 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1766 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1767 dbuf_set_data(db, db->db_buf);
1769 mutex_exit(&db->db_mtx);
1770 if (err == 0 && prefetch) {
1771 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1772 B_FALSE, flags & DB_RF_HAVESTRUCT);
1775 DBUF_STAT_BUMP(hash_hits);
1776 } else if (db->db_state == DB_UNCACHED) {
1777 spa_t *spa = dn->dn_objset->os_spa;
1778 boolean_t need_wait = B_FALSE;
1780 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1783 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1784 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1787 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1789 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1792 if (!err && prefetch) {
1793 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1794 db->db_state != DB_CACHED,
1795 flags & DB_RF_HAVESTRUCT);
1799 DBUF_STAT_BUMP(hash_misses);
1802 * If we created a zio_root we must execute it to avoid
1803 * leaking it, even if it isn't attached to any work due
1804 * to an error in dbuf_read_impl().
1808 err = zio_wait(zio);
1810 VERIFY0(zio_wait(zio));
1814 * Another reader came in while the dbuf was in flight
1815 * between UNCACHED and CACHED. Either a writer will finish
1816 * writing the buffer (sending the dbuf to CACHED) or the
1817 * first reader's request will reach the read_done callback
1818 * and send the dbuf to CACHED. Otherwise, a failure
1819 * occurred and the dbuf went to UNCACHED.
1821 mutex_exit(&db->db_mtx);
1823 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1824 B_TRUE, flags & DB_RF_HAVESTRUCT);
1827 DBUF_STAT_BUMP(hash_misses);
1829 /* Skip the wait per the caller's request. */
1830 if ((flags & DB_RF_NEVERWAIT) == 0) {
1831 mutex_enter(&db->db_mtx);
1832 while (db->db_state == DB_READ ||
1833 db->db_state == DB_FILL) {
1834 ASSERT(db->db_state == DB_READ ||
1835 (flags & DB_RF_HAVESTRUCT) == 0);
1836 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1838 cv_wait(&db->db_changed, &db->db_mtx);
1840 if (db->db_state == DB_UNCACHED)
1841 err = SET_ERROR(EIO);
1842 mutex_exit(&db->db_mtx);
1850 dbuf_noread(dmu_buf_impl_t *db)
1852 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1853 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1854 mutex_enter(&db->db_mtx);
1855 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1856 cv_wait(&db->db_changed, &db->db_mtx);
1857 if (db->db_state == DB_UNCACHED) {
1858 ASSERT(db->db_buf == NULL);
1859 ASSERT(db->db.db_data == NULL);
1860 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1861 db->db_state = DB_FILL;
1862 DTRACE_SET_STATE(db, "assigning filled buffer");
1863 } else if (db->db_state == DB_NOFILL) {
1864 dbuf_clear_data(db);
1866 ASSERT3U(db->db_state, ==, DB_CACHED);
1868 mutex_exit(&db->db_mtx);
1872 dbuf_unoverride(dbuf_dirty_record_t *dr)
1874 dmu_buf_impl_t *db = dr->dr_dbuf;
1875 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1876 uint64_t txg = dr->dr_txg;
1878 ASSERT(MUTEX_HELD(&db->db_mtx));
1880 * This assert is valid because dmu_sync() expects to be called by
1881 * a zilog's get_data while holding a range lock. This call only
1882 * comes from dbuf_dirty() callers who must also hold a range lock.
1884 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1885 ASSERT(db->db_level == 0);
1887 if (db->db_blkid == DMU_BONUS_BLKID ||
1888 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1891 ASSERT(db->db_data_pending != dr);
1893 /* free this block */
1894 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1895 zio_free(db->db_objset->os_spa, txg, bp);
1897 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1898 dr->dt.dl.dr_nopwrite = B_FALSE;
1899 dr->dt.dl.dr_has_raw_params = B_FALSE;
1902 * Release the already-written buffer, so we leave it in
1903 * a consistent dirty state. Note that all callers are
1904 * modifying the buffer, so they will immediately do
1905 * another (redundant) arc_release(). Therefore, leave
1906 * the buf thawed to save the effort of freezing &
1907 * immediately re-thawing it.
1909 arc_release(dr->dt.dl.dr_data, db);
1913 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1914 * data blocks in the free range, so that any future readers will find
1918 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1921 dmu_buf_impl_t *db_search;
1922 dmu_buf_impl_t *db, *db_next;
1923 uint64_t txg = tx->tx_txg;
1925 dbuf_dirty_record_t *dr;
1927 if (end_blkid > dn->dn_maxblkid &&
1928 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1929 end_blkid = dn->dn_maxblkid;
1930 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1931 (u_longlong_t)end_blkid);
1933 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1934 db_search->db_level = 0;
1935 db_search->db_blkid = start_blkid;
1936 db_search->db_state = DB_SEARCH;
1938 mutex_enter(&dn->dn_dbufs_mtx);
1939 db = avl_find(&dn->dn_dbufs, db_search, &where);
1940 ASSERT3P(db, ==, NULL);
1942 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1944 for (; db != NULL; db = db_next) {
1945 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1946 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1948 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1951 ASSERT3U(db->db_blkid, >=, start_blkid);
1953 /* found a level 0 buffer in the range */
1954 mutex_enter(&db->db_mtx);
1955 if (dbuf_undirty(db, tx)) {
1956 /* mutex has been dropped and dbuf destroyed */
1960 if (db->db_state == DB_UNCACHED ||
1961 db->db_state == DB_NOFILL ||
1962 db->db_state == DB_EVICTING) {
1963 ASSERT(db->db.db_data == NULL);
1964 mutex_exit(&db->db_mtx);
1967 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1968 /* will be handled in dbuf_read_done or dbuf_rele */
1969 db->db_freed_in_flight = TRUE;
1970 mutex_exit(&db->db_mtx);
1973 if (zfs_refcount_count(&db->db_holds) == 0) {
1978 /* The dbuf is referenced */
1980 dr = list_head(&db->db_dirty_records);
1982 if (dr->dr_txg == txg) {
1984 * This buffer is "in-use", re-adjust the file
1985 * size to reflect that this buffer may
1986 * contain new data when we sync.
1988 if (db->db_blkid != DMU_SPILL_BLKID &&
1989 db->db_blkid > dn->dn_maxblkid)
1990 dn->dn_maxblkid = db->db_blkid;
1991 dbuf_unoverride(dr);
1994 * This dbuf is not dirty in the open context.
1995 * Either uncache it (if it's not referenced in
1996 * the open context) or reset its contents to the data on disk.
1999 dbuf_fix_old_data(db, txg);
2002 /* clear the contents if it's cached */
2003 if (db->db_state == DB_CACHED) {
2004 ASSERT(db->db.db_data != NULL);
2005 arc_release(db->db_buf, db);
2006 rw_enter(&db->db_rwlock, RW_WRITER);
2007 memset(db->db.db_data, 0, db->db.db_size);
2008 rw_exit(&db->db_rwlock);
2009 arc_buf_freeze(db->db_buf);
2012 mutex_exit(&db->db_mtx);
2015 mutex_exit(&dn->dn_dbufs_mtx);
2016 kmem_free(db_search, sizeof (dmu_buf_impl_t));
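/*
 * Change the size of an existing level-0 dbuf: allocate a new ARC buffer of
 * the requested size, copy the old contents into it, zero any remainder, and
 * update the current dirty record's space accounting. The buffer is dirtied
 * in this transaction via dmu_buf_will_dirty() before it is replaced.
 */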
2020 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2022 arc_buf_t *buf, *old_buf;
2023 dbuf_dirty_record_t *dr;
2024 int osize = db->db.db_size;
2025 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2028 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2034 * XXX we should be doing a dbuf_read, checking the return
2035 * value and returning that up to our callers
2037 dmu_buf_will_dirty(&db->db, tx);
2039 /* create the data buffer for the new block */
2040 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2042 /* copy old block data to the new block */
2043 old_buf = db->db_buf;
2044 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2045 /* zero the remainder */
2047 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2049 mutex_enter(&db->db_mtx);
2050 dbuf_set_data(db, buf);
2051 arc_buf_destroy(old_buf, db);
2052 db->db.db_size = size;
2054 dr = list_head(&db->db_dirty_records);
2055 /* dirty record added by dmu_buf_will_dirty() */
2057 if (db->db_level == 0)
2058 dr->dt.dl.dr_data = buf;
2059 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2060 ASSERT3U(dr->dr_accounted, ==, osize);
2061 dr->dr_accounted = size;
2062 mutex_exit(&db->db_mtx);
2064 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
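/*
 * Release this dbuf's ARC buffer so that the block can be rewritten and a
 * new block pointer assigned. Only called from syncing context, as the
 * assertions below require.
 */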
2069 dbuf_release_bp(dmu_buf_impl_t *db)
2071 objset_t *os __maybe_unused = db->db_objset;
2073 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2074 ASSERT(arc_released(os->os_phys_buf) ||
2075 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2076 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2078 (void) arc_release(db->db_buf, db);
2082 * We already have a dirty record for this TXG, and we are being
2086 dbuf_redirty(dbuf_dirty_record_t *dr)
2088 dmu_buf_impl_t *db = dr->dr_dbuf;
2090 ASSERT(MUTEX_HELD(&db->db_mtx));
2092 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2094 * If this buffer has already been written out,
2095 * we now need to reset its state.
2097 dbuf_unoverride(dr);
2098 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2099 db->db_state != DB_NOFILL) {
2100 /* Already released on initial dirty, so just thaw. */
2101 ASSERT(arc_released(db->db_buf));
2102 arc_buf_thaw(db->db_buf);
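/*
 * Create a "lightweight" dirty record for a level-0 block that has no dbuf
 * instantiated. The record is linked either directly onto the dnode's dirty
 * list (when the object has a single level) or onto the dirty record of the
 * level-1 indirect block that covers it.
 */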
2107 dbuf_dirty_record_t *
2108 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2110 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2111 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2112 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2113 ASSERT(dn->dn_maxblkid >= blkid);
2115 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2116 list_link_init(&dr->dr_dirty_node);
2117 list_link_init(&dr->dr_dbuf_node);
2119 dr->dr_txg = tx->tx_txg;
2120 dr->dt.dll.dr_blkid = blkid;
2121 dr->dr_accounted = dn->dn_datablksz;
2124 * There should not be any dbuf for the block that we're dirtying.
2125 * Otherwise the buffer contents could be inconsistent between the
2126 * dbuf and the lightweight dirty record.
2128 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
2130 mutex_enter(&dn->dn_mtx);
2131 int txgoff = tx->tx_txg & TXG_MASK;
2132 if (dn->dn_free_ranges[txgoff] != NULL) {
2133 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2136 if (dn->dn_nlevels == 1) {
2137 ASSERT3U(blkid, <, dn->dn_nblkptr);
2138 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2139 mutex_exit(&dn->dn_mtx);
2140 rw_exit(&dn->dn_struct_rwlock);
2141 dnode_setdirty(dn, tx);
2143 mutex_exit(&dn->dn_mtx);
2145 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2146 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2147 1, blkid >> epbs, FTAG);
2148 rw_exit(&dn->dn_struct_rwlock);
2149 if (parent_db == NULL) {
2150 kmem_free(dr, sizeof (*dr));
2153 int err = dbuf_read(parent_db, NULL,
2154 (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2156 dbuf_rele(parent_db, FTAG);
2157 kmem_free(dr, sizeof (*dr));
2161 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2162 dbuf_rele(parent_db, FTAG);
2163 mutex_enter(&parent_dr->dt.di.dr_mtx);
2164 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2165 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2166 mutex_exit(&parent_dr->dt.di.dr_mtx);
2167 dr->dr_parent = parent_dr;
2170 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
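/*
 * Mark this dbuf dirty in the given transaction's txg: create (or reuse) a
 * dirty record for it and recursively dirty the parent indirect blocks up to
 * the dnode, so the change is written out when the txg syncs.
 */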
2175 dbuf_dirty_record_t *
2176 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2180 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2181 int txgoff = tx->tx_txg & TXG_MASK;
2182 boolean_t drop_struct_rwlock = B_FALSE;
2184 ASSERT(tx->tx_txg != 0);
2185 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2186 DMU_TX_DIRTY_BUF(tx, db);
2191 * Shouldn't dirty a regular buffer in syncing context. Private
2192 * objects may be dirtied in syncing context, but only if they
2193 * were already pre-dirtied in open context.
2196 if (dn->dn_objset->os_dsl_dataset != NULL) {
2197 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2200 ASSERT(!dmu_tx_is_syncing(tx) ||
2201 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2202 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2203 dn->dn_objset->os_dsl_dataset == NULL);
2204 if (dn->dn_objset->os_dsl_dataset != NULL)
2205 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2208 * We make this assert for private objects as well, but after we
2209 * check if we're already dirty. They are allowed to re-dirty
2210 * in syncing context.
2212 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2213 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2214 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2216 mutex_enter(&db->db_mtx);
2218 * XXX make this true for indirects too? The problem is that
2219 * transactions created with dmu_tx_create_assigned() from
2220 * syncing context don't bother holding ahead.
2222 ASSERT(db->db_level != 0 ||
2223 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2224 db->db_state == DB_NOFILL);
2226 mutex_enter(&dn->dn_mtx);
2227 dnode_set_dirtyctx(dn, tx, db);
2228 if (tx->tx_txg > dn->dn_dirty_txg)
2229 dn->dn_dirty_txg = tx->tx_txg;
2230 mutex_exit(&dn->dn_mtx);
2232 if (db->db_blkid == DMU_SPILL_BLKID)
2233 dn->dn_have_spill = B_TRUE;
2236 * If this buffer is already dirty, we're done.
2238 dr_head = list_head(&db->db_dirty_records);
2239 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2240 db->db.db_object == DMU_META_DNODE_OBJECT);
2241 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2242 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2245 dbuf_redirty(dr_next);
2246 mutex_exit(&db->db_mtx);
2251 * Only valid if not already dirty.
2253 ASSERT(dn->dn_object == 0 ||
2254 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2255 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2257 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2260 * We should only be dirtying in syncing context if it's the
2261 * mos or we're initializing the os or it's a special object.
2262 * However, we are allowed to dirty in syncing context provided
2263 * we already dirtied it in open context. Hence we must make
2264 * this assertion only if we're not already dirty.
2267 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2269 if (dn->dn_objset->os_dsl_dataset != NULL)
2270 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2271 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2272 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2273 if (dn->dn_objset->os_dsl_dataset != NULL)
2274 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2276 ASSERT(db->db.db_size != 0);
2278 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2280 if (db->db_blkid != DMU_BONUS_BLKID) {
2281 dmu_objset_willuse_space(os, db->db.db_size, tx);
2285 * If this buffer is dirty in an old transaction group we need
2286 * to make a copy of it so that the changes we make in this
2287 * transaction group won't leak out when we sync the older txg.
2289 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2290 list_link_init(&dr->dr_dirty_node);
2291 list_link_init(&dr->dr_dbuf_node);
2293 if (db->db_level == 0) {
2294 void *data_old = db->db_buf;
2296 if (db->db_state != DB_NOFILL) {
2297 if (db->db_blkid == DMU_BONUS_BLKID) {
2298 dbuf_fix_old_data(db, tx->tx_txg);
2299 data_old = db->db.db_data;
2300 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2302 * Release the data buffer from the cache so
2303 * that we can modify it without impacting
2304 * possible other users of this cached data
2305 * block. Note that indirect blocks and
2306 * private objects are not released until the
2307 * syncing state (since they are only modified then).
2310 arc_release(db->db_buf, db);
2311 dbuf_fix_old_data(db, tx->tx_txg);
2312 data_old = db->db_buf;
2314 ASSERT(data_old != NULL);
2316 dr->dt.dl.dr_data = data_old;
2318 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2319 list_create(&dr->dt.di.dr_children,
2320 sizeof (dbuf_dirty_record_t),
2321 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2323 if (db->db_blkid != DMU_BONUS_BLKID)
2324 dr->dr_accounted = db->db.db_size;
2326 dr->dr_txg = tx->tx_txg;
2327 list_insert_before(&db->db_dirty_records, dr_next, dr);
2330 * We could have been freed_in_flight between the dbuf_noread
2331 * and dbuf_dirty. We win, as though the dbuf_noread() had
2332 * happened after the free.
2334 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2335 db->db_blkid != DMU_SPILL_BLKID) {
2336 mutex_enter(&dn->dn_mtx);
2337 if (dn->dn_free_ranges[txgoff] != NULL) {
2338 range_tree_clear(dn->dn_free_ranges[txgoff],
2341 mutex_exit(&dn->dn_mtx);
2342 db->db_freed_in_flight = FALSE;
2346 * This buffer is now part of this txg
2348 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2349 db->db_dirtycnt += 1;
2350 ASSERT3U(db->db_dirtycnt, <=, 3);
2352 mutex_exit(&db->db_mtx);
2354 if (db->db_blkid == DMU_BONUS_BLKID ||
2355 db->db_blkid == DMU_SPILL_BLKID) {
2356 mutex_enter(&dn->dn_mtx);
2357 ASSERT(!list_link_active(&dr->dr_dirty_node));
2358 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2359 mutex_exit(&dn->dn_mtx);
2360 dnode_setdirty(dn, tx);
2365 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2366 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2367 drop_struct_rwlock = B_TRUE;
2371 * If we are overwriting a dedup BP, then unless it is snapshotted,
2372 * when we get to syncing context we will need to decrement its
2373 * refcount in the DDT. Prefetch the relevant DDT block so that
2374 * syncing context won't have to wait for the i/o.
2376 if (db->db_blkptr != NULL) {
2377 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2378 ddt_prefetch(os->os_spa, db->db_blkptr);
2379 dmu_buf_unlock_parent(db, dblt, FTAG);
2383 * We need to hold the dn_struct_rwlock to make this assertion,
2384 * because it protects dn_phys / dn_next_nlevels from changing.
2386 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2387 dn->dn_phys->dn_nlevels > db->db_level ||
2388 dn->dn_next_nlevels[txgoff] > db->db_level ||
2389 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2390 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2393 if (db->db_level == 0) {
2394 ASSERT(!db->db_objset->os_raw_receive ||
2395 dn->dn_maxblkid >= db->db_blkid);
2396 dnode_new_blkid(dn, db->db_blkid, tx,
2397 drop_struct_rwlock, B_FALSE);
2398 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2401 if (db->db_level+1 < dn->dn_nlevels) {
2402 dmu_buf_impl_t *parent = db->db_parent;
2403 dbuf_dirty_record_t *di;
2404 int parent_held = FALSE;
2406 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2407 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2408 parent = dbuf_hold_level(dn, db->db_level + 1,
2409 db->db_blkid >> epbs, FTAG);
2410 ASSERT(parent != NULL);
2413 if (drop_struct_rwlock)
2414 rw_exit(&dn->dn_struct_rwlock);
2415 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2416 di = dbuf_dirty(parent, tx);
2418 dbuf_rele(parent, FTAG);
2420 mutex_enter(&db->db_mtx);
2422 * Since we've dropped the mutex, it's possible that
2423 * dbuf_undirty() might have changed this out from under us.
2425 if (list_head(&db->db_dirty_records) == dr ||
2426 dn->dn_object == DMU_META_DNODE_OBJECT) {
2427 mutex_enter(&di->dt.di.dr_mtx);
2428 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2429 ASSERT(!list_link_active(&dr->dr_dirty_node));
2430 list_insert_tail(&di->dt.di.dr_children, dr);
2431 mutex_exit(&di->dt.di.dr_mtx);
2434 mutex_exit(&db->db_mtx);
2436 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2437 ASSERT(db->db_blkid < dn->dn_nblkptr);
2438 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2439 mutex_enter(&dn->dn_mtx);
2440 ASSERT(!list_link_active(&dr->dr_dirty_node));
2441 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2442 mutex_exit(&dn->dn_mtx);
2443 if (drop_struct_rwlock)
2444 rw_exit(&dn->dn_struct_rwlock);
2447 dnode_setdirty(dn, tx);
2453 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2455 dmu_buf_impl_t *db = dr->dr_dbuf;
2457 if (dr->dt.dl.dr_data != db->db.db_data) {
2458 struct dnode *dn = dr->dr_dnode;
2459 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2461 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2462 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2464 db->db_data_pending = NULL;
2465 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2466 list_remove(&db->db_dirty_records, dr);
2467 if (dr->dr_dbuf->db_level != 0) {
2468 mutex_destroy(&dr->dt.di.dr_mtx);
2469 list_destroy(&dr->dt.di.dr_children);
2471 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2472 ASSERT3U(db->db_dirtycnt, >, 0);
2473 db->db_dirtycnt -= 1;
2477 * Undirty a buffer in the transaction group referenced by the given
2478 * transaction. Return whether this evicted the dbuf.
2481 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2483 uint64_t txg = tx->tx_txg;
2488 * Due to our use of dn_nlevels below, this can only be called
2489 * in open context, unless we are operating on the MOS.
2490 * From syncing context, dn_nlevels may be different from the
2491 * dn_nlevels used when dbuf was dirtied.
2493 ASSERT(db->db_objset ==
2494 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2495 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2496 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2497 ASSERT0(db->db_level);
2498 ASSERT(MUTEX_HELD(&db->db_mtx));
2501 * If this buffer is not dirty, we're done.
2503 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2506 ASSERT(dr->dr_dbuf == db);
2508 dnode_t *dn = dr->dr_dnode;
2510 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2512 ASSERT(db->db.db_size != 0);
2514 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2515 dr->dr_accounted, txg);
2517 list_remove(&db->db_dirty_records, dr);
2520 * Note that there are three places in dbuf_dirty()
2521 * where this dirty record may be put on a list.
2522 * Make sure to do a list_remove corresponding to
2523 * every one of those list_insert calls.
2525 if (dr->dr_parent) {
2526 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2527 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2528 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2529 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2530 db->db_level + 1 == dn->dn_nlevels) {
2531 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2532 mutex_enter(&dn->dn_mtx);
2533 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2534 mutex_exit(&dn->dn_mtx);
2537 if (db->db_state != DB_NOFILL) {
2538 dbuf_unoverride(dr);
2540 ASSERT(db->db_buf != NULL);
2541 ASSERT(dr->dt.dl.dr_data != NULL);
2542 if (dr->dt.dl.dr_data != db->db_buf)
2543 arc_buf_destroy(dr->dt.dl.dr_data, db);
2546 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2548 ASSERT(db->db_dirtycnt > 0);
2549 db->db_dirtycnt -= 1;
2551 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2552 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2561 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2563 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2565 ASSERT(tx->tx_txg != 0);
2566 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2569 * Quick check for dirtiness. For already dirty blocks, this
2570 * reduces runtime of this function by >90%, and overall performance
2571 * by 50% for some workloads (e.g. file deletion with indirect blocks cached).
2574 mutex_enter(&db->db_mtx);
2576 if (db->db_state == DB_CACHED) {
2577 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2579 * It's possible that it is already dirty but not cached,
2580 * because there are some calls to dbuf_dirty() that don't
2581 * go through dmu_buf_will_dirty().
2584 /* This dbuf is already dirty and cached. */
2586 mutex_exit(&db->db_mtx);
2590 mutex_exit(&db->db_mtx);
2593 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2594 flags |= DB_RF_HAVESTRUCT;
2596 (void) dbuf_read(db, NULL, flags);
2597 (void) dbuf_dirty(db, tx);
2601 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2603 dmu_buf_will_dirty_impl(db_fake,
2604 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2608 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2610 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2611 dbuf_dirty_record_t *dr;
2613 mutex_enter(&db->db_mtx);
2614 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2615 mutex_exit(&db->db_mtx);
2616 return (dr != NULL);
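/*
 * Mark the buffer NOFILL: the caller will supply the block's on-disk
 * contents some other way (e.g. an override or embedded block pointer), so
 * the current data need not be read in or preserved.
 */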
2620 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2622 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2624 db->db_state = DB_NOFILL;
2625 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2626 dmu_buf_will_fill(db_fake, tx);
2630 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2632 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2634 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2635 ASSERT(tx->tx_txg != 0);
2636 ASSERT(db->db_level == 0);
2637 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2639 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2640 dmu_tx_private_ok(tx));
2643 (void) dbuf_dirty(db, tx);
2647 * This function is effectively the same as dmu_buf_will_dirty(), but
2648 * indicates the caller expects raw encrypted data in the db, and provides
2649 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2650 * blkptr_t when this dbuf is written. This is only used for blocks of
2651 * dnodes, during raw receive.
2654 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2655 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2657 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2658 dbuf_dirty_record_t *dr;
2661 * dr_has_raw_params is only processed for blocks of dnodes
2662 * (see dbuf_prepare_encrypted_dnode_leaf()).
2664 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2665 ASSERT3U(db->db_level, ==, 0);
2666 ASSERT(db->db_objset->os_raw_receive);
2668 dmu_buf_will_dirty_impl(db_fake,
2669 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2671 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2673 ASSERT3P(dr, !=, NULL);
2675 dr->dt.dl.dr_has_raw_params = B_TRUE;
2676 dr->dt.dl.dr_byteorder = byteorder;
2677 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2678 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2679 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
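/*
 * Record the given block pointer as this dirty record's "overridden by" BP,
 * so that syncing context uses it directly instead of writing the buffer
 * out again.
 */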
2683 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2685 struct dirty_leaf *dl;
2686 dbuf_dirty_record_t *dr;
2688 dr = list_head(&db->db_dirty_records);
2689 ASSERT3P(dr, !=, NULL);
2690 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2692 dl->dr_overridden_by = *bp;
2693 dl->dr_override_state = DR_OVERRIDDEN;
2694 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2698 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2701 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2702 dbuf_states_t old_state;
2703 mutex_enter(&db->db_mtx);
2706 old_state = db->db_state;
2707 db->db_state = DB_CACHED;
2708 if (old_state == DB_FILL) {
2709 if (db->db_level == 0 && db->db_freed_in_flight) {
2710 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2711 /* we were freed while filling */
2712 /* XXX dbuf_undirty? */
2713 memset(db->db.db_data, 0, db->db.db_size);
2714 db->db_freed_in_flight = FALSE;
2715 DTRACE_SET_STATE(db,
2716 "fill done handling freed in flight");
2718 DTRACE_SET_STATE(db, "fill done");
2720 cv_broadcast(&db->db_changed);
2722 mutex_exit(&db->db_mtx);
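/*
 * Write data directly into the block pointer as an embedded BP, so no
 * separate data block is allocated on disk. The dbuf is marked NOFILL and
 * its dirty record is overridden with the constructed embedded block
 * pointer.
 */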
2726 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2727 bp_embedded_type_t etype, enum zio_compress comp,
2728 int uncompressed_size, int compressed_size, int byteorder,
2731 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2732 struct dirty_leaf *dl;
2733 dmu_object_type_t type;
2734 dbuf_dirty_record_t *dr;
2736 if (etype == BP_EMBEDDED_TYPE_DATA) {
2737 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2738 SPA_FEATURE_EMBEDDED_DATA));
2742 type = DB_DNODE(db)->dn_type;
2745 ASSERT0(db->db_level);
2746 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2748 dmu_buf_will_not_fill(dbuf, tx);
2750 dr = list_head(&db->db_dirty_records);
2751 ASSERT3P(dr, !=, NULL);
2752 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2754 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2755 data, comp, uncompressed_size, compressed_size);
2756 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2757 BP_SET_TYPE(&dl->dr_overridden_by, type);
2758 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2759 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2761 dl->dr_override_state = DR_OVERRIDDEN;
2762 dl->dr_overridden_by.blk_birth = dr->dr_txg;
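/*
 * Override this block with a redacted block pointer (used for redacted
 * datasets): the resulting BP records the block's logical size and birth
 * txg, but no data is written for it.
 */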
2766 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2768 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2769 dmu_object_type_t type;
2770 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2771 SPA_FEATURE_REDACTED_DATASETS));
2774 type = DB_DNODE(db)->dn_type;
2777 ASSERT0(db->db_level);
2778 dmu_buf_will_not_fill(dbuf, tx);
2780 blkptr_t bp = { { { {0} } } };
2781 BP_SET_TYPE(&bp, type);
2782 BP_SET_LEVEL(&bp, 0);
2783 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2784 BP_SET_REDACTED(&bp);
2785 BPE_SET_LSIZE(&bp, dbuf->db_size);
2787 dbuf_override_impl(db, &bp, tx);
2791 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2792 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2795 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2797 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2798 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2799 ASSERT(db->db_level == 0);
2800 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2801 ASSERT(buf != NULL);
2802 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2803 ASSERT(tx->tx_txg != 0);
2805 arc_return_buf(buf, db);
2806 ASSERT(arc_released(buf));
2808 mutex_enter(&db->db_mtx);
2810 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2811 cv_wait(&db->db_changed, &db->db_mtx);
2813 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2815 if (db->db_state == DB_CACHED &&
2816 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2818 * In practice, we will never have a case where we have an
2819 * encrypted arc buffer while additional holds exist on the
2820 * dbuf. We don't handle this here so we simply assert that fact instead.
2823 ASSERT(!arc_is_encrypted(buf));
2824 mutex_exit(&db->db_mtx);
2825 (void) dbuf_dirty(db, tx);
2826 memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2827 arc_buf_destroy(buf, db);
2831 if (db->db_state == DB_CACHED) {
2832 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2834 ASSERT(db->db_buf != NULL);
2835 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2836 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2838 if (!arc_released(db->db_buf)) {
2839 ASSERT(dr->dt.dl.dr_override_state ==
2841 arc_release(db->db_buf, db);
2843 dr->dt.dl.dr_data = buf;
2844 arc_buf_destroy(db->db_buf, db);
2845 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2846 arc_release(db->db_buf, db);
2847 arc_buf_destroy(db->db_buf, db);
2851 ASSERT(db->db_buf == NULL);
2852 dbuf_set_data(db, buf);
2853 db->db_state = DB_FILL;
2854 DTRACE_SET_STATE(db, "filling assigned arcbuf");
2855 mutex_exit(&db->db_mtx);
2856 (void) dbuf_dirty(db, tx);
2857 dmu_buf_fill_done(&db->db, tx);
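/*
 * Tear down a dbuf that has no remaining holds: free its data, remove it
 * from the dbuf cache, the hash table, and the dnode's dbuf list, and drop
 * the holds it had on its parent dbuf and its dnode.
 */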
2861 dbuf_destroy(dmu_buf_impl_t *db)
2864 dmu_buf_impl_t *parent = db->db_parent;
2865 dmu_buf_impl_t *dndb;
2867 ASSERT(MUTEX_HELD(&db->db_mtx));
2868 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2870 if (db->db_buf != NULL) {
2871 arc_buf_destroy(db->db_buf, db);
2875 if (db->db_blkid == DMU_BONUS_BLKID) {
2876 int slots = DB_DNODE(db)->dn_num_slots;
2877 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2878 if (db->db.db_data != NULL) {
2879 kmem_free(db->db.db_data, bonuslen);
2880 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2881 db->db_state = DB_UNCACHED;
2882 DTRACE_SET_STATE(db, "buffer cleared");
2886 dbuf_clear_data(db);
2888 if (multilist_link_active(&db->db_cache_link)) {
2889 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2890 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2892 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
2893 (void) zfs_refcount_remove_many(
2894 &dbuf_caches[db->db_caching_status].size,
2895 db->db.db_size, db);
2897 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2898 DBUF_STAT_BUMPDOWN(metadata_cache_count);
2900 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2901 DBUF_STAT_BUMPDOWN(cache_count);
2902 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2905 db->db_caching_status = DB_NO_CACHE;
2908 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2909 ASSERT(db->db_data_pending == NULL);
2910 ASSERT(list_is_empty(&db->db_dirty_records));
2912 db->db_state = DB_EVICTING;
2913 DTRACE_SET_STATE(db, "buffer eviction started");
2914 db->db_blkptr = NULL;
2917 * Now that db_state is DB_EVICTING, nobody else can find this via
2918 * the hash table. We can now drop db_mtx, which allows us to
2919 * acquire the dn_dbufs_mtx.
2921 mutex_exit(&db->db_mtx);
2926 if (db->db_blkid != DMU_BONUS_BLKID) {
2927 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2929 mutex_enter_nested(&dn->dn_dbufs_mtx,
2931 avl_remove(&dn->dn_dbufs, db);
2935 mutex_exit(&dn->dn_dbufs_mtx);
2937 * Decrementing the dbuf count means that the hold corresponding
2938 * to the removed dbuf is no longer discounted in dnode_move(),
2939 * so the dnode cannot be moved until after we release the hold.
2940 * The membar_producer() ensures visibility of the decremented
2941 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually release any lock.
2944 mutex_enter(&dn->dn_mtx);
2945 dnode_rele_and_unlock(dn, db, B_TRUE);
2946 db->db_dnode_handle = NULL;
2948 dbuf_hash_remove(db);
2953 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2955 db->db_parent = NULL;
2957 ASSERT(db->db_buf == NULL);
2958 ASSERT(db->db.db_data == NULL);
2959 ASSERT(db->db_hash_next == NULL);
2960 ASSERT(db->db_blkptr == NULL);
2961 ASSERT(db->db_data_pending == NULL);
2962 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2963 ASSERT(!multilist_link_active(&db->db_cache_link));
2966 * If this dbuf is referenced from an indirect dbuf,
2967 * decrement the ref count on the indirect dbuf.
2969 if (parent && parent != dndb) {
2970 mutex_enter(&parent->db_mtx);
2971 dbuf_rele_and_unlock(parent, db, B_TRUE);
2974 kmem_cache_free(dbuf_kmem_cache, db);
2975 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2979 * Note: While bpp will always be updated if the function returns success,
2980 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2981 * this happens when the dnode is the meta-dnode, or {user|group|project}used object.
2984 __attribute__((always_inline))
2986 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2987 dmu_buf_impl_t **parentp, blkptr_t **bpp)
2992 ASSERT(blkid != DMU_BONUS_BLKID);
2994 if (blkid == DMU_SPILL_BLKID) {
2995 mutex_enter(&dn->dn_mtx);
2996 if (dn->dn_have_spill &&
2997 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2998 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3001 dbuf_add_ref(dn->dn_dbuf, NULL);
3002 *parentp = dn->dn_dbuf;
3003 mutex_exit(&dn->dn_mtx);
3008 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3009 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3011 ASSERT3U(level * epbs, <, 64);
3012 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3014 * This assertion shouldn't trip as long as the max indirect block size
3015 * is less than 1M. The reason for this is that up to that point,
3016 * the number of levels required to address an entire object with blocks
3017 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
3018 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3019 * (i.e. we can address the entire object), objects will all use at most
3020 * N-1 levels and the assertion won't overflow. However, once epbs is
3021 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
3022 * enough to address an entire object, so objects will have 5 levels,
3023 * but then this assertion will overflow.
3025 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3026 * need to redo this logic to handle overflows.
3028 ASSERT(level >= nlevels ||
3029 ((nlevels - level - 1) * epbs) +
3030 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3031 if (level >= nlevels ||
3032 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3033 ((nlevels - level - 1) * epbs)) ||
3035 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3036 /* the buffer has no parent yet */
3037 return (SET_ERROR(ENOENT));
3038 } else if (level < nlevels-1) {
3039 /* this block is referenced from an indirect block */
3042 err = dbuf_hold_impl(dn, level + 1,
3043 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3047 err = dbuf_read(*parentp, NULL,
3048 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3050 dbuf_rele(*parentp, NULL);
3054 rw_enter(&(*parentp)->db_rwlock, RW_READER);
3055 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3056 (blkid & ((1ULL << epbs) - 1));
3057 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3058 ASSERT(BP_IS_HOLE(*bpp));
3059 rw_exit(&(*parentp)->db_rwlock);
3062 /* the block is referenced from the dnode */
3063 ASSERT3U(level, ==, nlevels-1);
3064 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3065 blkid < dn->dn_phys->dn_nblkptr);
3067 dbuf_add_ref(dn->dn_dbuf, NULL);
3068 *parentp = dn->dn_dbuf;
3070 *bpp = &dn->dn_phys->dn_blkptr[blkid];
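/*
 * Allocate and initialize a new dbuf for the given dnode, level, and block
 * id, and insert it into the dbuf hash table and the dnode's dbuf list. If
 * another thread inserts the same dbuf first, the existing one is returned
 * instead.
 */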
3075 static dmu_buf_impl_t *
3076 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3077 dmu_buf_impl_t *parent, blkptr_t *blkptr)
3079 objset_t *os = dn->dn_objset;
3080 dmu_buf_impl_t *db, *odb;
3082 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3083 ASSERT(dn->dn_type != DMU_OT_NONE);
3085 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3087 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3088 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3091 db->db.db_object = dn->dn_object;
3092 db->db_level = level;
3093 db->db_blkid = blkid;
3094 db->db_dirtycnt = 0;
3095 db->db_dnode_handle = dn->dn_handle;
3096 db->db_parent = parent;
3097 db->db_blkptr = blkptr;
3100 db->db_user_immediate_evict = FALSE;
3101 db->db_freed_in_flight = FALSE;
3102 db->db_pending_evict = FALSE;
3104 if (blkid == DMU_BONUS_BLKID) {
3105 ASSERT3P(parent, ==, dn->dn_dbuf);
3106 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3107 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3108 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3109 db->db.db_offset = DMU_BONUS_BLKID;
3110 db->db_state = DB_UNCACHED;
3111 DTRACE_SET_STATE(db, "bonus buffer created");
3112 db->db_caching_status = DB_NO_CACHE;
3113 /* the bonus dbuf is not placed in the hash table */
3114 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3116 } else if (blkid == DMU_SPILL_BLKID) {
3117 db->db.db_size = (blkptr != NULL) ?
3118 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3119 db->db.db_offset = 0;
3122 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3123 db->db.db_size = blocksize;
3124 db->db.db_offset = db->db_blkid * blocksize;
3128 * Hold the dn_dbufs_mtx while we get the new dbuf
3129 * in the hash table *and* added to the dbufs list.
3130 * This prevents a possible deadlock with someone
3131 * trying to look up this dbuf before it's added to the dn_dbufs list.
3134 mutex_enter(&dn->dn_dbufs_mtx);
3135 db->db_state = DB_EVICTING; /* not worth logging this state change */
3136 if ((odb = dbuf_hash_insert(db)) != NULL) {
3137 /* someone else inserted it first */
3138 mutex_exit(&dn->dn_dbufs_mtx);
3139 kmem_cache_free(dbuf_kmem_cache, db);
3140 DBUF_STAT_BUMP(hash_insert_race);
3143 avl_add(&dn->dn_dbufs, db);
3145 db->db_state = DB_UNCACHED;
3146 DTRACE_SET_STATE(db, "regular buffer created");
3147 db->db_caching_status = DB_NO_CACHE;
3148 mutex_exit(&dn->dn_dbufs_mtx);
3149 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3151 if (parent && parent != dn->dn_dbuf)
3152 dbuf_add_ref(parent, db);
3154 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3155 zfs_refcount_count(&dn->dn_holds) > 0);
3156 (void) zfs_refcount_add(&dn->dn_holds, db);
3158 dprintf_dbuf(db, "db=%p\n", db);
3164 * This function returns a block pointer and information about the object,
3165 * given a dnode and a block. This is a publicly accessible version of
3166 * dbuf_findbp that only returns some information, rather than the
3167 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3168 * should be locked as (at least) a reader.
3171 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3172 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3174 dmu_buf_impl_t *dbp = NULL;
3177 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3179 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3183 dbuf_rele(dbp, NULL);
3184 if (datablkszsec != NULL)
3185 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3186 if (indblkshift != NULL)
3187 *indblkshift = dn->dn_phys->dn_indblkshift;
3193 typedef struct dbuf_prefetch_arg {
3194 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3195 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3196 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3197 int dpa_curlevel; /* The current level that we're reading */
3198 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3199 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3200 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3201 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3202 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3203 void *dpa_arg; /* prefetch completion arg */
3204 } dbuf_prefetch_arg_t;
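/*
 * Invoke the prefetch completion callback, if one was registered, and free
 * the prefetch argument structure.
 */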
3207 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3209 if (dpa->dpa_cb != NULL) {
3210 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3211 dpa->dpa_zb.zb_blkid, io_done);
3213 kmem_free(dpa, sizeof (*dpa));
3217 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3218 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3220 (void) zio, (void) zb, (void) iobp;
3221 dbuf_prefetch_arg_t *dpa = private;
3224 arc_buf_destroy(abuf, private);
3226 dbuf_prefetch_fini(dpa, B_TRUE);
3230 * Actually issue the prefetch read for the block given.
3233 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3235 ASSERT(!BP_IS_REDACTED(bp) ||
3236 dsl_dataset_feature_is_active(
3237 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3238 SPA_FEATURE_REDACTED_DATASETS));
3240 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3241 return (dbuf_prefetch_fini(dpa, B_FALSE));
3243 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3244 arc_flags_t aflags =
3245 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3248 /* dnodes are always read as raw and then converted later */
3249 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3250 dpa->dpa_curlevel == 0)
3251 zio_flags |= ZIO_FLAG_RAW;
3253 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3254 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3255 ASSERT(dpa->dpa_zio != NULL);
3256 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3257 dbuf_issue_final_prefetch_done, dpa,
3258 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3262 * Called when an indirect block above our prefetch target is read in. This
3263 * will either read in the next indirect block down the tree or issue the actual
3264 * prefetch if the next block down is our target.
3267 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3268 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3270 (void) zb, (void) iobp;
3271 dbuf_prefetch_arg_t *dpa = private;
3273 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3274 ASSERT3S(dpa->dpa_curlevel, >, 0);
3277 ASSERT(zio == NULL || zio->io_error != 0);
3278 dbuf_prefetch_fini(dpa, B_TRUE);
3281 ASSERT(zio == NULL || zio->io_error == 0);
3284 * The dpa_dnode is only valid if we are called with a NULL
3285 * zio. This indicates that the arc_read() returned without
3286 * first calling zio_read() to issue a physical read. Once
3287 * a physical read is made the dpa_dnode must be invalidated
3288 * as the locks guarding it may have been dropped. If the
3289 * dpa_dnode is still valid, then we want to add it to the dbuf
3290 * cache. To do so, we must hold the dbuf associated with the block
3291 * we just prefetched, read its contents so that we associate it
3292 * with an arc_buf_t, and then release it.
3295 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3296 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3297 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3299 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3301 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3303 dpa->dpa_dnode = NULL;
3304 } else if (dpa->dpa_dnode != NULL) {
3305 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3306 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3307 dpa->dpa_zb.zb_level));
3308 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3309 dpa->dpa_curlevel, curblkid, FTAG);
3311 arc_buf_destroy(abuf, private);
3312 dbuf_prefetch_fini(dpa, B_TRUE);
3315 (void) dbuf_read(db, NULL,
3316 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3317 dbuf_rele(db, FTAG);
3320 dpa->dpa_curlevel--;
3321 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3322 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3323 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3324 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3326 ASSERT(!BP_IS_REDACTED(bp) ||
3327 dsl_dataset_feature_is_active(
3328 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3329 SPA_FEATURE_REDACTED_DATASETS));
3330 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3331 arc_buf_destroy(abuf, private);
3332 dbuf_prefetch_fini(dpa, B_TRUE);
3334 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3335 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3336 dbuf_issue_final_prefetch(dpa, bp);
3338 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3339 zbookmark_phys_t zb;
3341 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3342 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3343 iter_aflags |= ARC_FLAG_L2CACHE;
3345 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3347 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3348 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3350 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3351 bp, dbuf_prefetch_indirect_done, dpa,
3352 ZIO_PRIORITY_SYNC_READ,
3353 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3357 arc_buf_destroy(abuf, private);
3361 * Issue prefetch reads for the given block on the given level. If the indirect
3362 * blocks above that block are not in memory, we will read them in
3363 * asynchronously. As a result, this call never blocks waiting for a read to
3364 * complete. Note that the prefetch might fail if the dataset is encrypted and
3365 * the encryption key is unmapped before the IO completes.
3368 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3369 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3373 int epbs, nlevels, curlevel;
3376 ASSERT(blkid != DMU_BONUS_BLKID);
3377 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3379 if (blkid > dn->dn_maxblkid)
3382 if (level == 0 && dnode_block_freed(dn, blkid))
3386 * This dnode hasn't been written to disk yet, so there's nothing to prefetch.
3389 nlevels = dn->dn_phys->dn_nlevels;
3390 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3393 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3394 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3397 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3400 mutex_exit(&db->db_mtx);
3402 * This dbuf already exists. It is either CACHED, or
3403 * (we assume) about to be read or filled.
3409 * Find the closest ancestor (indirect block) of the target block
3410 * that is present in the cache. In this indirect block, we will
3411 * find the bp that is at curlevel, curblkid.
3415 while (curlevel < nlevels - 1) {
3416 int parent_level = curlevel + 1;
3417 uint64_t parent_blkid = curblkid >> epbs;
3420 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3421 FALSE, TRUE, FTAG, &db) == 0) {
3422 blkptr_t *bpp = db->db_buf->b_data;
3423 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3424 dbuf_rele(db, FTAG);
3428 curlevel = parent_level;
3429 curblkid = parent_blkid;
3432 if (curlevel == nlevels - 1) {
3433 /* No cached indirect blocks found. */
3434 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3435 bp = dn->dn_phys->dn_blkptr[curblkid];
3437 ASSERT(!BP_IS_REDACTED(&bp) ||
3438 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3439 SPA_FEATURE_REDACTED_DATASETS));
3440 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3443 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3445 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3448 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3449 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3450 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3451 dn->dn_object, level, blkid);
3452 dpa->dpa_curlevel = curlevel;
3453 dpa->dpa_prio = prio;
3454 dpa->dpa_aflags = aflags;
3455 dpa->dpa_spa = dn->dn_objset->os_spa;
3456 dpa->dpa_dnode = dn;
3457 dpa->dpa_epbs = epbs;
3462 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3463 if (dnode_level_is_l2cacheable(&bp, dn, level))
3464 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3467 * If we have the indirect just above us, no need to do the asynchronous
3468 * prefetch chain; we'll just run the last step ourselves. If we're at
3469 * a higher level, though, we want to issue the prefetches for all the
3470 * indirect blocks asynchronously, so we can go on with whatever we were doing.
3473 if (curlevel == level) {
3474 ASSERT3U(curblkid, ==, blkid);
3475 dbuf_issue_final_prefetch(dpa, &bp);
3477 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3478 zbookmark_phys_t zb;
3480 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3481 if (dnode_level_is_l2cacheable(&bp, dn, level))
3482 iter_aflags |= ARC_FLAG_L2CACHE;
3484 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3485 dn->dn_object, curlevel, curblkid);
3486 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3487 &bp, dbuf_prefetch_indirect_done, dpa,
3488 ZIO_PRIORITY_SYNC_READ,
3489 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3493 * We use pio here instead of dpa_zio since it's possible that
3494 * dpa may have already been freed.
3500 cb(arg, level, blkid, B_FALSE);
3505 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3509 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3513 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3514 * the case of encrypted, compressed and uncompressed buffers by
3515 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3516 * arc_alloc_compressed_buf() or arc_alloc_buf().
3518 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3520 noinline static void
3521 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3523 dbuf_dirty_record_t *dr = db->db_data_pending;
3524 arc_buf_t *data = dr->dt.dl.dr_data;
3525 enum zio_compress compress_type = arc_get_compression(data);
3526 uint8_t complevel = arc_get_complevel(data);
3528 if (arc_is_encrypted(data)) {
3529 boolean_t byteorder;
3530 uint8_t salt[ZIO_DATA_SALT_LEN];
3531 uint8_t iv[ZIO_DATA_IV_LEN];
3532 uint8_t mac[ZIO_DATA_MAC_LEN];
3534 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3535 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3536 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3537 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3538 compress_type, complevel));
3539 } else if (compress_type != ZIO_COMPRESS_OFF) {
3540 dbuf_set_data(db, arc_alloc_compressed_buf(
3541 dn->dn_objset->os_spa, db, arc_buf_size(data),
3542 arc_buf_lsize(data), compress_type, complevel));
3544 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3545 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3548 rw_enter(&db->db_rwlock, RW_WRITER);
3549 memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3550 rw_exit(&db->db_rwlock);
3554 * Returns with db_holds incremented, and db_mtx not held.
3555 * Note: dn_struct_rwlock must be held.
3558 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3559 boolean_t fail_sparse, boolean_t fail_uncached,
3560 const void *tag, dmu_buf_impl_t **dbp)
3562 dmu_buf_impl_t *db, *parent = NULL;
3564 /* If the pool has been created, verify the tx_sync_lock is not held */
3565 spa_t *spa = dn->dn_objset->os_spa;
3566 dsl_pool_t *dp = spa->spa_dsl_pool;
3568 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3571 ASSERT(blkid != DMU_BONUS_BLKID);
3572 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3573 ASSERT3U(dn->dn_nlevels, >, level);
3577 /* dbuf_find() returns with db_mtx held */
3578 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
3581 blkptr_t *bp = NULL;
3585 return (SET_ERROR(ENOENT));
3587 ASSERT3P(parent, ==, NULL);
3588 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3590 if (err == 0 && bp && BP_IS_HOLE(bp))
3591 err = SET_ERROR(ENOENT);
3594 dbuf_rele(parent, NULL);
3598 if (err && err != ENOENT)
3600 db = dbuf_create(dn, level, blkid, parent, bp);
3603 if (fail_uncached && db->db_state != DB_CACHED) {
3604 mutex_exit(&db->db_mtx);
3605 return (SET_ERROR(ENOENT));
3608 if (db->db_buf != NULL) {
3609 arc_buf_access(db->db_buf);
3610 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3613 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3616 * If this buffer is currently syncing out, and we are
3617 * still referencing it from db_data, we need to make a copy
3618 * of it in case we decide we want to dirty it again in this txg.
3620 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3621 dn->dn_object != DMU_META_DNODE_OBJECT &&
3622 db->db_state == DB_CACHED && db->db_data_pending) {
3623 dbuf_dirty_record_t *dr = db->db_data_pending;
3624 if (dr->dt.dl.dr_data == db->db_buf)
3625 dbuf_hold_copy(dn, db);
3628 if (multilist_link_active(&db->db_cache_link)) {
3629 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3630 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3631 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3633 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3634 (void) zfs_refcount_remove_many(
3635 &dbuf_caches[db->db_caching_status].size,
3636 db->db.db_size, db);
3638 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3639 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3641 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3642 DBUF_STAT_BUMPDOWN(cache_count);
3643 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3646 db->db_caching_status = DB_NO_CACHE;
3648 (void) zfs_refcount_add(&db->db_holds, tag);
3650 mutex_exit(&db->db_mtx);
3652 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3654 dbuf_rele(parent, NULL);
3656 ASSERT3P(DB_DNODE(db), ==, dn);
3657 ASSERT3U(db->db_blkid, ==, blkid);
3658 ASSERT3U(db->db_level, ==, level);
3665 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3667 return (dbuf_hold_level(dn, 0, blkid, tag));
3671 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3674 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3675 return (err ? NULL : db);
3679 dbuf_create_bonus(dnode_t *dn)
3681 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3683 ASSERT(dn->dn_bonus == NULL);
3684 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3688 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3690 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3692 if (db->db_blkid != DMU_SPILL_BLKID)
3693 return (SET_ERROR(ENOTSUP));
3695 blksz = SPA_MINBLOCKSIZE;
3696 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3697 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3699 dbuf_new_size(db, blksz, tx);
3705 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3707 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3710 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3712 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3714 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3715 VERIFY3S(holds, >, 1);
3718 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3720 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3723 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3724 dmu_buf_impl_t *found_db;
3725 boolean_t result = B_FALSE;
3727 if (blkid == DMU_BONUS_BLKID)
3728 found_db = dbuf_find_bonus(os, obj);
3730 found_db = dbuf_find(os, obj, 0, blkid);
3732 if (found_db != NULL) {
3733 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3734 (void) zfs_refcount_add(&db->db_holds, tag);
3737 mutex_exit(&found_db->db_mtx);
3743 * If you call dbuf_rele() you had better not be referencing the dnode handle
3744 * unless you have some other direct or indirect hold on the dnode. (An indirect
3745 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3746 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3747 * dnode's parent dbuf evicting its dnode handles.
3750 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3752 mutex_enter(&db->db_mtx);
3753 dbuf_rele_and_unlock(db, tag, B_FALSE);
3757 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3759 dbuf_rele((dmu_buf_impl_t *)db, tag);
3763 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3764 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3765 * argument should be set if we are already in the dbuf-evicting code
3766 * path, in which case we don't want to recursively evict. This allows us to
3767 * avoid deeply nested stacks that would have a call flow similar to this:
3769 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
 *	^						|
 *	|						|
3772 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3776 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3781 ASSERT(MUTEX_HELD(&db->db_mtx));
3785 * Remove the reference to the dbuf before removing its hold on the
3786 * dnode so we can guarantee in dnode_move() that a referenced bonus
3787 * buffer has a corresponding dnode hold.
3789 holds = zfs_refcount_remove(&db->db_holds, tag);
3793 * We can't freeze indirects if there is a possibility that they
3794 * may be modified in the current syncing context.
3796 if (db->db_buf != NULL &&
3797 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3798 arc_buf_freeze(db->db_buf);
3801 if (holds == db->db_dirtycnt &&
3802 db->db_level == 0 && db->db_user_immediate_evict)
3803 dbuf_evict_user(db);
3806 if (db->db_blkid == DMU_BONUS_BLKID) {
3808 boolean_t evict_dbuf = db->db_pending_evict;
3811 * If the dnode moves here, we cannot cross this
3812 * barrier until the move completes.
3817 atomic_dec_32(&dn->dn_dbufs_count);
3820 * Decrementing the dbuf count means that the bonus
3821 * buffer's dnode hold is no longer discounted in
3822 * dnode_move(). The dnode cannot move until after
3823 * the dnode_rele() below.
3828 * Do not reference db after its lock is dropped.
3829 * Another thread may evict it.
3831 mutex_exit(&db->db_mtx);
3834 dnode_evict_bonus(dn);
3837 } else if (db->db_buf == NULL) {
3839 * This is a special case: we never associated this
3840 * dbuf with any data allocated from the ARC.
3842 ASSERT(db->db_state == DB_UNCACHED ||
3843 db->db_state == DB_NOFILL);
3845 } else if (arc_released(db->db_buf)) {
3847 * This dbuf has anonymous data associated with it.
3851 boolean_t do_arc_evict = B_FALSE;
3853 spa_t *spa = dmu_objset_spa(db->db_objset);
3855 if (!DBUF_IS_CACHEABLE(db) &&
3856 db->db_blkptr != NULL &&
3857 !BP_IS_HOLE(db->db_blkptr) &&
3858 !BP_IS_EMBEDDED(db->db_blkptr)) {
3859 do_arc_evict = B_TRUE;
3860 bp = *db->db_blkptr;
3863 if (!DBUF_IS_CACHEABLE(db) ||
3864 db->db_pending_evict) {
3866 } else if (!multilist_link_active(&db->db_cache_link)) {
3867 ASSERT3U(db->db_caching_status, ==,
3870 dbuf_cached_state_t dcs =
3871 dbuf_include_in_metadata_cache(db) ?
3872 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3873 db->db_caching_status = dcs;
3875 multilist_insert(&dbuf_caches[dcs].cache, db);
3876 uint64_t db_size = db->db.db_size;
3877 size = zfs_refcount_add_many(
3878 &dbuf_caches[dcs].size, db_size, db);
3879 uint8_t db_level = db->db_level;
3880 mutex_exit(&db->db_mtx);
3882 if (dcs == DB_DBUF_METADATA_CACHE) {
3883 DBUF_STAT_BUMP(metadata_cache_count);
3885 metadata_cache_size_bytes_max,
3888 DBUF_STAT_BUMP(cache_count);
3889 DBUF_STAT_MAX(cache_size_bytes_max,
3891 DBUF_STAT_BUMP(cache_levels[db_level]);
3893 cache_levels_bytes[db_level],
3897 if (dcs == DB_DBUF_CACHE && !evicting)
3898 dbuf_evict_notify(size);
3902 arc_freed(spa, &bp);
3905 mutex_exit(&db->db_mtx);
3910 #pragma weak dmu_buf_refcount = dbuf_refcount
3912 dbuf_refcount(dmu_buf_impl_t *db)
3914 return (zfs_refcount_count(&db->db_holds));
3918 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3921 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3923 mutex_enter(&db->db_mtx);
3924 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3925 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3926 mutex_exit(&db->db_mtx);
3932 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3933 dmu_buf_user_t *new_user)
3935 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3937 mutex_enter(&db->db_mtx);
3938 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3939 if (db->db_user == old_user)
3940 db->db_user = new_user;
3942 old_user = db->db_user;
3943 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3944 mutex_exit(&db->db_mtx);
3950 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3952 return (dmu_buf_replace_user(db_fake, NULL, user));
3956 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3958 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3960 db->db_user_immediate_evict = TRUE;
3961 return (dmu_buf_set_user(db_fake, user));
3965 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3967 return (dmu_buf_replace_user(db_fake, user, NULL));
3971 dmu_buf_get_user(dmu_buf_t *db_fake)
3973 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3975 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3976 return (db->db_user);
3980 dmu_buf_user_evict_wait(void)
3982 taskq_wait(dbu_evict_taskq);
3986 dmu_buf_get_blkptr(dmu_buf_t *db)
3988 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3989 return (dbi->db_blkptr);
3993 dmu_buf_get_objset(dmu_buf_t *db)
3995 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3996 return (dbi->db_objset);
4000 dmu_buf_dnode_enter(dmu_buf_t *db)
4002 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4003 DB_DNODE_ENTER(dbi);
4004 return (DB_DNODE(dbi));
4008 dmu_buf_dnode_exit(dmu_buf_t *db)
4010 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4015 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4017 /* ASSERT(dmu_tx_is_syncing(tx)) */
4018 ASSERT(MUTEX_HELD(&db->db_mtx));
4020 if (db->db_blkptr != NULL)
4023 if (db->db_blkid == DMU_SPILL_BLKID) {
4024 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4025 BP_ZERO(db->db_blkptr);
4028 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4030 * This buffer was allocated at a time when there were
4031 * no available blkptrs from the dnode, or it was
4032 * inappropriate to hook it in (i.e., nlevels mismatch).
4034 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4035 ASSERT(db->db_parent == NULL);
4036 db->db_parent = dn->dn_dbuf;
4037 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4040 dmu_buf_impl_t *parent = db->db_parent;
4041 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4043 ASSERT(dn->dn_phys->dn_nlevels > 1);
4044 if (parent == NULL) {
4045 mutex_exit(&db->db_mtx);
4046 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4047 parent = dbuf_hold_level(dn, db->db_level + 1,
4048 db->db_blkid >> epbs, db);
4049 rw_exit(&dn->dn_struct_rwlock);
4050 mutex_enter(&db->db_mtx);
4051 db->db_parent = parent;
4053 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4054 (db->db_blkid & ((1ULL << epbs) - 1));
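/*
 * Sync a dirty bonus buffer by copying its contents directly into the
 * dnode_phys_t; the data reaches disk when the dnode itself is synced.
 */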
4060 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4062 dmu_buf_impl_t *db = dr->dr_dbuf;
4063 void *data = dr->dt.dl.dr_data;
4065 ASSERT0(db->db_level);
4066 ASSERT(MUTEX_HELD(&db->db_mtx));
4067 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4068 ASSERT(data != NULL);
4070 dnode_t *dn = dr->dr_dnode;
4071 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4072 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4073 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4075 dbuf_sync_leaf_verify_bonus_dnode(dr);
4077 dbuf_undirty_bonus(dr);
4078 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4082  * When syncing out a block of dnodes, adjust the block to deal with
4083 * encryption. Normally, we make sure the block is decrypted before writing
4084 * it. If we have crypt params, then we are writing a raw (encrypted) block,
4085 * from a raw receive. In this case, set the ARC buf's crypt params so
4086 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4089 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4092 dmu_buf_impl_t *db = dr->dr_dbuf;
4094 ASSERT(MUTEX_HELD(&db->db_mtx));
4095 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4096 ASSERT3U(db->db_level, ==, 0);
4098 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4099 zbookmark_phys_t zb;
4102 * Unfortunately, there is currently no mechanism for
4103 * syncing context to handle decryption errors. An error
4104 * here is only possible if an attacker maliciously
4105 * changed a dnode block and updated the associated
4106 * checksums going up the block tree.
4108 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4109 db->db.db_object, db->db_level, db->db_blkid);
4110 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4113 panic("Invalid dnode block MAC");
4114 } else if (dr->dt.dl.dr_has_raw_params) {
4115 (void) arc_release(dr->dt.dl.dr_data, db);
4116 arc_convert_to_raw(dr->dt.dl.dr_data,
4117 dmu_objset_id(db->db_objset),
4118 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4119 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4124 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4125  * is critical that we not allow the compiler to inline this function into
4126  * dbuf_sync_list(), thereby drastically bloating the stack usage.
4128 noinline static void
4129 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4131 dmu_buf_impl_t *db = dr->dr_dbuf;
4132 dnode_t *dn = dr->dr_dnode;
4134 ASSERT(dmu_tx_is_syncing(tx));
4136 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4138 mutex_enter(&db->db_mtx);
4140 ASSERT(db->db_level > 0);
4143 /* Read the block if it hasn't been read yet. */
4144 if (db->db_buf == NULL) {
4145 mutex_exit(&db->db_mtx);
4146 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4147 mutex_enter(&db->db_mtx);
4149 ASSERT3U(db->db_state, ==, DB_CACHED);
4150 ASSERT(db->db_buf != NULL);
4152 /* Indirect block size must match what the dnode thinks it is. */
4153 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4154 dbuf_check_blkptr(dn, db);
4156 /* Provide the pending dirty record to child dbufs */
4157 db->db_data_pending = dr;
4159 mutex_exit(&db->db_mtx);
4161 dbuf_write(dr, db->db_buf, tx);
4163 zio_t *zio = dr->dr_zio;
4164 mutex_enter(&dr->dt.di.dr_mtx);
4165 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4166 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4167 mutex_exit(&dr->dt.di.dr_mtx);
4172 * Verify that the size of the data in our bonus buffer does not exceed
4173 * its recorded size.
4175 * The purpose of this verification is to catch any cases in development
4176  * where the size of a phys structure (i.e., space_map_phys_t) grows and,
4177 * due to incorrect feature management, older pools expect to read more
4178 * data even though they didn't actually write it to begin with.
4180  * For example, this would catch an error in the feature logic where we
4181 * open an older pool and we expect to write the space map histogram of
4182 * a space map with size SPACE_MAP_SIZE_V0.
4185 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4188 dnode_t *dn = dr->dr_dnode;
4191 * Encrypted bonus buffers can have data past their bonuslen.
4192 * Skip the verification of these blocks.
4194 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4197 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4198 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4199 ASSERT3U(bonuslen, <=, maxbonuslen);
4201 arc_buf_t *datap = dr->dt.dl.dr_data;
4202 char *datap_end = ((char *)datap) + bonuslen;
4203 char *datap_max = ((char *)datap) + maxbonuslen;
4205 /* ensure that everything is zero after our data */
4206 for (; datap_end < datap_max; datap_end++)
4207 ASSERT(*datap_end == 0);
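/*
 * Find the block pointer that a lightweight (dbuf-less) dirty record will
 * overwrite: either one of the dnode's embedded blkptrs or a slot in its
 * level-1 parent indirect block.
 */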
4212 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4214 /* This must be a lightweight dirty record. */
4215 ASSERT3P(dr->dr_dbuf, ==, NULL);
4216 dnode_t *dn = dr->dr_dnode;
4218 if (dn->dn_phys->dn_nlevels == 1) {
4219 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4220 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4222 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4223 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4224 VERIFY3U(parent_db->db_level, ==, 1);
4225 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4226 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4227 blkptr_t *bp = parent_db->db.db_data;
4228 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
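/*
 * zio "ready" callback for lightweight writes: charge the space delta to
 * the dnode, extend dn_maxblkid if needed, set the BP fill count, and
 * copy the new BP into the parent under its rwlock.
 */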
4233 dbuf_lightweight_ready(zio_t *zio)
4235 dbuf_dirty_record_t *dr = zio->io_private;
4236 blkptr_t *bp = zio->io_bp;
4238 if (zio->io_error != 0)
4241 dnode_t *dn = dr->dr_dnode;
4243 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4244 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4245 int64_t delta = bp_get_dsize_sync(spa, bp) -
4246 bp_get_dsize_sync(spa, bp_orig);
4247 dnode_diduse_space(dn, delta);
4249 uint64_t blkid = dr->dt.dll.dr_blkid;
4250 mutex_enter(&dn->dn_mtx);
4251 if (blkid > dn->dn_phys->dn_maxblkid) {
4252 ASSERT0(dn->dn_objset->os_raw_receive);
4253 dn->dn_phys->dn_maxblkid = blkid;
4255 mutex_exit(&dn->dn_mtx);
4257 if (!BP_IS_EMBEDDED(bp)) {
4258 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4259 BP_SET_FILL(bp, fill);
4262 dmu_buf_impl_t *parent_db;
4263 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4264 if (dr->dr_parent == NULL) {
4265 parent_db = dn->dn_dbuf;
4267 parent_db = dr->dr_parent->dr_dbuf;
4269 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4271 rw_exit(&parent_db->db_rwlock);
4275 dbuf_lightweight_physdone(zio_t *zio)
4277 dbuf_dirty_record_t *dr = zio->io_private;
4278 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4279 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4282 * The callback will be called io_phys_children times. Retire one
4283 * portion of our dirty space each time we are called. Any rounding
4284 * error will be cleaned up by dbuf_lightweight_done().
4286 int delta = dr->dr_accounted / zio->io_phys_children;
4287 dsl_pool_undirty_space(dp, delta, zio->io_txg);
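/*
 * zio "done" callback for lightweight writes: record the block kill/birth
 * in the dataset (unless this was a rewrite or nopwrite), retire any
 * remaining dirty-space accounting, and free the dirty record.
 */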
4291 dbuf_lightweight_done(zio_t *zio)
4293 dbuf_dirty_record_t *dr = zio->io_private;
4295 VERIFY0(zio->io_error);
4297 objset_t *os = dr->dr_dnode->dn_objset;
4298 dmu_tx_t *tx = os->os_synctx;
4300 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4301 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4303 dsl_dataset_t *ds = os->os_dsl_dataset;
4304 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4305 dsl_dataset_block_born(ds, zio->io_bp, tx);
4309 * See comment in dbuf_write_done().
4311 if (zio->io_phys_children == 0) {
4312 dsl_pool_undirty_space(dmu_objset_pool(os),
4313 dr->dr_accounted, zio->io_txg);
4315 dsl_pool_undirty_space(dmu_objset_pool(os),
4316 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4319 abd_free(dr->dt.dll.dr_abd);
4320 kmem_free(dr, sizeof (*dr));
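/*
 * Issue the write for a lightweight dirty record. These records carry
 * their data in an ABD rather than in a dbuf/ARC buffer, so they are
 * written with zio_write() directly.
 */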
4323 noinline static void
4324 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4326 dnode_t *dn = dr->dr_dnode;
4328 if (dn->dn_phys->dn_nlevels == 1) {
4331 pio = dr->dr_parent->dr_zio;
4334 zbookmark_phys_t zb = {
4335 .zb_objset = dmu_objset_id(dn->dn_objset),
4336 .zb_object = dn->dn_object,
4338 .zb_blkid = dr->dt.dll.dr_blkid,
4342 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4343 * will have the old BP in dbuf_lightweight_done().
4345 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4347 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4348 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4349 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4350 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4351 dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4352 ZIO_PRIORITY_ASYNC_WRITE,
4353 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4355 zio_nowait(dr->dr_zio);
4359 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4360  * critical that we not allow the compiler to inline this function into
4361  * dbuf_sync_list(), thereby drastically bloating the stack usage.
4363 noinline static void
4364 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4366 arc_buf_t **datap = &dr->dt.dl.dr_data;
4367 dmu_buf_impl_t *db = dr->dr_dbuf;
4368 dnode_t *dn = dr->dr_dnode;
4370 uint64_t txg = tx->tx_txg;
4372 ASSERT(dmu_tx_is_syncing(tx));
4374 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4376 mutex_enter(&db->db_mtx);
4378 * To be synced, we must be dirtied. But we
4379 * might have been freed after the dirty.
4381 if (db->db_state == DB_UNCACHED) {
4382 /* This buffer has been freed since it was dirtied */
4383 ASSERT(db->db.db_data == NULL);
4384 } else if (db->db_state == DB_FILL) {
4385 /* This buffer was freed and is now being re-filled */
4386 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4388 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4392 if (db->db_blkid == DMU_SPILL_BLKID) {
4393 mutex_enter(&dn->dn_mtx);
4394 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4396 * In the previous transaction group, the bonus buffer
4397 * was entirely used to store the attributes for the
4398 * dnode which overrode the dn_spill field. However,
4399 * when adding more attributes to the file a spill
4400 * block was required to hold the extra attributes.
4402 * Make sure to clear the garbage left in the dn_spill
4403 * field from the previous attributes in the bonus
4404 * buffer. Otherwise, after writing out the spill
4405 * block to the new allocated dva, it will free
4406 * the old block pointed to by the invalid dn_spill.
4408 db->db_blkptr = NULL;
4410 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4411 mutex_exit(&dn->dn_mtx);
4415 * If this is a bonus buffer, simply copy the bonus data into the
4416 * dnode. It will be written out when the dnode is synced (and it
4417 	 * will be synced, since it must have been dirty for dbuf_sync to be called).
4420 if (db->db_blkid == DMU_BONUS_BLKID) {
4421 ASSERT(dr->dr_dbuf == db);
4422 dbuf_sync_bonus(dr, tx);
4429 * This function may have dropped the db_mtx lock allowing a dmu_sync
4430 * operation to sneak in. As a result, we need to ensure that we
4431 * don't check the dr_override_state until we have returned from
4432 * dbuf_check_blkptr.
4434 dbuf_check_blkptr(dn, db);
4437 * If this buffer is in the middle of an immediate write,
4438 * wait for the synchronous IO to complete.
4440 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4441 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4442 cv_wait(&db->db_changed, &db->db_mtx);
4443 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4447 * If this is a dnode block, ensure it is appropriately encrypted
4448 * or decrypted, depending on what we are writing to it this txg.
4450 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4451 dbuf_prepare_encrypted_dnode_leaf(dr);
4453 if (db->db_state != DB_NOFILL &&
4454 dn->dn_object != DMU_META_DNODE_OBJECT &&
4455 zfs_refcount_count(&db->db_holds) > 1 &&
4456 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4457 *datap == db->db_buf) {
4459 * If this buffer is currently "in use" (i.e., there
4460 * are active holds and db_data still references it),
4461 * then make a copy before we start the write so that
4462 		 * any modifications from the open txg will not leak into this write.
4465 * NOTE: this copy does not need to be made for
4466 * objects only modified in the syncing context (e.g.
4467 		 * dnode blocks).
4469 int psize = arc_buf_size(*datap);
4470 int lsize = arc_buf_lsize(*datap);
4471 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4472 enum zio_compress compress_type = arc_get_compression(*datap);
4473 uint8_t complevel = arc_get_complevel(*datap);
4475 if (arc_is_encrypted(*datap)) {
4476 boolean_t byteorder;
4477 uint8_t salt[ZIO_DATA_SALT_LEN];
4478 uint8_t iv[ZIO_DATA_IV_LEN];
4479 uint8_t mac[ZIO_DATA_MAC_LEN];
4481 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4482 *datap = arc_alloc_raw_buf(os->os_spa, db,
4483 dmu_objset_id(os), byteorder, salt, iv, mac,
4484 dn->dn_type, psize, lsize, compress_type,
4486 } else if (compress_type != ZIO_COMPRESS_OFF) {
4487 ASSERT3U(type, ==, ARC_BUFC_DATA);
4488 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4489 psize, lsize, compress_type, complevel);
4491 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4493 memcpy((*datap)->b_data, db->db.db_data, psize);
4495 db->db_data_pending = dr;
4497 mutex_exit(&db->db_mtx);
4499 dbuf_write(dr, *datap, tx);
4501 ASSERT(!list_link_active(&dr->dr_dirty_node));
4502 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4503 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4505 zio_nowait(dr->dr_zio);
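/*
 * Sync all dirty records on the given list at the given level. A record
 * that already has a zio belongs to the meta-dnode and marks the point
 * where we stop, so the caller can zio_wait() those I/Os.
 */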
4510 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4512 dbuf_dirty_record_t *dr;
4514 while ((dr = list_head(list))) {
4515 if (dr->dr_zio != NULL) {
4517 * If we find an already initialized zio then we
4518 * are processing the meta-dnode, and we have finished.
4519 * The dbufs for all dnodes are put back on the list
4520 * during processing, so that we can zio_wait()
4521 * these IOs after initiating all child IOs.
4523 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4524 DMU_META_DNODE_OBJECT);
4527 list_remove(list, dr);
4528 if (dr->dr_dbuf == NULL) {
4529 dbuf_sync_lightweight(dr, tx);
4531 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4532 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4533 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4535 if (dr->dr_dbuf->db_level > 0)
4536 dbuf_sync_indirect(dr, tx);
4538 dbuf_sync_leaf(dr, tx);
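/*
 * zio "ready" callback for dbuf writes: charge the space delta to the
 * dnode, update dn_maxblkid and the BP fill count, and copy the finished
 * BP into the parent (indirect block or dnode) under the parent lock.
 */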
4544 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4547 dmu_buf_impl_t *db = vdb;
4549 blkptr_t *bp = zio->io_bp;
4550 blkptr_t *bp_orig = &zio->io_bp_orig;
4551 spa_t *spa = zio->io_spa;
4556 ASSERT3P(db->db_blkptr, !=, NULL);
4557 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4561 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4562 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4563 zio->io_prev_space_delta = delta;
4565 if (bp->blk_birth != 0) {
4566 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4567 BP_GET_TYPE(bp) == dn->dn_type) ||
4568 (db->db_blkid == DMU_SPILL_BLKID &&
4569 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4570 BP_IS_EMBEDDED(bp));
4571 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4574 mutex_enter(&db->db_mtx);
4577 if (db->db_blkid == DMU_SPILL_BLKID) {
4578 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4579 ASSERT(!(BP_IS_HOLE(bp)) &&
4580 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4584 if (db->db_level == 0) {
4585 mutex_enter(&dn->dn_mtx);
4586 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4587 db->db_blkid != DMU_SPILL_BLKID) {
4588 ASSERT0(db->db_objset->os_raw_receive);
4589 dn->dn_phys->dn_maxblkid = db->db_blkid;
4591 mutex_exit(&dn->dn_mtx);
4593 if (dn->dn_type == DMU_OT_DNODE) {
4595 while (i < db->db.db_size) {
4597 (void *)(((char *)db->db.db_data) + i);
4599 i += DNODE_MIN_SIZE;
4600 if (dnp->dn_type != DMU_OT_NONE) {
4602 i += dnp->dn_extra_slots *
4607 if (BP_IS_HOLE(bp)) {
4614 blkptr_t *ibp = db->db.db_data;
4615 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4616 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4617 if (BP_IS_HOLE(ibp))
4619 fill += BP_GET_FILL(ibp);
4624 if (!BP_IS_EMBEDDED(bp))
4625 BP_SET_FILL(bp, fill);
4627 mutex_exit(&db->db_mtx);
4629 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4630 *db->db_blkptr = *bp;
4631 dmu_buf_unlock_parent(db, dblt, FTAG);
4635 * This function gets called just prior to running through the compression
4636 * stage of the zio pipeline. If we're an indirect block comprised of only
4637 * holes, then we want this indirect to be compressed away to a hole. In
4638 * order to do that we must zero out any information about the holes that
4639  * this indirect points to before we try to compress it.
4642 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4644 (void) zio, (void) buf;
4645 dmu_buf_impl_t *db = vdb;
4648 unsigned int epbs, i;
4650 ASSERT3U(db->db_level, >, 0);
4653 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4654 ASSERT3U(epbs, <, 31);
4656 /* Determine if all our children are holes */
4657 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4658 if (!BP_IS_HOLE(bp))
4663 * If all the children are holes, then zero them all out so that
4664 * we may get compressed away.
4666 if (i == 1ULL << epbs) {
4668 * We only found holes. Grab the rwlock to prevent
4669 * anybody from reading the blocks we're about to
4672 rw_enter(&db->db_rwlock, RW_WRITER);
4673 memset(db->db.db_data, 0, db->db.db_size);
4674 rw_exit(&db->db_rwlock);
4680 * The SPA will call this callback several times for each zio - once
4681 * for every physical child i/o (zio->io_phys_children times). This
4682 * allows the DMU to monitor the progress of each logical i/o. For example,
4683 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4684 * block. There may be a long delay before all copies/fragments are completed,
4685  * so this callback allows us to retire dirty space gradually, as the physical i/os complete.
4689 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4692 dmu_buf_impl_t *db = arg;
4693 objset_t *os = db->db_objset;
4694 dsl_pool_t *dp = dmu_objset_pool(os);
4695 dbuf_dirty_record_t *dr;
4698 dr = db->db_data_pending;
4699 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4702 * The callback will be called io_phys_children times. Retire one
4703 * portion of our dirty space each time we are called. Any rounding
4704 * error will be cleaned up by dbuf_write_done().
4706 delta = dr->dr_accounted / zio->io_phys_children;
4707 dsl_pool_undirty_space(dp, delta, zio->io_txg);
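/*
 * zio "done" callback for dbuf writes: finish the dataset block
 * accounting, detach the dirty record from the dbuf, free the private
 * ARC buffer (level 0) or child dirty state (indirect), and release the
 * hold taken when the dbuf was dirtied.
 */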
4711 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4714 dmu_buf_impl_t *db = vdb;
4715 blkptr_t *bp_orig = &zio->io_bp_orig;
4716 blkptr_t *bp = db->db_blkptr;
4717 objset_t *os = db->db_objset;
4718 dmu_tx_t *tx = os->os_synctx;
4720 ASSERT0(zio->io_error);
4721 ASSERT(db->db_blkptr == bp);
4724 * For nopwrites and rewrites we ensure that the bp matches our
4725 * original and bypass all the accounting.
4727 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4728 ASSERT(BP_EQUAL(bp, bp_orig));
4730 dsl_dataset_t *ds = os->os_dsl_dataset;
4731 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4732 dsl_dataset_block_born(ds, bp, tx);
4735 mutex_enter(&db->db_mtx);
4739 dbuf_dirty_record_t *dr = db->db_data_pending;
4740 dnode_t *dn = dr->dr_dnode;
4741 ASSERT(!list_link_active(&dr->dr_dirty_node));
4742 ASSERT(dr->dr_dbuf == db);
4743 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4744 list_remove(&db->db_dirty_records, dr);
4747 if (db->db_blkid == DMU_SPILL_BLKID) {
4748 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4749 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4750 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4754 if (db->db_level == 0) {
4755 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4756 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4757 if (db->db_state != DB_NOFILL) {
4758 if (dr->dt.dl.dr_data != db->db_buf)
4759 arc_buf_destroy(dr->dt.dl.dr_data, db);
4762 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4763 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4764 if (!BP_IS_HOLE(db->db_blkptr)) {
4765 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4767 ASSERT3U(db->db_blkid, <=,
4768 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4769 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4772 mutex_destroy(&dr->dt.di.dr_mtx);
4773 list_destroy(&dr->dt.di.dr_children);
4776 cv_broadcast(&db->db_changed);
4777 ASSERT(db->db_dirtycnt > 0);
4778 db->db_dirtycnt -= 1;
4779 db->db_data_pending = NULL;
4780 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4783 * If we didn't do a physical write in this ZIO and we
4784 * still ended up here, it means that the space of the
4785 * dbuf that we just released (and undirtied) above hasn't
4786 * been marked as undirtied in the pool's accounting.
4788 * Thus, we undirty that space in the pool's view of the
4789 * world here. For physical writes this type of update
4790 * happens in dbuf_write_physdone().
4792 	 * If we did a physical write, clean up any rounding errors
4793 * that came up due to writing multiple copies of a block
4794 * on disk [see dbuf_write_physdone()].
4796 if (zio->io_phys_children == 0) {
4797 dsl_pool_undirty_space(dmu_objset_pool(os),
4798 dr->dr_accounted, zio->io_txg);
4800 dsl_pool_undirty_space(dmu_objset_pool(os),
4801 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4804 kmem_free(dr, sizeof (dbuf_dirty_record_t));
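/*
 * NOFILL and override writes have no ARC buffer of their own; the thin
 * wrappers below forward the zio callbacks to the common
 * dbuf_write_ready()/dbuf_write_done() handlers.
 */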
4808 dbuf_write_nofill_ready(zio_t *zio)
4810 dbuf_write_ready(zio, NULL, zio->io_private);
4814 dbuf_write_nofill_done(zio_t *zio)
4816 dbuf_write_done(zio, NULL, zio->io_private);
4820 dbuf_write_override_ready(zio_t *zio)
4822 dbuf_dirty_record_t *dr = zio->io_private;
4823 dmu_buf_impl_t *db = dr->dr_dbuf;
4825 dbuf_write_ready(zio, NULL, db);
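/*
 * Completion callback for writes whose BP was supplied by open context
 * (e.g. dmu_sync()). If the final BP differs from the override BP, free
 * the overridden block before handing off to dbuf_write_done().
 */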
4829 dbuf_write_override_done(zio_t *zio)
4831 dbuf_dirty_record_t *dr = zio->io_private;
4832 dmu_buf_impl_t *db = dr->dr_dbuf;
4833 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4835 mutex_enter(&db->db_mtx);
4836 if (!BP_EQUAL(zio->io_bp, obp)) {
4837 if (!BP_IS_HOLE(obp))
4838 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4839 arc_release(dr->dt.dl.dr_data, db);
4841 mutex_exit(&db->db_mtx);
4843 dbuf_write_done(zio, NULL, db);
4845 if (zio->io_abd != NULL)
4846 abd_free(zio->io_abd);
4849 typedef struct dbuf_remap_impl_callback_arg {
4851 uint64_t drica_blk_birth;
4853 } dbuf_remap_impl_callback_arg_t;
4856 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4859 dbuf_remap_impl_callback_arg_t *drica = arg;
4860 objset_t *os = drica->drica_os;
4861 spa_t *spa = dmu_objset_spa(os);
4862 dmu_tx_t *tx = drica->drica_tx;
4864 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4866 if (os == spa_meta_objset(spa)) {
4867 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4869 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4870 size, drica->drica_blk_birth, tx);
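/*
 * Remap a single BP away from an indirect (removed) vdev if possible,
 * keeping the livelist consistent, and install the remapped copy under
 * the given rwlock.
 */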
4875 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4877 blkptr_t bp_copy = *bp;
4878 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4879 dbuf_remap_impl_callback_arg_t drica;
4881 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4883 drica.drica_os = dn->dn_objset;
4884 drica.drica_blk_birth = bp->blk_birth;
4885 drica.drica_tx = tx;
4886 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4889 * If the blkptr being remapped is tracked by a livelist,
4890 * then we need to make sure the livelist reflects the update.
4891 * First, cancel out the old blkptr by appending a 'FREE'
4892 * entry. Next, add an 'ALLOC' to track the new version. This
4893 * way we avoid trying to free an inaccurate blkptr at delete.
4894 * Note that embedded blkptrs are not tracked in livelists.
4896 if (dn->dn_objset != spa_meta_objset(spa)) {
4897 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4898 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4899 bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4900 ASSERT(!BP_IS_EMBEDDED(bp));
4901 ASSERT(dsl_dir_is_clone(ds->ds_dir));
4902 ASSERT(spa_feature_is_enabled(spa,
4903 SPA_FEATURE_LIVELIST));
4904 bplist_append(&ds->ds_dir->dd_pending_frees,
4906 bplist_append(&ds->ds_dir->dd_pending_allocs,
4912 * The db_rwlock prevents dbuf_read_impl() from
4913 * dereferencing the BP while we are changing it. To
4914 		 * avoid lock contention, only grab it when we are actually changing the BP.
4918 rw_enter(rw, RW_WRITER);
4926  * Remap any existing BPs to concrete vdevs, if possible.
4929 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4931 spa_t *spa = dmu_objset_spa(db->db_objset);
4932 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4934 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4937 if (db->db_level > 0) {
4938 blkptr_t *bp = db->db.db_data;
4939 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4940 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4942 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4943 dnode_phys_t *dnp = db->db.db_data;
4944 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4946 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4947 i += dnp[i].dn_extra_slots + 1) {
4948 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4949 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4950 &dn->dn_dbuf->db_rwlock);
4951 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4959 /* Issue I/O to commit a dirty buffer to disk. */
4961 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4963 dmu_buf_impl_t *db = dr->dr_dbuf;
4964 dnode_t *dn = dr->dr_dnode;
4966 dmu_buf_impl_t *parent = db->db_parent;
4967 uint64_t txg = tx->tx_txg;
4968 zbookmark_phys_t zb;
4970 zio_t *pio; /* parent I/O */
4973 ASSERT(dmu_tx_is_syncing(tx));
4977 if (db->db_state != DB_NOFILL) {
4978 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4980 * Private object buffers are released here rather
4981 * than in dbuf_dirty() since they are only modified
4982 * in the syncing context and we don't want the
4983 * overhead of making multiple copies of the data.
4985 if (BP_IS_HOLE(db->db_blkptr)) {
4988 dbuf_release_bp(db);
4990 dbuf_remap(dn, db, tx);
4994 if (parent != dn->dn_dbuf) {
4995 /* Our parent is an indirect block. */
4996 /* We have a dirty parent that has been scheduled for write. */
4997 ASSERT(parent && parent->db_data_pending);
4998 /* Our parent's buffer is one level closer to the dnode. */
4999 ASSERT(db->db_level == parent->db_level-1);
5001 * We're about to modify our parent's db_data by modifying
5002 * our block pointer, so the parent must be released.
5004 ASSERT(arc_released(parent->db_buf));
5005 pio = parent->db_data_pending->dr_zio;
5007 /* Our parent is the dnode itself. */
5008 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5009 db->db_blkid != DMU_SPILL_BLKID) ||
5010 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5011 if (db->db_blkid != DMU_SPILL_BLKID)
5012 ASSERT3P(db->db_blkptr, ==,
5013 &dn->dn_phys->dn_blkptr[db->db_blkid]);
5017 ASSERT(db->db_level == 0 || data == db->db_buf);
5018 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
5021 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5022 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5023 db->db.db_object, db->db_level, db->db_blkid);
5025 if (db->db_blkid == DMU_SPILL_BLKID)
5027 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5029 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5032 * We copy the blkptr now (rather than when we instantiate the dirty
5033 * record), because its value can change between open context and
5034 * syncing context. We do not need to hold dn_struct_rwlock to read
5035 * db_blkptr because we are in syncing context.
5037 dr->dr_bp_copy = *db->db_blkptr;
5039 if (db->db_level == 0 &&
5040 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5042 * The BP for this block has been provided by open context
5043 * (by dmu_sync() or dmu_buf_write_embedded()).
5045 abd_t *contents = (data != NULL) ?
5046 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5048 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5049 contents, db->db.db_size, db->db.db_size, &zp,
5050 dbuf_write_override_ready, NULL, NULL,
5051 dbuf_write_override_done,
5052 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5053 mutex_enter(&db->db_mtx);
5054 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5055 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5056 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
5057 mutex_exit(&db->db_mtx);
5058 } else if (db->db_state == DB_NOFILL) {
5059 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5060 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5061 dr->dr_zio = zio_write(pio, os->os_spa, txg,
5062 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5063 dbuf_write_nofill_ready, NULL, NULL,
5064 dbuf_write_nofill_done, db,
5065 ZIO_PRIORITY_ASYNC_WRITE,
5066 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5068 ASSERT(arc_released(data));
5071 		 * For indirect blocks, we want to set up the children
5072 * ready callback so that we can properly handle an indirect
5073 * block that only contains holes.
5075 arc_write_done_func_t *children_ready_cb = NULL;
5076 if (db->db_level != 0)
5077 children_ready_cb = dbuf_write_children_ready;
5079 dr->dr_zio = arc_write(pio, os->os_spa, txg,
5080 &dr->dr_bp_copy, data, dbuf_is_l2cacheable(db),
5081 &zp, dbuf_write_ready,
5082 children_ready_cb, dbuf_write_physdone,
5083 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
5084 ZIO_FLAG_MUSTSUCCEED, &zb);
5088 EXPORT_SYMBOL(dbuf_find);
5089 EXPORT_SYMBOL(dbuf_is_metadata);
5090 EXPORT_SYMBOL(dbuf_destroy);
5091 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5092 EXPORT_SYMBOL(dbuf_whichblock);
5093 EXPORT_SYMBOL(dbuf_read);
5094 EXPORT_SYMBOL(dbuf_unoverride);
5095 EXPORT_SYMBOL(dbuf_free_range);
5096 EXPORT_SYMBOL(dbuf_new_size);
5097 EXPORT_SYMBOL(dbuf_release_bp);
5098 EXPORT_SYMBOL(dbuf_dirty);
5099 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5100 EXPORT_SYMBOL(dmu_buf_will_dirty);
5101 EXPORT_SYMBOL(dmu_buf_is_dirty);
5102 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5103 EXPORT_SYMBOL(dmu_buf_will_fill);
5104 EXPORT_SYMBOL(dmu_buf_fill_done);
5105 EXPORT_SYMBOL(dmu_buf_rele);
5106 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5107 EXPORT_SYMBOL(dbuf_prefetch);
5108 EXPORT_SYMBOL(dbuf_hold_impl);
5109 EXPORT_SYMBOL(dbuf_hold);
5110 EXPORT_SYMBOL(dbuf_hold_level);
5111 EXPORT_SYMBOL(dbuf_create_bonus);
5112 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5113 EXPORT_SYMBOL(dbuf_rm_spill);
5114 EXPORT_SYMBOL(dbuf_add_ref);
5115 EXPORT_SYMBOL(dbuf_rele);
5116 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5117 EXPORT_SYMBOL(dbuf_refcount);
5118 EXPORT_SYMBOL(dbuf_sync_list);
5119 EXPORT_SYMBOL(dmu_buf_set_user);
5120 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5121 EXPORT_SYMBOL(dmu_buf_get_user);
5122 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5124 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5125 "Maximum size in bytes of the dbuf cache.");
5127 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5128 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5130 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5131 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5133 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5134 "Maximum size in bytes of dbuf metadata cache.");
5136 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5137 "Set size of dbuf cache to log2 fraction of arc size.");
5139 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5140 "Set size of dbuf metadata cache to log2 fraction of arc size.");
5142 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5143 "Set size of dbuf cache mutex array as log2 shift.");