/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
static int free_range_compar(const void *node1, const void *node2);

static kmem_cache_t *dnode_cache;

static dnode_phys_t dnode_phys_zero;
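
/*
 * Default data and indirect block shifts applied to newly allocated
 * dnodes; tunable, consumed by dnode_allocate() below.
 */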
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	int i;
	dnode_t *dn = arg;

	bzero(dn, sizeof (dnode_t));

	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	refcount_create(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_create(&dn->dn_ranges[i], free_range_compar,
		    sizeof (free_range_t),
		    offsetof(struct free_range, fr_node));
		list_create(&dn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}

	list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	return (0);
}
/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
	int i;
	dnode_t *dn = arg;

	cv_destroy(&dn->dn_notxholds);
	rw_destroy(&dn->dn_struct_rwlock);
	mutex_destroy(&dn->dn_mtx);
	mutex_destroy(&dn->dn_dbufs_mtx);
	refcount_destroy(&dn->dn_holds);
	refcount_destroy(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_destroy(&dn->dn_ranges[i]);
		list_destroy(&dn->dn_dirty_records[i]);
	}

	list_destroy(&dn->dn_dbufs);
}
void
dnode_init(void)
{
	dnode_cache = kmem_cache_create("dnode_t",
	    sizeof (dnode_t),
	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
}

void
dnode_fini(void)
{
	kmem_cache_destroy(dnode_cache);
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
	int drop_struct_lock = FALSE;

	ASSERT(dn->dn_phys);
	ASSERT(dn->dn_objset);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);

	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
		return;

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
		int i;

		ASSERT3U(dn->dn_indblkshift, >=, 0);
		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
		if (dn->dn_datablkshift) {
			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
		}
		ASSERT3U(dn->dn_nlevels, <=, 30);
		ASSERT3U(dn->dn_type, <=, DMU_OT_NUMTYPES);
		ASSERT3U(dn->dn_nblkptr, >=, 1);
		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		ASSERT3U(dn->dn_datablksz, ==,
		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		for (i = 0; i < TXG_SIZE; i++) {
			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
		}
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE)
		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || dn->dn_dbuf != NULL);
	if (dn->dn_dbuf != NULL) {
		ASSERT3P(dn->dn_phys, ==,
		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
	}
	if (drop_struct_lock)
		rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
	int i;

	if (dnp->dn_type == DMU_OT_NONE) {
		bzero(dnp, sizeof (dnode_phys_t));
		return;
	}

	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
	dnp->dn_used = BSWAP_64(dnp->dn_used);

	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
		buf64[i] = BSWAP_64(buf64[i]);

	/*
	 * OK to check dn_bonuslen for zero, because it won't matter if
	 * we have the wrong byte order.  This is necessary because the
	 * dnode dnode is smaller than a regular dnode.
	 */
	if (dnp->dn_bonuslen != 0) {
		/*
		 * Note that the bonus length calculated here may be
		 * longer than the actual bonus buffer.  This is because
		 * we always put the bonus buffer after the last block
		 * pointer (instead of packing it against the end of the
		 * dnode buffer).
		 */
		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
		size_t len = DN_MAX_BONUSLEN - off;
		ASSERT3U(dnp->dn_bonustype, <, DMU_OT_NUMTYPES);
		dmu_ot[dnp->dn_bonustype].ot_byteswap(dnp->dn_bonus + off, len);
	}
}
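
/*
 * For example, a dnode_phys_t with dn_nblkptr == 3 has the loop above
 * swap 3 * sizeof (blkptr_t) / 8 == 48 eight-byte words, covering all
 * three embedded block pointers in a single pass.
 */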
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
	dnode_phys_t *buf = vbuf;
	int i;

	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

	size >>= DNODE_SHIFT;
	for (i = 0; i < size; i++) {
		dnode_byteswap(buf);
		buf++;
	}
}
static int
free_range_compar(const void *node1, const void *node2)
{
	const free_range_t *rp1 = node1;
	const free_range_t *rp2 = node2;

	if (rp1->fr_blkid < rp2->fr_blkid)
		return (-1);
	else if (rp1->fr_blkid > rp2->fr_blkid)
		return (1);
	else
		return (0);
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

	dnode_setdirty(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
	dn->dn_bonuslen = newsize;
	if (newsize == 0)
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
	else
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	rw_exit(&dn->dn_struct_rwlock);
}
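
/*
 * Sketch of typical use (a caller such as the ZPL already holds the
 * dnode and has an assigned transaction):
 *
 *	dnode_setbonuslen(dn, sizeof (znode_phys_t), tx);
 *
 * The new length must fit in the space left after the last block
 * pointer, as the ASSERT above enforces.
 */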
static void
dnode_setdblksz(dnode_t *dn, int size)
{
	ASSERT3U(P2PHASE(size, SPA_MINBLOCKSIZE), ==, 0);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
	dn->dn_datablksz = size;
	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
	dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object)
{
	dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
	(void) dnode_cons(dn, NULL, 0);	/* XXX */

	dn->dn_objset = os;
	dn->dn_object = object;
	dn->dn_dbuf = db;
	dn->dn_phys = dnp;

	if (dnp->dn_datablkszsec)
		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	dn->dn_indblkshift = dnp->dn_indblkshift;
	dn->dn_nlevels = dnp->dn_nlevels;
	dn->dn_type = dnp->dn_type;
	dn->dn_nblkptr = dnp->dn_nblkptr;
	dn->dn_checksum = dnp->dn_checksum;
	dn->dn_compress = dnp->dn_compress;
	dn->dn_bonustype = dnp->dn_bonustype;
	dn->dn_bonuslen = dnp->dn_bonuslen;
	dn->dn_maxblkid = dnp->dn_maxblkid;

	dmu_zfetch_init(&dn->dn_zfetch, dn);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);
	mutex_enter(&os->os_lock);
	list_insert_head(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	arc_space_consume(sizeof (dnode_t));
	return (dn);
}
/* ARGSUSED */
static void
dnode_destroy(dnode_t *dn)
{
	objset_impl_t *os = dn->dn_objset;

#ifdef ZFS_DEBUG
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT(NULL == list_head(&dn->dn_dirty_records[i]));
		ASSERT(0 == avl_numnodes(&dn->dn_ranges[i]));
	}
	ASSERT(NULL == list_head(&dn->dn_dbufs));
#endif

	mutex_enter(&os->os_lock);
	list_remove(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}
	dmu_zfetch_rele(&dn->dn_zfetch);
	if (dn->dn_bonus) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	kmem_cache_free(dnode_cache, dn);
	arc_space_return(sizeof (dnode_t));
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	if (blocksize == 0)
		blocksize = 1 << zfs_default_bs;
	else if (blocksize > SPA_MAXBLOCKSIZE)
		blocksize = SPA_MAXBLOCKSIZE;
	else
		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = zfs_default_ibs;

	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
	    dn->dn_object, tx->tx_txg, blocksize, ibs);

	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
	ASSERT(ot != DMU_OT_NONE);
	ASSERT3U(ot, <, DMU_OT_NUMTYPES);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
	ASSERT3U(dn->dn_maxblkid, ==, 0);
	ASSERT3U(dn->dn_allocated_txg, ==, 0);
	ASSERT3U(dn->dn_assigned_txg, ==, 0);
	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT3U(dn->dn_next_nlevels[i], ==, 0);
		ASSERT3U(dn->dn_next_indblkshift[i], ==, 0);
		ASSERT3U(dn->dn_next_bonuslen[i], ==, 0);
		ASSERT3U(dn->dn_next_blksz[i], ==, 0);
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
		ASSERT3U(avl_numnodes(&dn->dn_ranges[i]), ==, 0);
	}

	dn->dn_type = ot;
	dnode_setdblksz(dn, blocksize);
	dn->dn_indblkshift = ibs;
	dn->dn_nlevels = 1;
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	dn->dn_dirtyctx = 0;

	dn->dn_free_txg = 0;
	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}

	dn->dn_allocated_txg = tx->tx_txg;

	dnode_setdirty(dn, tx);
	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
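
/*
 * For illustration, a typical caller such as dmu_object_alloc() finds a
 * free object number by scanning the meta-dnode with dnode_next_offset(),
 * holds it with DNODE_MUST_BE_FREE, and then calls dnode_allocate() with
 * ibs == 0 so that the zfs_default_ibs tunable above takes effect.
 */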
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i, nblkptr;
	dmu_buf_impl_t *db = NULL;

	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(blocksize % SPA_MINBLOCKSIZE, ==, 0);
	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
	ASSERT(tx->tx_txg != 0);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

	for (i = 0; i < TXG_SIZE; i++)
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));

	/* clean up any unreferenced dbufs */
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX I should really have a generation number to tell if we
	 * need to do this...
	 */
	if (blocksize != dn->dn_datablksz ||
	    dn->dn_bonustype != bonustype || dn->dn_bonuslen != bonuslen) {
		/* free all old data */
		dnode_free_range(dn, 0, -1ULL, tx);
	}

	nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);

	/* change blocksize */
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (blocksize != dn->dn_datablksz &&
	    (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
	    list_head(&dn->dn_dbufs) != NULL)) {
		db = dbuf_hold(dn, 0, FTAG);
		dbuf_new_size(db, blocksize, tx);
	}
	dnode_setdblksz(dn, blocksize);
	dnode_setdirty(dn, tx);
	dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
	if (dn->dn_nblkptr != nblkptr)
		dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
	rw_exit(&dn->dn_struct_rwlock);
	if (db)
		dbuf_rele(db, FTAG);

	/* change type */
	dn->dn_type = ot;

	/* change bonus size and type */
	mutex_enter(&dn->dn_mtx);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_nblkptr = nblkptr;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

	/* fix up the bonus db_size */
	if (dn->dn_bonus) {
		dn->dn_bonus->db.db_size =
		    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
	}

	dn->dn_allocated_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);
}
void
dnode_special_close(dnode_t *dn)
{
	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	dnode_destroy(dn);
}
dnode_t *
dnode_special_open(objset_impl_t *os, dnode_phys_t *dnp, uint64_t object)
{
	dnode_t *dn = dnode_create(os, dnp, NULL, object);

	DNODE_VERIFY(dn);
	return (dn);
}
static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
	dnode_t **children_dnodes = arg;
	int i;
	int epb = db->db_size >> DNODE_SHIFT;

	for (i = 0; i < epb; i++) {
		dnode_t *dn = children_dnodes[i];
		int n;

		if (dn == NULL)
			continue;
#ifdef ZFS_DEBUG
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(list_head(&dn->dn_dbufs) == NULL);
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		for (n = 0; n < TXG_SIZE; n++)
			ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
#endif
		children_dnodes[i] = NULL;
		dnode_destroy(dn);
	}
	kmem_free(children_dnodes, epb * sizeof (dnode_t *));
}
/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 * succeeds even for free dnodes.
 */
int
dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_t **children_dnodes;

	/*
	 * If you are holding the spa config lock as writer, you shouldn't
	 * be asking the DMU to do *anything*.
	 */
	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0);

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (EINVAL);

	mdn = os->os_meta_dnode;

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	if (err) {
		dbuf_rele(db, FTAG);
		return (err);
	}

	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
	epb = db->db.db_size >> DNODE_SHIFT;

	idx = object & (epb-1);

	children_dnodes = dmu_buf_get_user(&db->db);
	if (children_dnodes == NULL) {
		dnode_t **winner;

		children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
		    KM_SLEEP);
		if ((winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
		    dnode_buf_pageout)) != NULL) {
			kmem_free(children_dnodes, epb * sizeof (dnode_t *));
			children_dnodes = winner;
		}
	}

	if ((dn = children_dnodes[idx]) == NULL) {
		dnode_phys_t *dnp = (dnode_phys_t *)db->db.db_data+idx;
		dnode_t *winner;

		dn = dnode_create(os, dnp, db, object);
		winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn);
		if (winner != NULL) {
			dnode_destroy(dn);
			dn = winner;
		}
	}

	mutex_enter(&dn->dn_mtx);
	type = dn->dn_type;
	if (dn->dn_free_txg ||
	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
	    ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)) {
		mutex_exit(&dn->dn_mtx);
		dbuf_rele(db, FTAG);
		return (type == DMU_OT_NONE ? ENOENT : EEXIST);
	}
	mutex_exit(&dn->dn_mtx);

	if (refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dn);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}
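
/*
 * For example, with a 16K meta-dnode block, epb above is
 * 16384 >> DNODE_SHIFT == 32 dnodes per block, so object 1234 lives at
 * idx == 1234 & 31 == 18 within its containing dbuf.
 */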
/*
 * Return held dnode if the object is allocated, NULL if not.
 */
int
dnode_hold(objset_impl_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}
/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	if (refcount_is_zero(&dn->dn_holds)) {
		mutex_exit(&dn->dn_mtx);
		return (FALSE);
	}
	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
	mutex_exit(&dn->dn_mtx);
	return (TRUE);
}
void
dnode_rele(dnode_t *dn, void *tag)
{
	uint64_t refs;

	mutex_enter(&dn->dn_mtx);
	refs = refcount_remove(&dn->dn_holds, tag);
	mutex_exit(&dn->dn_mtx);
	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && dn->dn_dbuf)
		dbuf_rele(dn->dn_dbuf, dn);
}
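
/*
 * Sketch of the usual hold/release pairing; the tag passed to
 * dnode_rele() must match the one used to take the hold:
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err == 0) {
 *		... use dn ...
 *		dnode_rele(dn, FTAG);
 *	}
 */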
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		return;

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	/* ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); */
	mutex_exit(&dn->dn_mtx);
#endif

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT3U(dn->dn_next_bonuslen[txg&TXG_MASK], ==, 0);
	ASSERT3U(dn->dn_next_blksz[txg&TXG_MASK], ==, 0);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf.  We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

	(void) dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		dnode_setdirty(dn, tx);
	}
}
/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	int err;

	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	if (size > SPA_MAXBLOCKSIZE)
		size = SPA_MAXBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == dn->dn_indblkshift)
		ibs = 0;

	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_phys->dn_maxblkid != 0)
		goto fail;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);

		if (db->db_blkid != 0 && db->db_blkid != DB_BONUS_BLKID) {
			mutex_exit(&dn->dn_dbufs_mtx);
			goto fail;
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	if (ibs && dn->dn_nlevels != 1)
		goto fail;

	/* resize the old block */
	err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
	if (err == 0)
		dbuf_new_size(db, size, tx);
	else if (err != ENOENT)
		goto fail;

	dnode_setdblksz(dn, size);
	dnode_setdirty(dn, tx);
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
	if (ibs) {
		dn->dn_indblkshift = ibs;
		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
	}
	/* rele after we have fixed the blocksize in the dnode */
	if (db)
		dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
	return (0);

fail:
	rw_exit(&dn->dn_struct_rwlock);
	return (ENOTSUP);
}
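
/*
 * Sketch of typical use, as in dmu_object_set_blocksize(): the caller
 * holds the dnode, assigns a transaction, and requests the new size;
 * a nonzero return simply means the object already has data beyond its
 * first block and must keep its old block size.
 */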
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
	uint64_t txgoff = tx->tx_txg & TXG_MASK;
	int epbs, new_nlevels;
	uint64_t sz;

	ASSERT(blkid != DB_BONUS_BLKID);

	ASSERT(have_read ?
	    RW_READ_HELD(&dn->dn_struct_rwlock) :
	    RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * if we have a read-lock, check to see if we need to do any work
	 * before upgrading to a write-lock.
	 */
	if (have_read) {
		if (blkid <= dn->dn_maxblkid)
			return;

		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
	}

	if (blkid <= dn->dn_maxblkid)
		goto out;

	dn->dn_maxblkid = blkid;

	/*
	 * Compute the number of levels necessary to support the new maxblkid.
	 */
	new_nlevels = 1;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (sz = dn->dn_nblkptr;
	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
		new_nlevels++;

	if (new_nlevels > dn->dn_nlevels) {
		int old_nlevels = dn->dn_nlevels;
		dmu_buf_impl_t *db;
		list_t *list;
		dbuf_dirty_record_t *new, *dr, *dr_next;

		dn->dn_nlevels = new_nlevels;

		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
		dn->dn_next_nlevels[txgoff] = new_nlevels;

		/* dirty the left indirects */
		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
		new = dbuf_dirty(db, tx);
		dbuf_rele(db, FTAG);

		/* transfer the dirty records to the new indirect */
		mutex_enter(&dn->dn_mtx);
		mutex_enter(&new->dt.di.dr_mtx);
		list = &dn->dn_dirty_records[txgoff];
		for (dr = list_head(list); dr; dr = dr_next) {
			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
			    dr->dr_dbuf->db_blkid != DB_BONUS_BLKID) {
				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
				list_remove(&dn->dn_dirty_records[txgoff], dr);
				list_insert_tail(&new->dt.di.dr_children, dr);
				dr->dr_parent = new;
			}
		}
		mutex_exit(&new->dt.di.dr_mtx);
		mutex_exit(&dn->dn_mtx);
	}

out:
	if (have_read)
		rw_downgrade(&dn->dn_struct_rwlock);
}
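
/*
 * Worked example: with the default indirect block shift of 14, epbs is
 * 14 - SPA_BLKPTRSHIFT == 7, so each indirect block maps 128 children.
 * For dn_nblkptr == 3 and a new blkid of 500, the loop above walks
 * sz = 3, 384, 49152 and stops when sz first exceeds blkid, giving
 * new_nlevels == 3.
 */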
static void
dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
	avl_index_t where;
	free_range_t *rp;
	free_range_t rp_tofind;
	uint64_t endblk = blkid + nblks;

	ASSERT(MUTEX_HELD(&dn->dn_mtx));
	ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */

	dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
	    blkid, nblks, tx->tx_txg);
	rp_tofind.fr_blkid = blkid;
	rp = avl_find(tree, &rp_tofind, &where);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_BEFORE);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_AFTER);

	while (rp && (rp->fr_blkid <= blkid + nblks)) {
		uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks;
		free_range_t *nrp = AVL_NEXT(tree, rp);

		if (blkid <= rp->fr_blkid && endblk >= fr_endblk) {
			/* clear this entire range */
			avl_remove(tree, rp);
			kmem_free(rp, sizeof (free_range_t));
		} else if (blkid <= rp->fr_blkid &&
		    endblk > rp->fr_blkid && endblk < fr_endblk) {
			/* clear the beginning of this range */
			rp->fr_blkid = endblk;
			rp->fr_nblks = fr_endblk - endblk;
		} else if (blkid > rp->fr_blkid && blkid < fr_endblk &&
		    endblk >= fr_endblk) {
			/* clear the end of this range */
			rp->fr_nblks = blkid - rp->fr_blkid;
		} else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
			/* clear a chunk out of this range */
			free_range_t *new_rp =
			    kmem_alloc(sizeof (free_range_t), KM_SLEEP);

			new_rp->fr_blkid = endblk;
			new_rp->fr_nblks = fr_endblk - endblk;
			avl_insert_here(tree, new_rp, rp, AVL_AFTER);
			rp->fr_nblks = blkid - rp->fr_blkid;
		}
		/* there may be no overlap */
		rp = nrp;
	}
}
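
/*
 * The four overlap cases handled above, with [blkid, endblk) the range
 * being cleared and [fr_blkid, fr_endblk) an existing tree entry:
 *
 *	clear entire entry:	blkid <= fr_blkid, endblk >= fr_endblk
 *	clear the beginning:	blkid <= fr_blkid, endblk inside
 *	clear the end:		blkid inside, endblk >= fr_endblk
 *	clear a middle chunk:	both inside (splits the entry in two)
 */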
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	uint64_t blkoff, blkid, nblks;
	int blksz, blkshift, head, tail;
	int trunc = FALSE;
	int epbs;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	blksz = dn->dn_datablksz;
	blkshift = dn->dn_datablkshift;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	if (len == -1ULL) {
		len = UINT64_MAX - off;
		trunc = TRUE;
	}

	/*
	 * First, block align the region to free:
	 */
	if (ISP2(blksz)) {
		head = P2NPHASE(off, blksz);
		blkoff = P2PHASE(off, blksz);
		if ((off >> blkshift) > dn->dn_maxblkid)
			goto out;
	} else {
		ASSERT(dn->dn_maxblkid == 0);
		if (off == 0 && len >= blksz) {
			/* Freeing the whole block; fast-track this request */
			blkid = 0;
			nblks = 1;
			goto done;
		} else if (off >= blksz) {
			/* Freeing past end-of-data */
			goto out;
		} else {
			/* Freeing part of the block. */
			head = blksz - off;
			ASSERT3U(head, >, 0);
		}
		blkoff = off;
	}
	/* zero out any partial block data at the start of the range */
	if (head) {
		ASSERT3U(blkoff + head, ==, blksz);
		if (len < head)
			head = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
		    FTAG, &db) == 0) {
			caddr_t data;

			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				data = db->db.db_data;
				bzero(data + blkoff, head);
			}
			dbuf_rele(db, FTAG);
		}
		off += head;
		len -= head;
	}

	/* If the range was less than one block, we're done */
	if (len == 0)
		goto out;

	/* If the remaining range is past end of file, we're done */
	if ((off >> blkshift) > dn->dn_maxblkid)
		goto out;

	ASSERT(ISP2(blksz));
	if (trunc)
		tail = 0;
	else
		tail = P2PHASE(len, blksz);

	ASSERT3U(P2PHASE(off, blksz), ==, 0);
	/* zero out any partial block data at the end of the range */
	if (tail) {
		if (len < tail)
			tail = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
		    TRUE, FTAG, &db) == 0) {
			/* don't dirty if not on disk and not dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				bzero(db->db.db_data, tail);
			}
			dbuf_rele(db, FTAG);
		}
		len -= tail;
	}

	/* If the range did not include a full block, we are done */
	if (len == 0)
		goto out;

	ASSERT(IS_P2ALIGNED(off, blksz));
	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
	blkid = off >> blkshift;
	nblks = len >> blkshift;
	if (trunc)
		nblks += 1;

	/*
	 * Read in and mark all the level-1 indirects dirty,
	 * so that they will stay in memory until syncing phase.
	 * Always dirty the first and last indirect to make sure
	 * we dirty all the partial indirects.
	 */
	if (dn->dn_nlevels > 1) {
		uint64_t i, first, last;
		int shift = epbs + dn->dn_datablkshift;

		first = blkid >> epbs;
		if ((db = dbuf_hold_level(dn, 1, first, FTAG)) != NULL) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		if (trunc)
			last = dn->dn_maxblkid >> epbs;
		else
			last = (blkid + nblks - 1) >> epbs;
		if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		for (i = first + 1; i < last; i++) {
			uint64_t ibyte = i << shift;
			int err;

			err = dnode_next_offset(dn,
			    DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i >= last)
				break;
			ASSERT(err == 0);
			db = dbuf_hold_level(dn, 1, i, FTAG);
			if (db) {
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			}
		}
	}
done:
	/*
	 * Add this range to the dnode range list.
	 * We will finish up this free operation in the syncing phase.
	 */
	mutex_enter(&dn->dn_mtx);
	dnode_clear_range(dn, blkid, nblks, tx);
	{
		free_range_t *rp, *found;
		avl_index_t where;
		avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

		/* Add new range to dn_ranges */
		rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
		rp->fr_blkid = blkid;
		rp->fr_nblks = nblks;
		found = avl_find(tree, rp, &where);
		ASSERT(found == NULL);
		avl_insert(tree, rp, where);
		dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
		    blkid, nblks, tx->tx_txg);
	}
	mutex_exit(&dn->dn_mtx);

	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
	dnode_setdirty(dn, tx);
out:
	if (trunc && dn->dn_maxblkid >= (off >> blkshift))
		dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0);

	rw_exit(&dn->dn_struct_rwlock);
}
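
/*
 * Worked example of the alignment above: off == 1000, len == 7000,
 * blksz == 4096.  head == P2NPHASE(1000, 4096) == 3096 bytes are zeroed
 * at the tail of block 0, leaving off == 4096, len == 3904; that
 * remainder is all tail and is zeroed at the front of block 1, so no
 * whole blocks are added to the free range list.
 */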
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
	free_range_t range_tofind;
	void *dp = spa_get_dsl(dn->dn_objset->os_spa);
	int i;

	if (blkid == DB_BONUS_BLKID)
		return (FALSE);

	/*
	 * If we're in the process of opening the pool, dp will not be
	 * set yet, but there shouldn't be anything dirty.
	 */
	if (dp == NULL)
		return (FALSE);

	if (dn->dn_free_txg)
		return (TRUE);

	/*
	 * If dn_datablkshift is not set, then there's only a single
	 * block, in which case there will never be a free range so it
	 * won't matter.
	 */
	range_tofind.fr_blkid = blkid;
	mutex_enter(&dn->dn_mtx);
	for (i = 0; i < TXG_SIZE; i++) {
		free_range_t *range_found;
		avl_index_t idx;

		range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx);
		if (range_found) {
			ASSERT(range_found->fr_nblks > 0);
			break;
		}
		range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE);
		if (range_found &&
		    range_found->fr_blkid + range_found->fr_nblks > blkid)
			break;
	}
	mutex_exit(&dn->dn_mtx);
	return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
	uint64_t space;

	dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
	    dn, dn->dn_phys,
	    (u_longlong_t)dn->dn_phys->dn_used,
	    (longlong_t)delta);

	mutex_enter(&dn->dn_mtx);
	space = DN_USED_BYTES(dn->dn_phys);
	if (delta > 0) {
		ASSERT3U(space + delta, >=, space); /* no overflow */
	} else {
		ASSERT3U(space, >=, -delta); /* no underflow */
	}
	space += delta;
	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
		ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
		ASSERT3U(P2PHASE(space, 1<<DEV_BSHIFT), ==, 0);
		dn->dn_phys->dn_used = space >> DEV_BSHIFT;
	} else {
		dn->dn_phys->dn_used = space;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
	}
	mutex_exit(&dn->dn_mtx);
}
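
/*
 * For example, on a pool version older than SPA_VERSION_DNODE_BYTES a
 * delta of +8192 bytes adds 8192 >> DEV_BSHIFT == 16 512-byte sectors
 * to dn_used, while newer pools store the byte count directly and set
 * DNODE_FLAG_USED_BYTES.
 */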
/*
 * Call when we think we're going to write/free space in open context.
 * Be conservative (ie. OK to write less than this or free more than
 * this, but don't write more or free less).
 */
void
dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;

	if (space > 0)
		space = spa_get_asize(os->os_spa, space);

	if (ds)
		dsl_dir_willuse_space(ds->ds_dir, space, tx);

	dmu_tx_willuse_space(tx, space);
}
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
    int lvl, uint64_t blkfill, uint64_t txg)
{
	dmu_buf_impl_t *db = NULL;
	void *data = NULL;
	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t epb = 1ULL << epbs;
	uint64_t minfill, maxfill;
	boolean_t hole;
	int i, inc, error, span;

	dprintf("probing object %llu offset %llx level %d of %u\n",
	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

	hole = flags & DNODE_FIND_HOLE;
	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
	ASSERT(txg == 0 || !hole);

	if (lvl == dn->dn_phys->dn_nlevels) {
		error = 0;
		epb = dn->dn_phys->dn_nblkptr;
		data = dn->dn_phys->dn_blkptr;
	} else {
		uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
		if (error) {
			if (error != ENOENT)
				return (error);
			if (hole)
				return (0);
			/*
			 * This can only happen when we are searching up
			 * the block tree for data.  We don't really need to
			 * adjust the offset, as we will just end up looking
			 * at the pointer to this block in its parent, and it's
			 * going to be unallocated, so we will skip over it.
			 */
			return (ESRCH);
		}
		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
		if (error) {
			dbuf_rele(db, FTAG);
			return (error);
		}
		data = db->db.db_data;
	}

	if (db && txg &&
	    (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) {
		/*
		 * This can only happen when we are searching up the tree
		 * and these conditions mean that we need to keep climbing.
		 */
		error = ESRCH;
	} else if (lvl == 0) {
		dnode_phys_t *dnp = data;
		span = DNODE_SHIFT;
		ASSERT(dn->dn_type == DMU_OT_DNODE);

		for (i = (*offset >> span) & (blkfill - 1);
		    i >= 0 && i < blkfill; i += inc) {
			boolean_t newcontents = B_TRUE;
			if (txg) {
				int j;
				newcontents = B_FALSE;
				for (j = 0; j < dnp[i].dn_nblkptr; j++) {
					if (dnp[i].dn_blkptr[j].blk_birth > txg)
						newcontents = B_TRUE;
				}
			}
			/*
			 * A free dnode has dn_type == DMU_OT_NONE, so this
			 * matches free dnodes when searching for holes and
			 * allocated ones otherwise.
			 */
			if ((!dnp[i].dn_type) == hole && newcontents)
				break;
			*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == blkfill)
			error = ESRCH;
	} else {
		blkptr_t *bp = data;
		span = (lvl - 1) * epbs + dn->dn_datablkshift;
		minfill = 0;
		maxfill = blkfill << ((lvl - 1) * epbs);

		if (hole)
			maxfill--;
		else
			minfill++;

		for (i = (*offset >> span) & ((1ULL << epbs) - 1);
		    i >= 0 && i < epb; i += inc) {
			if (bp[i].blk_fill >= minfill &&
			    bp[i].blk_fill <= maxfill &&
			    (hole || bp[i].blk_birth > txg))
				break;
			if (inc < 0 && *offset < (1ULL << span))
				*offset = 0;
			else
				*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == epb)
			error = ESRCH;
	}

	if (db)
		dbuf_rele(db, FTAG);

	return (error);
}
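
/*
 * Span arithmetic, for example: with 128K data blocks (dn_datablkshift
 * == 17) and epbs == 7, a level-1 scan uses span == 17 (one data block
 * per block pointer) and a level-2 scan uses span == 24, i.e. each
 * examined pointer accounts for 16MB of file offset.
 */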
/*
 * Find the next hole, data, or sparse region at or after *offset.
 * The value 'blkfill' tells us how many items we expect to find
 * in an L0 data block; this value is 1 for normal objects,
 * DNODES_PER_BLOCK for the meta dnode, and some fraction of
 * DNODES_PER_BLOCK when searching for sparse regions thereof.
 *
 * Examples:
 *
 * dnode_next_offset(dn, flags, offset, 1, 1, 0);
 *	Finds the next/previous hole/data in a file.
 *	Used in dmu_offset_next().
 *
 * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
 *	Finds the next free/allocated dnode in an objset's meta-dnode.
 *	Only finds objects that have new contents since txg (ie.
 *	bonus buffer changes and content removal are ignored).
 *	Used in dmu_object_next().
 *
 * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
 *	Finds the next L2 meta-dnode bp that's at most 1/4 full.
 *	Used in dmu_object_alloc().
 */
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
    int minlvl, uint64_t blkfill, uint64_t txg)
{
	uint64_t initial_offset = *offset;
	int lvl, maxlvl;
	int error = 0;

	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dn->dn_phys->dn_nlevels == 0) {
		error = ESRCH;
		goto out;
	}

	if (dn->dn_datablkshift == 0) {
		if (*offset < dn->dn_datablksz) {
			if (flags & DNODE_FIND_HOLE)
				*offset = dn->dn_datablksz;
		} else {
			error = ESRCH;
		}
		goto out;
	}

	maxlvl = dn->dn_phys->dn_nlevels;

	for (lvl = minlvl; lvl <= maxlvl; lvl++) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
		if (error != ESRCH)
			break;
	}

	while (error == 0 && --lvl >= minlvl) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
	}

	if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
	    initial_offset < *offset : initial_offset > *offset))
		error = ESRCH;
out:
	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_exit(&dn->dn_struct_rwlock);

	return (error);
}