/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
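
/*
 * Increase the indirection level in syncing context: hold the new
 * top-level indirect block, copy the dnode's block pointer array into
 * it, and repoint any cached child dbufs (and their db_blkptr) at the
 * new parent. Called from dnode_sync() below when open context has
 * recorded a larger dn_next_nlevels[] for this txg.
 */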
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* transfer dnode's block pointers to new indirect block */
	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
	ASSERT(db->db.db_data);
	ASSERT(arc_released(db->db_buf));
	ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
	bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
	    sizeof (blkptr_t) * nblkptr);
	arc_buf_freeze(db->db_buf);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}
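
/*
 * Free each block pointed to by the bp array, returning the space to
 * the dataset. With the hole_birth feature active, the zeroed bp is
 * rewritten as a hole that keeps its logical size, type, level, and
 * birth txg, so zfs send can tell when the hole was punched.
 */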
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}
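
/*
 * ZFS_DEBUG-only sanity check, reached through the FREE_VERIFY()
 * macro: verify that every level-0 buffer under this level-1 indirect
 * in the freed range [start, end] has actually been zeroed.
 */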
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif
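
/*
 * Free the blocks of the range [blkid, blkid + nblks) that fall under
 * the indirect block "db", recursing through lower-level indirects.
 * If that leaves this block entirely holes, free the block itself.
 */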
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FALSE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}
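
/*
 * range_tree_vacate() passes only (arg, offset, length) to its
 * callback, so bundle the dnode and tx the callback needs into a
 * single argument.
 */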
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;
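
/*
 * Callback for range_tree_vacate() in dnode_sync() below; the caller
 * holds dn_mtx, so drop it across the actual free, which acquires
 * other locks (e.g. dn_struct_rwlock).
 */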
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			dbuf_clear(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			db->db_pending_evict = TRUE;
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	dnode_evict_bonus(dn);
}
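
/*
 * Evict the bonus buffer if it is unreferenced; otherwise flag it to
 * be evicted when its last hold is dropped.
 */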
void
dnode_evict_bonus(dnode_t *dn)
{
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus != NULL) {
		if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
			mutex_enter(&dn->dn_bonus->db_mtx);
			dbuf_evict(dn->dn_bonus);
			dn->dn_bonus = NULL;
		} else {
			dn->dn_bonus->db_pending_evict = TRUE;
		}
	}
	rw_exit(&dn->dn_struct_rwlock);
}
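
/*
 * Throw away the given dirty records without writing them out,
 * dropping the hold that dirtying took on each dbuf. Only used (via
 * dnode_sync_free() below) when the dnode itself is being freed, so
 * none of its dirty data needs to reach disk.
 */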
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while (dr = list_head(list)) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}
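
/*
 * Sync-context half of object removal: discard remaining dirty state,
 * evict this dnode's dbufs, zero the on-disk dnode, and release the
 * hold that kept the dnode around while it was dirty.
 */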
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
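
	/*
	 * Stash the pre-sync space usage and flags so the user/group
	 * space accounting code can work out what changed in this txg.
	 */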
	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}
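
	/*
	 * Under dn_mtx, fold into the on-disk dnode any changes staged
	 * in open context: a new allocation's type and bonus fields,
	 * and the per-txg dn_next_* values recorded for this txg.
	 */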
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}
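
	/*
	 * Write out this txg's dirty records, recursing from each
	 * top-level record down through its children.
	 */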
	dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}