FreeBSD stable/9: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c (MFC r260150: MFV r259170)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2013 by Delphix. All rights reserved.
25  */
26
27 #include <sys/zfs_context.h>
28 #include <sys/dbuf.h>
29 #include <sys/dnode.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h>
34 #include <sys/spa.h>
35 #include <sys/zfeature.h>
36
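/*
 * Called from dnode_sync() when dn_next_nlevels says this dnode needs more
 * levels of indirection.  Hold what will become the new top-level indirect
 * block, copy the dnode's embedded block pointers into it, repoint any
 * cached child dbufs at the new parent, and zero the block pointers in the
 * dnode itself.
 */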
37 static void
38 dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
39 {
40         dmu_buf_impl_t *db;
41         int txgoff = tx->tx_txg & TXG_MASK;
42         int nblkptr = dn->dn_phys->dn_nblkptr;
43         int old_toplvl = dn->dn_phys->dn_nlevels - 1;
44         int new_level = dn->dn_next_nlevels[txgoff];
45         int i;
46
47         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
48
49         /* this dnode can't be paged out because it's dirty */
50         ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
51         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
52         ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);
53
54         db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
55         ASSERT(db != NULL);
56
57         dn->dn_phys->dn_nlevels = new_level;
58         dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
59             dn->dn_object, dn->dn_phys->dn_nlevels);
60
61         /* check for existing blkptrs in the dnode */
62         for (i = 0; i < nblkptr; i++)
63                 if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
64                         break;
65         if (i != nblkptr) {
66                 /* transfer dnode's block pointers to new indirect block */
67                 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
68                 ASSERT(db->db.db_data);
69                 ASSERT(arc_released(db->db_buf));
70                 ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
71                 bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
72                     sizeof (blkptr_t) * nblkptr);
73                 arc_buf_freeze(db->db_buf);
74         }
75
76         /* set dbuf's parent pointers to new indirect buf */
77         for (i = 0; i < nblkptr; i++) {
78                 dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);
79
80                 if (child == NULL)
81                         continue;
82 #ifdef  DEBUG
83                 DB_DNODE_ENTER(child);
84                 ASSERT3P(DB_DNODE(child), ==, dn);
85                 DB_DNODE_EXIT(child);
86 #endif  /* DEBUG */
87                 if (child->db_parent && child->db_parent != dn->dn_dbuf) {
88                         ASSERT(child->db_parent->db_level == db->db_level);
89                         ASSERT(child->db_blkptr !=
90                             &dn->dn_phys->dn_blkptr[child->db_blkid]);
91                         mutex_exit(&child->db_mtx);
92                         continue;
93                 }
94                 ASSERT(child->db_parent == NULL ||
95                     child->db_parent == dn->dn_dbuf);
96
97                 child->db_parent = db;
98                 dbuf_add_ref(db, child);
99                 if (db->db.db_data)
100                         child->db_blkptr = (blkptr_t *)db->db.db_data + i;
101                 else
102                         child->db_blkptr = NULL;
103                 dprintf_dbuf_bp(child, child->db_blkptr,
104                     "changed db_blkptr to new indirect %s", "");
105
106                 mutex_exit(&child->db_mtx);
107         }
108
109         bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);
110
111         dbuf_rele(db, FTAG);
112
113         rw_exit(&dn->dn_struct_rwlock);
114 }
115
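/*
 * Free the "num" blocks pointed to by the array starting at "bp", crediting
 * the space back to the dataset and subtracting it from the dnode's used
 * bytes.  Each freed pointer is zeroed; when the hole_birth feature is
 * active, the resulting hole keeps its logical size, type, level, and a
 * birth txg so that zfs send can tell when it was punched.
 */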
116 static void
117 free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
118 {
119         dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
120         uint64_t bytesfreed = 0;
121
122         dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);
123
124         for (int i = 0; i < num; i++, bp++) {
125                 if (BP_IS_HOLE(bp))
126                         continue;
127
128                 bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
129                 ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
130
131                 /*
132                  * Save some useful information on the holes being
133                  * punched, including logical size, type, and indirection
134                  * level. Retaining birth time enables detection of when
135                  * holes are punched for reducing the number of free
136                  * records transmitted during a zfs send.
137                  */
138
139                 uint64_t lsize = BP_GET_LSIZE(bp);
140                 dmu_object_type_t type = BP_GET_TYPE(bp);
141                 uint64_t lvl = BP_GET_LEVEL(bp);
142
143                 bzero(bp, sizeof (blkptr_t));
144
145                 if (spa_feature_is_active(dn->dn_objset->os_spa,
146                     SPA_FEATURE_HOLE_BIRTH)) {
147                         BP_SET_LSIZE(bp, lsize);
148                         BP_SET_TYPE(bp, type);
149                         BP_SET_LEVEL(bp, lvl);
150                         BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
151                 }
152         }
153         dnode_diduse_space(dn, -bytesfreed);
154 }
155
156 #ifdef ZFS_DEBUG
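/*
 * Debug-only check: for each level-0 block in [start, end] under the
 * level-1 indirect block "db", verify that the data dirtied in this txg
 * (if any) and the cached buffer contents have both been zeroed by the
 * free.
 */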
157 static void
158 free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
159 {
160         int off, num;
161         int i, err, epbs;
162         uint64_t txg = tx->tx_txg;
163         dnode_t *dn;
164
165         DB_DNODE_ENTER(db);
166         dn = DB_DNODE(db);
167         epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
168         off = start - (db->db_blkid * 1<<epbs);
169         num = end - start + 1;
170
171         ASSERT3U(off, >=, 0);
172         ASSERT3U(num, >=, 0);
173         ASSERT3U(db->db_level, >, 0);
174         ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
175         ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
176         ASSERT(db->db_blkptr != NULL);
177
178         for (i = off; i < off+num; i++) {
179                 uint64_t *buf;
180                 dmu_buf_impl_t *child;
181                 dbuf_dirty_record_t *dr;
182                 int j;
183
184                 ASSERT(db->db_level == 1);
185
186                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
187                 err = dbuf_hold_impl(dn, db->db_level-1,
188                     (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
189                 rw_exit(&dn->dn_struct_rwlock);
190                 if (err == ENOENT)
191                         continue;
192                 ASSERT(err == 0);
193                 ASSERT(child->db_level == 0);
194                 dr = child->db_last_dirty;
195                 while (dr && dr->dr_txg > txg)
196                         dr = dr->dr_next;
197                 ASSERT(dr == NULL || dr->dr_txg == txg);
198
199                 /* data_old better be zeroed */
200                 if (dr) {
201                         buf = dr->dt.dl.dr_data->b_data;
202                         for (j = 0; j < child->db.db_size >> 3; j++) {
203                                 if (buf[j] != 0) {
204                                         panic("freed data not zero: "
205                                             "child=%p i=%d off=%d num=%d\n",
206                                             (void *)child, i, off, num);
207                                 }
208                         }
209                 }
210
211                 /*
212                  * db_data better be zeroed unless it's dirty in a
213                  * future txg.
214                  */
215                 mutex_enter(&child->db_mtx);
216                 buf = child->db.db_data;
217                 if (buf != NULL && child->db_state != DB_FILL &&
218                     child->db_last_dirty == NULL) {
219                         for (j = 0; j < child->db.db_size >> 3; j++) {
220                                 if (buf[j] != 0) {
221                                         panic("freed data not zero: "
222                                             "child=%p i=%d off=%d num=%d\n",
223                                             (void *)child, i, off, num);
224                                 }
225                         }
226                 }
227                 mutex_exit(&child->db_mtx);
228
229                 dbuf_rele(child, FTAG);
230         }
231         DB_DNODE_EXIT(db);
232 }
233 #endif
234
235 #define ALL -1
236
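/*
 * Recursively free all blocks in the range [blkid, blkid + nblks) that fall
 * under the indirect block "db".  If that leaves "db" holding nothing but
 * holes, free "db" itself as well.
 */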
237 static void
238 free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
239     dmu_tx_t *tx)
240 {
241         dnode_t *dn;
242         blkptr_t *bp;
243         dmu_buf_impl_t *subdb;
244         uint64_t start, end, dbstart, dbend, i;
245         int epbs, shift;
246
247         /*
248          * There is a small possibility that this block will not be cached:
249          *   1 - if level > 1 and there are no children with level <= 1
250          *   2 - if this block was evicted since we read it from
251          *       dmu_tx_hold_free().
252          */
253         if (db->db_state != DB_CACHED)
254                 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
255
256         dbuf_release_bp(db);
257         bp = db->db.db_data;
258
259         DB_DNODE_ENTER(db);
260         dn = DB_DNODE(db);
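        /*
         * epbs is log2 of the number of block pointers that fit in one
         * indirect block (a blkptr_t is 1 << SPA_BLKPTRSHIFT bytes); e.g.
         * with 16K indirect blocks, epbs = 14 - 7 = 7, or 128 entries.
         * "shift" converts a level-0 blkid into an index at the level of
         * this block's children; start/end below are the child-level ids
         * of the requested range, clamped to the ids this block covers.
         */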
261         epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
262         shift = (db->db_level - 1) * epbs;
263         dbstart = db->db_blkid << epbs;
264         start = blkid >> shift;
265         if (dbstart < start) {
266                 bp += start - dbstart;
267         } else {
268                 start = dbstart;
269         }
270         dbend = ((db->db_blkid + 1) << epbs) - 1;
271         end = (blkid + nblks - 1) >> shift;
272         if (dbend <= end)
273                 end = dbend;
274
275         ASSERT3U(start, <=, end);
276
277         if (db->db_level == 1) {
278                 FREE_VERIFY(db, start, end, tx);
279                 free_blocks(dn, bp, end-start+1, tx);
280         } else {
281                 for (i = start; i <= end; i++, bp++) {
282                         if (BP_IS_HOLE(bp))
283                                 continue;
284                         rw_enter(&dn->dn_struct_rwlock, RW_READER);
285                         VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
286                             i, B_TRUE, FTAG, &subdb));
287                         rw_exit(&dn->dn_struct_rwlock);
288                         ASSERT3P(bp, ==, subdb->db_blkptr);
289
290                         free_children(subdb, blkid, nblks, tx);
291                         dbuf_rele(subdb, FTAG);
292                 }
293         }
294
295         /* If every entry in this indirect block is now a hole, free it too. */
296         for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
297                 if (!BP_IS_HOLE(bp))
298                         break;
299         }
300         if (i == 1 << epbs) {
301                 /* didn't find any non-holes */
302                 bzero(db->db.db_data, db->db.db_size);
303                 free_blocks(dn, db->db_blkptr, 1, tx);
304         } else {
305                 /*
306                  * Partial block free; must be marked dirty so that it
307                  * will be written out.
308                  */
309                 ASSERT(db->db_dirtycnt > 0);
310         }
311
312         DB_DNODE_EXIT(db);
313         arc_buf_freeze(db->db_buf);
314 }
315
316 /*
317  * Traverse the indicated range of the provided file
318  * and "free" all the blocks contained there.
319  */
320 static void
321 dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks,
322     dmu_tx_t *tx)
323 {
324         blkptr_t *bp = dn->dn_phys->dn_blkptr;
325         int dnlevel = dn->dn_phys->dn_nlevels;
326         boolean_t trunc = B_FALSE;
327
328         if (blkid > dn->dn_phys->dn_maxblkid)
329                 return;
330
331         ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
332         if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
333                 nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
334                 trunc = B_TRUE;
335         }
336
337         /* There are no indirect blocks in the object */
338         if (dnlevel == 1) {
339                 if (blkid >= dn->dn_phys->dn_nblkptr) {
340                         /* this range was never made persistent */
341                         return;
342                 }
343                 ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
344                 free_blocks(dn, bp + blkid, nblks, tx);
345         } else {
346                 int shift = (dnlevel - 1) *
347                     (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
348                 int start = blkid >> shift;
349                 int end = (blkid + nblks - 1) >> shift;
350                 dmu_buf_impl_t *db;
351
352                 ASSERT(start < dn->dn_phys->dn_nblkptr);
353                 bp += start;
354                 for (int i = start; i <= end; i++, bp++) {
355                         if (BP_IS_HOLE(bp))
356                                 continue;
357                         rw_enter(&dn->dn_struct_rwlock, RW_READER);
358                         VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
359                             TRUE, FTAG, &db));
360                         rw_exit(&dn->dn_struct_rwlock);
361
362                         free_children(db, blkid, nblks, tx);
363                         dbuf_rele(db, FTAG);
364
365                 }
366         }
367
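        /*
         * If the free extended to the end of the object, pull dn_maxblkid
         * back to just before the freed range and (in debug builds) verify
         * that nothing remains allocated beyond the new end.
         */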
368         if (trunc) {
369                 dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;
370
371                 uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
372                     (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
373                 ASSERT(off < dn->dn_phys->dn_maxblkid ||
374                     dn->dn_phys->dn_maxblkid == 0 ||
375                     dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
376         }
377 }
378
379 /*
380  * Try to kick all the dnode's dbufs out of the cache...
381  */
382 void
383 dnode_evict_dbufs(dnode_t *dn)
384 {
385         int progress;
386         int pass = 0;
387
388         do {
389                 dmu_buf_impl_t *db, marker;
390                 int evicting = FALSE;
391
392                 progress = FALSE;
393                 mutex_enter(&dn->dn_dbufs_mtx);
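                /*
                 * Walk dn_dbufs using a marker: each dbuf we examine is
                 * moved to the tail (or removed by dbuf_clear()), so the
                 * pass is complete once the marker reaches the head of
                 * the list.
                 */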
394                 list_insert_tail(&dn->dn_dbufs, &marker);
395                 db = list_head(&dn->dn_dbufs);
396                 for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
397                         list_remove(&dn->dn_dbufs, db);
398                         list_insert_tail(&dn->dn_dbufs, db);
399 #ifdef  DEBUG
400                         DB_DNODE_ENTER(db);
401                         ASSERT3P(DB_DNODE(db), ==, dn);
402                         DB_DNODE_EXIT(db);
403 #endif  /* DEBUG */
404
405                         mutex_enter(&db->db_mtx);
406                         if (db->db_state == DB_EVICTING) {
407                                 progress = TRUE;
408                                 evicting = TRUE;
409                                 mutex_exit(&db->db_mtx);
410                         } else if (refcount_is_zero(&db->db_holds)) {
411                                 progress = TRUE;
412                                 dbuf_clear(db); /* exits db_mtx for us */
413                         } else {
414                                 mutex_exit(&db->db_mtx);
415                         }
416
417                 }
418                 list_remove(&dn->dn_dbufs, &marker);
419                 /*
420                  * NB: we need to drop dn_dbufs_mtx between passes so
421                  * that any DB_EVICTING dbufs can make progress.
422                  * Ideally, we would have some cv we could wait on, but
423                  * since we don't, just wait a bit to give the other
424                  * thread a chance to run.
425                  */
426                 mutex_exit(&dn->dn_dbufs_mtx);
427                 if (evicting)
428                         delay(1);
429                 pass++;
430                 ASSERT(pass < 100); /* sanity check */
431         } while (progress);
432
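        /* Evict the bonus buffer too, if nothing else holds it. */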
433         rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
434         if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
435                 mutex_enter(&dn->dn_bonus->db_mtx);
436                 dbuf_evict(dn->dn_bonus);
437                 dn->dn_bonus = NULL;
438         }
439         rw_exit(&dn->dn_struct_rwlock);
440 }
441
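/*
 * Throw away the dirty records on "list" without writing them out, recursing
 * through the children of dirty indirect dbufs and dropping the hold that
 * was taken on each dbuf when it was dirtied.  Used when the dnode itself is
 * being freed, so none of this data needs to reach disk.
 */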
442 static void
443 dnode_undirty_dbufs(list_t *list)
444 {
445         dbuf_dirty_record_t *dr;
446
447         while (dr = list_head(list)) {
448                 dmu_buf_impl_t *db = dr->dr_dbuf;
449                 uint64_t txg = dr->dr_txg;
450
451                 if (db->db_level != 0)
452                         dnode_undirty_dbufs(&dr->dt.di.dr_children);
453
454                 mutex_enter(&db->db_mtx);
455                 /* XXX - use dbuf_undirty()? */
456                 list_remove(list, dr);
457                 ASSERT(db->db_last_dirty == dr);
458                 db->db_last_dirty = NULL;
459                 db->db_dirtycnt -= 1;
460                 if (db->db_level == 0) {
461                         ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
462                             dr->dt.dl.dr_data == db->db_buf);
463                         dbuf_unoverride(dr);
464                 } else {
465                         list_destroy(&dr->dt.di.dr_children);
466                         mutex_destroy(&dr->dt.di.dr_mtx);
467                 }
468                 kmem_free(dr, sizeof (dbuf_dirty_record_t));
469                 dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
470         }
471 }
472
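/*
 * Sync a dnode that is being freed in this txg: discard its dirty records,
 * evict its dbufs, zero the on-disk dnode, reset the in-core state, and
 * drop the hold that kept the dnode around while it was dirty.
 */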
473 static void
474 dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
475 {
476         int txgoff = tx->tx_txg & TXG_MASK;
477
478         ASSERT(dmu_tx_is_syncing(tx));
479
480         /*
481          * Our contents should have been freed in dnode_sync() by the
482          * free range record inserted by the caller of dnode_free().
483          */
484         ASSERT0(DN_USED_BYTES(dn->dn_phys));
485         ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));
486
487         dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
488         dnode_evict_dbufs(dn);
489         ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);
490         ASSERT3P(dn->dn_bonus, ==, NULL);
491
492         /*
493          * XXX - It would be nice to assert this, but we may still
494          * have residual holds from async evictions from the arc...
495          *
496          * zfs_obj_to_path() also depends on this being
497          * commented out.
498          *
499          * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
500          */
501
502         /* Clear the pending (dn_next_*) changes recorded for this txg */
503         dn->dn_next_nlevels[txgoff] = 0;
504         dn->dn_next_indblkshift[txgoff] = 0;
505         dn->dn_next_blksz[txgoff] = 0;
506
507         /* ASSERT(blkptrs are zero); */
508         ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
509         ASSERT(dn->dn_type != DMU_OT_NONE);
510
511         ASSERT(dn->dn_free_txg > 0);
512         if (dn->dn_allocated_txg != dn->dn_free_txg)
513                 dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
514         bzero(dn->dn_phys, sizeof (dnode_phys_t));
515
516         mutex_enter(&dn->dn_mtx);
517         dn->dn_type = DMU_OT_NONE;
518         dn->dn_maxblkid = 0;
519         dn->dn_allocated_txg = 0;
520         dn->dn_free_txg = 0;
521         dn->dn_have_spill = B_FALSE;
522         mutex_exit(&dn->dn_mtx);
523
524         ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
525
526         dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
527         /*
528          * Now that we've released our hold, the dnode may
529          * be evicted, so we mustn't access it.
530          */
531 }
532
533 /*
534  * Write out the dnode's dirty buffers.
535  */
536 void
537 dnode_sync(dnode_t *dn, dmu_tx_t *tx)
538 {
539         free_range_t *rp;
540         dnode_phys_t *dnp = dn->dn_phys;
541         int txgoff = tx->tx_txg & TXG_MASK;
542         list_t *list = &dn->dn_dirty_records[txgoff];
543         static const dnode_phys_t zerodn = { 0 };
544         boolean_t kill_spill = B_FALSE;
545
546         ASSERT(dmu_tx_is_syncing(tx));
547         ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
548         ASSERT(dnp->dn_type != DMU_OT_NONE ||
549             bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
550         DNODE_VERIFY(dn);
551
552         ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
553
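        /*
         * Record the pre-sync space usage and flags so that user/group
         * space accounting can be updated for this object, and mark the
         * object as accounted for.
         */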
554         if (dmu_objset_userused_enabled(dn->dn_objset) &&
555             !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
556                 mutex_enter(&dn->dn_mtx);
557                 dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
558                 dn->dn_oldflags = dn->dn_phys->dn_flags;
559                 dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
560                 mutex_exit(&dn->dn_mtx);
561                 dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
562         } else {
563                 /* Once we account for it, we should always account for it. */
564                 ASSERT(!(dn->dn_phys->dn_flags &
565                     DNODE_FLAG_USERUSED_ACCOUNTED));
566         }
567
568         mutex_enter(&dn->dn_mtx);
569         if (dn->dn_allocated_txg == tx->tx_txg) {
570                 /* The dnode is newly allocated or reallocated */
571                 if (dnp->dn_type == DMU_OT_NONE) {
572                         /* this is a first alloc, not a realloc */
573                         dnp->dn_nlevels = 1;
574                         dnp->dn_nblkptr = dn->dn_nblkptr;
575                 }
576
577                 dnp->dn_type = dn->dn_type;
578                 dnp->dn_bonustype = dn->dn_bonustype;
579                 dnp->dn_bonuslen = dn->dn_bonuslen;
580         }
581
582         ASSERT(dnp->dn_nlevels > 1 ||
583             BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
584             BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
585             dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
586
587         if (dn->dn_next_type[txgoff] != 0) {
588                 dnp->dn_type = dn->dn_type;
589                 dn->dn_next_type[txgoff] = 0;
590         }
591
592         if (dn->dn_next_blksz[txgoff] != 0) {
593                 ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
594                     SPA_MINBLOCKSIZE) == 0);
595                 ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
596                     dn->dn_maxblkid == 0 || list_head(list) != NULL ||
597                     avl_last(&dn->dn_ranges[txgoff]) ||
598                     dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
599                     dnp->dn_datablkszsec);
600                 dnp->dn_datablkszsec =
601                     dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
602                 dn->dn_next_blksz[txgoff] = 0;
603         }
604
605         if (dn->dn_next_bonuslen[txgoff] != 0) {
606                 if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
607                         dnp->dn_bonuslen = 0;
608                 else
609                         dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
610                 ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
611                 dn->dn_next_bonuslen[txgoff] = 0;
612         }
613
614         if (dn->dn_next_bonustype[txgoff] != 0) {
615                 ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
616                 dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
617                 dn->dn_next_bonustype[txgoff] = 0;
618         }
619
620         boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
621             dn->dn_free_txg <= tx->tx_txg;
622
623         /*
624          * The spill block is removed either because the file is being
625          * removed or because we were explicitly asked to remove it.
626          */
627         if (dn->dn_rm_spillblk[txgoff] ||
628             ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) && freeing_dnode)) {
629                 if ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
630                         kill_spill = B_TRUE;
631                 dn->dn_rm_spillblk[txgoff] = 0;
632         }
633
634         if (dn->dn_next_indblkshift[txgoff] != 0) {
635                 ASSERT(dnp->dn_nlevels == 1);
636                 dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
637                 dn->dn_next_indblkshift[txgoff] = 0;
638         }
639
640         /*
641          * Just take the live (open-context) values for checksum and compress.
642          * Strictly speaking it's a future leak, but nothing bad happens if we
643          * start using the new checksum or compress algorithm a little early.
644          */
645         dnp->dn_checksum = dn->dn_checksum;
646         dnp->dn_compress = dn->dn_compress;
647
648         mutex_exit(&dn->dn_mtx);
649
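        /* Free the spill block now, then clear its flag under dn_mtx. */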
650         if (kill_spill) {
651                 free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
652                 mutex_enter(&dn->dn_mtx);
653                 dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
654                 mutex_exit(&dn->dn_mtx);
655         }
656
657         /* process all the "freed" ranges in the file */
658         while (rp = avl_last(&dn->dn_ranges[txgoff])) {
659                 dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
660                 /* grab the mutex so we don't race with dnode_block_freed() */
661                 mutex_enter(&dn->dn_mtx);
662                 avl_remove(&dn->dn_ranges[txgoff], rp);
663                 mutex_exit(&dn->dn_mtx);
664                 kmem_free(rp, sizeof (free_range_t));
665         }
666
667         if (freeing_dnode) {
668                 dnode_sync_free(dn, tx);
669                 return;
670         }
671
672         if (dn->dn_next_nblkptr[txgoff]) {
673                 /* this should only happen on a realloc */
674                 ASSERT(dn->dn_allocated_txg == tx->tx_txg);
675                 if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
676                         /* zero the new blkptrs we are gaining */
677                         bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
678                             sizeof (blkptr_t) *
679                             (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
680 #ifdef ZFS_DEBUG
681                 } else {
682                         int i;
683                         ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
684                         /* the blkptrs we are losing better be unallocated */
685                         for (i = dn->dn_next_nblkptr[txgoff];
686                             i < dnp->dn_nblkptr; i++)
687                                 ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
688 #endif
689                 }
690                 mutex_enter(&dn->dn_mtx);
691                 dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
692                 dn->dn_next_nblkptr[txgoff] = 0;
693                 mutex_exit(&dn->dn_mtx);
694         }
695
696         if (dn->dn_next_nlevels[txgoff]) {
697                 dnode_increase_indirection(dn, tx);
698                 dn->dn_next_nlevels[txgoff] = 0;
699         }
700
701         dbuf_sync_list(list, tx);
702
703         if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
704                 ASSERT3P(list_head(list), ==, NULL);
705                 dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
706         }
707
708         /*
709          * Although we have dropped our reference to the dnode, it
710          * can't be evicted until it's written, and we haven't yet
711          * initiated the IO for the dnode's dbuf.
712          */
713 }