/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
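
/*
 * When a dirty dnode's indirection level grows in this txg, migrate the
 * block pointers embedded in the dnode into the new top-level indirect
 * block and re-parent any cached child dbufs onto that new buffer.
 */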
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
    dmu_buf_impl_t *db;
    int txgoff = tx->tx_txg & TXG_MASK;
    int nblkptr = dn->dn_phys->dn_nblkptr;
    int old_toplvl = dn->dn_phys->dn_nlevels - 1;
    int new_level = dn->dn_next_nlevels[txgoff];
    int i;

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

    /* this dnode can't be paged out because it's dirty */
    ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
    ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
    ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

    db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);

    dn->dn_phys->dn_nlevels = new_level;
    dprintf("os=%p obj=%llu, increase to %d\n",
        dn->dn_objset, dn->dn_object, dn->dn_phys->dn_nlevels);

    /* check for existing blkptrs in the dnode */
    for (i = 0; i < nblkptr; i++)
        if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
            break;
    if (i != nblkptr) {
        /* transfer dnode's block pointers to new indirect block */
        (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
        ASSERT(db->db.db_data);
        ASSERT(arc_released(db->db_buf));
        ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
        bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
            sizeof (blkptr_t) * nblkptr);
        arc_buf_freeze(db->db_buf);
    }

    /* set dbuf's parent pointers to new indirect buf */
    for (i = 0; i < nblkptr; i++) {
        dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

        if (child == NULL)
            continue;
        ASSERT3P(child->db_dnode, ==, dn);
        if (child->db_parent && child->db_parent != dn->dn_dbuf) {
            ASSERT(child->db_parent->db_level == db->db_level);
            ASSERT(child->db_blkptr !=
                &dn->dn_phys->dn_blkptr[child->db_blkid]);
            mutex_exit(&child->db_mtx);
            continue;
        }
        ASSERT(child->db_parent == NULL ||
            child->db_parent == dn->dn_dbuf);

        child->db_parent = db;
        dbuf_add_ref(db, child);
        if (db->db.db_data)
            child->db_blkptr = (blkptr_t *)db->db.db_data + i;
        else
            child->db_blkptr = NULL;
        dprintf_dbuf_bp(child, child->db_blkptr,
            "changed db_blkptr to new indirect %s", "");

        mutex_exit(&child->db_mtx);
    }

    bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

    dbuf_rele(db, FTAG);

    rw_exit(&dn->dn_struct_rwlock);
}
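
/*
 * Free (and zero in place) the given array of block pointers, charging
 * the reclaimed space back against the dnode's space accounting.
 */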
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
    objset_impl_t *os = dn->dn_objset;
    uint64_t bytesfreed = 0;
    int i;

    dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

    for (i = 0; i < num; i++, bp++) {
        if (BP_IS_HOLE(bp))
            continue;

        bytesfreed += bp_get_dasize(os->os_spa, bp);
        ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
        dsl_dataset_block_kill(os->os_dsl_dataset, bp, dn->dn_zio, tx);
        bzero(bp, sizeof (blkptr_t));
    }
    dnode_diduse_space(dn, -bytesfreed);
}
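
/*
 * Debug-only sanity check used while freeing a range: every freed block
 * under this level-1 indirect block should now be zero, both in any dirty
 * record for this txg and in the cached dbuf contents.
 */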
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
    int off, num;
    int i, err, epbs;
    uint64_t txg = tx->tx_txg;

    epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
    off = start - (db->db_blkid * 1<<epbs);
    num = end - start + 1;

    ASSERT3U(off, >=, 0);
    ASSERT3U(num, >=, 0);
    ASSERT3U(db->db_level, >, 0);
    ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
    ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
    ASSERT(db->db_blkptr != NULL);

    for (i = off; i < off+num; i++) {
        uint64_t *buf;
        dmu_buf_impl_t *child;
        dbuf_dirty_record_t *dr;
        int j;

        ASSERT(db->db_level == 1);

        rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
        err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
            (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
        rw_exit(&db->db_dnode->dn_struct_rwlock);
        if (err == ENOENT)
            continue;
        ASSERT(err == 0);
        ASSERT(child->db_level == 0);
        dr = child->db_last_dirty;
        while (dr && dr->dr_txg > txg)
            dr = dr->dr_next;
        ASSERT(dr == NULL || dr->dr_txg == txg);

        /* data_old better be zeroed */
        if (dr) {
            buf = dr->dt.dl.dr_data->b_data;
            for (j = 0; j < child->db.db_size >> 3; j++) {
                if (buf[j] != 0) {
                    panic("freed data not zero: "
                        "child=%p i=%d off=%d num=%d\n",
                        child, i, off, num);
                }
            }
        }

        /*
         * db_data better be zeroed unless it's dirty in a
         * future txg.
         */
        mutex_enter(&child->db_mtx);
        buf = child->db.db_data;
        if (buf != NULL && child->db_state != DB_FILL &&
            child->db_last_dirty == NULL) {
            for (j = 0; j < child->db.db_size >> 3; j++) {
                if (buf[j] != 0) {
                    panic("freed data not zero: "
                        "child=%p i=%d off=%d num=%d\n",
                        child, i, off, num);
                }
            }
        }
        mutex_exit(&child->db_mtx);

        dbuf_rele(child, FTAG);
    }
}
#endif
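
/*
 * Recursively free the blocks below this indirect block that fall within
 * [blkid, blkid + nblks).  Returns TRUE when the entire range covered by
 * this indirect block has been freed, so the caller can free its block
 * pointer as well.
 */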
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
    dnode_t *dn = db->db_dnode;
    blkptr_t *bp;
    dmu_buf_impl_t *subdb;
    uint64_t start, end, dbstart, dbend, i;
    int epbs, shift, err;
    int all = TRUE;

    (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
    arc_release(db->db_buf, db);
    bp = (blkptr_t *)db->db.db_data;

    epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
    shift = (db->db_level - 1) * epbs;
    dbstart = db->db_blkid << epbs;
    start = blkid >> shift;
    if (dbstart < start) {
        bp += start - dbstart;
        all = FALSE;
    } else {
        start = dbstart;
    }
    dbend = ((db->db_blkid + 1) << epbs) - 1;
    end = (blkid + nblks - 1) >> shift;
    if (dbend <= end)
        end = dbend;
    else if (all)
        all = trunc;
    ASSERT3U(start, <=, end);
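
    /*
     * [start, end] is now the slice of this block's pointers being freed;
     * "all" remains TRUE only if that slice spans the whole block (or we
     * are truncating), i.e. the indirect block itself can be freed.
     */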
    if (db->db_level == 1) {
        FREE_VERIFY(db, start, end, tx);
        free_blocks(dn, bp, end-start+1, tx);
        arc_buf_freeze(db->db_buf);
        ASSERT(all || db->db_last_dirty);
        return (all);
    }
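
    /*
     * Not a leaf: recurse into each non-hole child and free its block
     * pointer here once the recursion reports the child is now empty.
     */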
    for (i = start; i <= end; i++, bp++) {
        if (BP_IS_HOLE(bp))
            continue;
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
        ASSERT3U(err, ==, 0);
        rw_exit(&dn->dn_struct_rwlock);

        if (free_children(subdb, blkid, nblks, trunc, tx)) {
            ASSERT3P(subdb->db_blkptr, ==, bp);
            free_blocks(dn, bp, 1, tx);
        } else {
            all = FALSE;
        }
        dbuf_rele(subdb, FTAG);
    }
    arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
    bp -= (end-start)+1;
    for (i = start; i <= end; i++, bp++) {
        if (i == start && blkid != 0)
            continue;
        else if (i == end && !trunc)
            continue;
        ASSERT3U(bp->blk_birth, ==, 0);
    }
#endif
    ASSERT(all || db->db_last_dirty);
    return (all);
}

/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
    blkptr_t *bp = dn->dn_phys->dn_blkptr;
    dmu_buf_impl_t *db;
    int trunc, start, end, shift, i, err;
    int dnlevel = dn->dn_phys->dn_nlevels;

    if (blkid > dn->dn_phys->dn_maxblkid)
        return;

    ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
    trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
    if (trunc)
        nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

    /* There are no indirect blocks in the object */
    if (dnlevel == 1) {
        if (blkid >= dn->dn_phys->dn_nblkptr) {
            /* this range was never made persistent */
            return;
        }
        ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
        free_blocks(dn, bp + blkid, nblks, tx);
        if (trunc) {
            uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
                (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
            dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
            ASSERT(off < dn->dn_phys->dn_maxblkid ||
                dn->dn_phys->dn_maxblkid == 0 ||
                dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
        }
        return;
    }

    shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
    start = blkid >> shift;
    ASSERT(start < dn->dn_phys->dn_nblkptr);
    end = (blkid + nblks - 1) >> shift;
    bp += start;
    for (i = start; i <= end; i++, bp++) {
        if (BP_IS_HOLE(bp))
            continue;
        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
        ASSERT3U(err, ==, 0);
        rw_exit(&dn->dn_struct_rwlock);

        if (free_children(db, blkid, nblks, trunc, tx)) {
            ASSERT3P(db->db_blkptr, ==, bp);
            free_blocks(dn, bp, 1, tx);
        }
        dbuf_rele(db, FTAG);
    }
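
    /*
     * If this was a truncation, pull dn_maxblkid back to just before the
     * freed range; the ASSERT checks that nothing remains beyond it.
     */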
    if (trunc) {
        uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
            (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
        ASSERT(off < dn->dn_phys->dn_maxblkid ||
            dn->dn_phys->dn_maxblkid == 0 ||
            dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
    }
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
int
dnode_evict_dbufs(dnode_t *dn, int try)
{
    int progress;
    int pass = 0;

    do {
        dmu_buf_impl_t *db, marker;
        int evicting = FALSE;

        progress = FALSE;
        mutex_enter(&dn->dn_dbufs_mtx);
        list_insert_tail(&dn->dn_dbufs, &marker);
        db = list_head(&dn->dn_dbufs);
        for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
            list_remove(&dn->dn_dbufs, db);
            list_insert_tail(&dn->dn_dbufs, db);

            mutex_enter(&db->db_mtx);
            if (db->db_state == DB_EVICTING) {
                progress = TRUE;
                evicting = TRUE;
                mutex_exit(&db->db_mtx);
            } else if (refcount_is_zero(&db->db_holds)) {
                progress = TRUE;
                ASSERT(!arc_released(db->db_buf));
                dbuf_clear(db); /* exits db_mtx for us */
            } else {
                mutex_exit(&db->db_mtx);
            }
        }
        list_remove(&dn->dn_dbufs, &marker);
        /*
         * NB: we need to drop dn_dbufs_mtx between passes so
         * that any DB_EVICTING dbufs can make progress.
         * Ideally, we would have some cv we could wait on, but
         * since we don't, just wait a bit to give the other
         * thread a chance to run.
         */
        mutex_exit(&dn->dn_dbufs_mtx);
        if (evicting)
            delay(1);
        pass++;
        ASSERT(pass < 100); /* sanity check */
    } while (progress);

    /*
     * This function works fine even if it can't evict everything.
     * If we were only asked to try to evict everything then
     * return an error if we can't.  Otherwise panic as the caller
     * expects total eviction.
     */
    if (list_head(&dn->dn_dbufs) != NULL) {
        if (try) {
            return (1);
        } else {
            panic("dangling dbufs (dn=%p, dbuf=%p)\n",
                dn, list_head(&dn->dn_dbufs));
        }
    }

    rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
    if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
        mutex_enter(&dn->dn_bonus->db_mtx);
        dbuf_evict(dn->dn_bonus);
        dn->dn_bonus = NULL;
    }
    rw_exit(&dn->dn_struct_rwlock);
    return (0);
}
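
/*
 * Throw away the dirty records on this list without writing them out;
 * used when the dnode itself is being freed in this txg.
 */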
static void
dnode_undirty_dbufs(list_t *list)
{
    dbuf_dirty_record_t *dr;

    while (dr = list_head(list)) {
        dmu_buf_impl_t *db = dr->dr_dbuf;
        uint64_t txg = dr->dr_txg;

        mutex_enter(&db->db_mtx);
        /* XXX - use dbuf_undirty()? */
        list_remove(list, dr);
        ASSERT(db->db_last_dirty == dr);
        db->db_last_dirty = NULL;
        db->db_dirtycnt -= 1;
        if (db->db_level == 0) {
            ASSERT(db->db_blkid == DB_BONUS_BLKID ||
                dr->dt.dl.dr_data == db->db_buf);
            mutex_exit(&db->db_mtx);
        } else {
            mutex_exit(&db->db_mtx);
            dnode_undirty_dbufs(&dr->dt.di.dr_children);
            list_destroy(&dr->dt.di.dr_children);
            mutex_destroy(&dr->dt.di.dr_mtx);
        }
        kmem_free(dr, sizeof (dbuf_dirty_record_t));
        dbuf_rele(db, (void *)(uintptr_t)txg);
    }
}
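
/*
 * Sync-context path for a dnode being freed in this txg: discard its dirty
 * state, free every block it references, and zero the on-disk dnode so the
 * object slot can be reused.
 */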
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
    int txgoff = tx->tx_txg & TXG_MASK;

    ASSERT(dmu_tx_is_syncing(tx));

    dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
    (void) dnode_evict_dbufs(dn, 0);
    ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

    /*
     * XXX - It would be nice to assert this, but we may still
     * have residual holds from async evictions from the arc...
     *
     * zfs_obj_to_path() also depends on this being
     * commented out.
     *
     * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
     */

    /* Undirty next bits */
    dn->dn_next_nlevels[txgoff] = 0;
    dn->dn_next_indblkshift[txgoff] = 0;
    dn->dn_next_blksz[txgoff] = 0;

    /* free up all the blocks in the file. */
    dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
    ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);

    /* ASSERT(blkptrs are zero); */
    ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
    ASSERT(dn->dn_type != DMU_OT_NONE);

    ASSERT(dn->dn_free_txg > 0);
    if (dn->dn_allocated_txg != dn->dn_free_txg)
        dbuf_will_dirty(dn->dn_dbuf, tx);
    bzero(dn->dn_phys, sizeof (dnode_phys_t));

    mutex_enter(&dn->dn_mtx);
    dn->dn_type = DMU_OT_NONE;
    dn->dn_maxblkid = 0;
    dn->dn_allocated_txg = 0;
    mutex_exit(&dn->dn_mtx);

    ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

    dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
    /*
     * Now that we've released our hold, the dnode may
     * be evicted, so we mustn't access it.
     */
}

/*
 * Write out the dnode's dirty buffers.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
    free_range_t *rp;
    dnode_phys_t *dnp = dn->dn_phys;
    int txgoff = tx->tx_txg & TXG_MASK;
    list_t *list = &dn->dn_dirty_records[txgoff];

    ASSERT(dmu_tx_is_syncing(tx));
    ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);

    ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

    mutex_enter(&dn->dn_mtx);
    if (dn->dn_allocated_txg == tx->tx_txg) {
        /* The dnode is newly allocated or reallocated */
        if (dnp->dn_type == DMU_OT_NONE) {
            /* this is a first alloc, not a realloc */
            /* XXX shouldn't the phys already be zeroed? */
            bzero(dnp, DNODE_CORE_SIZE);
            dnp->dn_nlevels = 1;
        }

        if (dn->dn_nblkptr > dnp->dn_nblkptr) {
            /* zero the new blkptrs we are gaining */
            bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
                sizeof (blkptr_t) *
                (dn->dn_nblkptr - dnp->dn_nblkptr));
        }
        dnp->dn_type = dn->dn_type;
        dnp->dn_bonustype = dn->dn_bonustype;
        dnp->dn_bonuslen = dn->dn_bonuslen;
        dnp->dn_nblkptr = dn->dn_nblkptr;
    }

    ASSERT(dnp->dn_nlevels > 1 ||
        BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
        BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
        dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
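
    /*
     * Apply a pending data block size change for this txg.  The ASSERT
     * checks that this only happens while the first block is a hole, is
     * itself dirty in this txg, or the size is actually unchanged.
     */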
    if (dn->dn_next_blksz[txgoff]) {
        ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
            SPA_MINBLOCKSIZE) == 0);
        ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
            list_head(list) != NULL ||
            dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
            dnp->dn_datablkszsec);
        dnp->dn_datablkszsec =
            dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
        dn->dn_next_blksz[txgoff] = 0;
    }

    if (dn->dn_next_indblkshift[txgoff]) {
        ASSERT(dnp->dn_nlevels == 1);
        dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
        dn->dn_next_indblkshift[txgoff] = 0;
    }

    /*
     * Just take the live (open-context) values for checksum and compress.
     * Strictly speaking it's a future leak, but nothing bad happens if we
     * start using the new checksum or compress algorithm a little early.
     */
    dnp->dn_checksum = dn->dn_checksum;
    dnp->dn_compress = dn->dn_compress;

    mutex_exit(&dn->dn_mtx);

    /* process all the "freed" ranges in the file */
    if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
        for (rp = avl_last(&dn->dn_ranges[txgoff]); rp != NULL;
            rp = AVL_PREV(&dn->dn_ranges[txgoff], rp))
            dnode_sync_free_range(dn,
                rp->fr_blkid, rp->fr_nblks, tx);
    }
    mutex_enter(&dn->dn_mtx);
    for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
        free_range_t *last = rp;
        rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
        avl_remove(&dn->dn_ranges[txgoff], last);
        kmem_free(last, sizeof (free_range_t));
    }
    mutex_exit(&dn->dn_mtx);
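
    /* if the dnode itself was freed in this txg, finish that now */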
    if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
        dnode_sync_free(dn, tx);
        return;
    }

    if (dn->dn_next_nlevels[txgoff]) {
        dnode_increase_indirection(dn, tx);
        dn->dn_next_nlevels[txgoff] = 0;
    }
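
    /* write out (or recurse into) the dirty dbufs recorded for this txg */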
    dbuf_sync_list(list, tx);

    if (dn->dn_object != DMU_META_DNODE_OBJECT) {
        ASSERT3P(list_head(list), ==, NULL);
        dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
    }

    /*
     * Although we have dropped our reference to the dnode, it
     * can't be evicted until it's written, and we haven't yet
     * initiated the IO for the dnode's dbuf.
     */
}