4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
29 #include <sys/dmu_impl.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/dmu_objset.h>
33 #include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
34 #include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
35 #include <sys/dsl_pool.h>
36 #include <sys/zap_impl.h> /* for fzap_default_block_shift */
39 #include <sys/sa_impl.h>
40 #include <sys/zfs_context.h>
41 #include <sys/varargs.h>
43 typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
44 uint64_t arg1, uint64_t arg2);
48 dmu_tx_create_dd(dsl_dir_t *dd)
50 dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
53 tx->tx_pool = dd->dd_pool;
54 list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
55 offsetof(dmu_tx_hold_t, txh_node));
56 list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
57 offsetof(dmu_tx_callback_t, dcb_node));
58 tx->tx_start = gethrtime();
60 refcount_create(&tx->tx_space_written);
61 refcount_create(&tx->tx_space_freed);
67 dmu_tx_create(objset_t *os)
69 dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
71 tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
76 dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
78 dmu_tx_t *tx = dmu_tx_create_dd(NULL);
80 ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
89 dmu_tx_is_syncing(dmu_tx_t *tx)
91 return (tx->tx_anyobj);
95 dmu_tx_private_ok(dmu_tx_t *tx)
97 return (tx->tx_anyobj);
100 static dmu_tx_hold_t *
101 dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
102 enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
108 if (object != DMU_NEW_OBJECT) {
109 err = dnode_hold(os, object, tx, &dn);
115 if (err == 0 && tx->tx_txg != 0) {
116 mutex_enter(&dn->dn_mtx);
118 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
119 * problem, but there's no way for it to happen (for
122 ASSERT(dn->dn_assigned_txg == 0);
123 dn->dn_assigned_txg = tx->tx_txg;
124 (void) refcount_add(&dn->dn_tx_holds, tx);
125 mutex_exit(&dn->dn_mtx);
129 txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
132 refcount_create(&txh->txh_space_towrite);
133 refcount_create(&txh->txh_space_tofree);
134 refcount_create(&txh->txh_space_tooverwrite);
135 refcount_create(&txh->txh_space_tounref);
136 refcount_create(&txh->txh_memory_tohold);
137 refcount_create(&txh->txh_fudge);
139 txh->txh_type = type;
140 txh->txh_arg1 = arg1;
141 txh->txh_arg2 = arg2;
143 list_insert_tail(&tx->tx_holds, txh);
149 dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
152 * If we're syncing, they can manipulate any object anyhow, and
153 * the hold on the dnode_t can cause problems.
155 if (!dmu_tx_is_syncing(tx)) {
156 (void) dmu_tx_hold_object_impl(tx, os,
157 object, THT_NEWOBJECT, 0, 0);
162 dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
167 rw_enter(&dn->dn_struct_rwlock, RW_READER);
168 db = dbuf_hold_level(dn, level, blkid, FTAG);
169 rw_exit(&dn->dn_struct_rwlock);
171 return (SET_ERROR(EIO));
172 err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
178 dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
179 int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
181 objset_t *os = dn->dn_objset;
182 dsl_dataset_t *ds = os->os_dsl_dataset;
183 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
184 dmu_buf_impl_t *parent = NULL;
188 if (level >= dn->dn_nlevels || history[level] == blkid)
191 history[level] = blkid;
193 space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);
195 if (db == NULL || db == dn->dn_dbuf) {
199 ASSERT(DB_DNODE(db) == dn);
200 ASSERT(db->db_level == level);
201 ASSERT(db->db.db_size == space);
202 ASSERT(db->db_blkid == blkid);
204 parent = db->db_parent;
207 freeable = (bp && (freeable ||
208 dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));
211 (void) refcount_add_many(&txh->txh_space_tooverwrite,
214 (void) refcount_add_many(&txh->txh_space_towrite,
219 (void) refcount_add_many(&txh->txh_space_tounref,
220 bp_get_dsize(os->os_spa, bp), FTAG);
223 dmu_tx_count_twig(txh, dn, parent, level + 1,
224 blkid >> epbs, freeable, history);
229 dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
231 dnode_t *dn = txh->txh_dnode;
232 uint64_t start, end, i;
233 int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
239 min_bs = SPA_MINBLOCKSHIFT;
240 max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
241 min_ibs = DN_MIN_INDBLKSHIFT;
242 max_ibs = DN_MAX_INDBLKSHIFT;
245 uint64_t history[DN_MAX_LEVELS];
246 int nlvls = dn->dn_nlevels;
250 * For i/o error checking, read the first and last level-0
251 * blocks (if they are not aligned), and all the level-1 blocks.
253 if (dn->dn_maxblkid == 0) {
254 delta = dn->dn_datablksz;
255 start = (off < dn->dn_datablksz) ? 0 : 1;
256 end = (off+len <= dn->dn_datablksz) ? 0 : 1;
257 if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
258 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
264 zio_t *zio = zio_root(dn->dn_objset->os_spa,
265 NULL, NULL, ZIO_FLAG_CANFAIL);
267 /* first level-0 block */
268 start = off >> dn->dn_datablkshift;
269 if (P2PHASE(off, dn->dn_datablksz) ||
270 len < dn->dn_datablksz) {
271 err = dmu_tx_check_ioerr(zio, dn, 0, start);
276 /* last level-0 block */
277 end = (off+len-1) >> dn->dn_datablkshift;
278 if (end != start && end <= dn->dn_maxblkid &&
279 P2PHASE(off+len, dn->dn_datablksz)) {
280 err = dmu_tx_check_ioerr(zio, dn, 0, end);
287 int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
288 for (i = (start>>shft)+1; i < end>>shft; i++) {
289 err = dmu_tx_check_ioerr(zio, dn, 1, i);
298 delta = P2NPHASE(off, dn->dn_datablksz);
301 min_ibs = max_ibs = dn->dn_indblkshift;
302 if (dn->dn_maxblkid > 0) {
304 * The blocksize can't change,
305 * so we can make a more precise estimate.
307 ASSERT(dn->dn_datablkshift != 0);
308 min_bs = max_bs = dn->dn_datablkshift;
311 * The blocksize can increase up to the recordsize,
312 * or if it is already more than the recordsize,
313 * up to the next power of 2.
315 min_bs = highbit64(dn->dn_datablksz - 1);
316 max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
320 * If this write is not off the end of the file
321 * we need to account for overwrites/unref.
323 if (start <= dn->dn_maxblkid) {
324 for (int l = 0; l < DN_MAX_LEVELS; l++)
327 while (start <= dn->dn_maxblkid) {
330 rw_enter(&dn->dn_struct_rwlock, RW_READER);
331 err = dbuf_hold_impl(dn, 0, start,
332 FALSE, FALSE, FTAG, &db);
333 rw_exit(&dn->dn_struct_rwlock);
336 txh->txh_tx->tx_err = err;
340 dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
345 * Account for new indirects appearing
346 * before this IO gets assigned into a txg.
349 epbs = min_ibs - SPA_BLKPTRSHIFT;
350 for (bits -= epbs * (nlvls - 1);
351 bits >= 0; bits -= epbs) {
352 (void) refcount_add_many(
354 1ULL << max_ibs, FTAG);
361 delta = dn->dn_datablksz;
366 * 'end' is the last thing we will access, not one past.
367 * This way we won't overflow when accessing the last byte.
369 start = P2ALIGN(off, 1ULL << max_bs);
370 end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
371 (void) refcount_add_many(&txh->txh_space_towrite,
372 end - start + 1, FTAG);
377 epbs = min_ibs - SPA_BLKPTRSHIFT;
380 * The object contains at most 2^(64 - min_bs) blocks,
381 * and each indirect level maps 2^epbs.
383 for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
386 ASSERT3U(end, >=, start);
387 (void) refcount_add_many(&txh->txh_space_towrite,
388 (end - start + 1) << max_ibs, FTAG);
391 * We also need a new blkid=0 indirect block
392 * to reference any existing file data.
394 (void) refcount_add_many(&txh->txh_space_towrite,
395 1ULL << max_ibs, FTAG);
400 if (refcount_count(&txh->txh_space_towrite) +
401 refcount_count(&txh->txh_space_tooverwrite) >
403 err = SET_ERROR(EFBIG);
406 txh->txh_tx->tx_err = err;
410 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
412 dnode_t *dn = txh->txh_dnode;
413 dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
414 uint64_t space = mdn->dn_datablksz +
415 ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);
417 if (dn && dn->dn_dbuf->db_blkptr &&
418 dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
419 dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
420 (void) refcount_add_many(&txh->txh_space_tooverwrite,
422 (void) refcount_add_many(&txh->txh_space_tounref, space, FTAG);
424 (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
425 if (dn && dn->dn_dbuf->db_blkptr) {
426 (void) refcount_add_many(&txh->txh_space_tounref,
433 dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
437 ASSERT(tx->tx_txg == 0);
438 ASSERT(len < DMU_MAX_ACCESS);
439 ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
441 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
442 object, THT_WRITE, off, len);
446 dmu_tx_count_write(txh, off, len);
447 dmu_tx_count_dnode(txh);
451 dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
453 uint64_t blkid, nblks, lastblk;
454 uint64_t space = 0, unref = 0, skipped = 0;
455 dnode_t *dn = txh->txh_dnode;
456 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
457 spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
459 uint64_t l0span = 0, nl1blks = 0;
461 if (dn->dn_nlevels == 0)
465 * The struct_rwlock protects us against dn_nlevels
466 * changing, in case (against all odds) we manage to dirty &
467 * sync out the changes after we check for being dirty.
468 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
470 rw_enter(&dn->dn_struct_rwlock, RW_READER);
471 epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
472 if (dn->dn_maxblkid == 0) {
473 if (off == 0 && len >= dn->dn_datablksz) {
477 rw_exit(&dn->dn_struct_rwlock);
481 blkid = off >> dn->dn_datablkshift;
482 nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;
484 if (blkid > dn->dn_maxblkid) {
485 rw_exit(&dn->dn_struct_rwlock);
488 if (blkid + nblks > dn->dn_maxblkid)
489 nblks = dn->dn_maxblkid - blkid + 1;
492 l0span = nblks; /* save for later use to calc level > 1 overhead */
493 if (dn->dn_nlevels == 1) {
495 for (i = 0; i < nblks; i++) {
496 blkptr_t *bp = dn->dn_phys->dn_blkptr;
497 ASSERT3U(blkid + i, <, dn->dn_nblkptr);
499 if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
500 dprintf_bp(bp, "can free old%s", "");
501 space += bp_get_dsize(spa, bp);
503 unref += BP_GET_ASIZE(bp);
509 lastblk = blkid + nblks - 1;
511 dmu_buf_impl_t *dbuf;
512 uint64_t ibyte, new_blkid;
514 int err, i, blkoff, tochk;
517 ibyte = blkid << dn->dn_datablkshift;
518 err = dnode_next_offset(dn,
519 DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
520 new_blkid = ibyte >> dn->dn_datablkshift;
522 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
526 txh->txh_tx->tx_err = err;
529 if (new_blkid > lastblk) {
530 skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
534 if (new_blkid > blkid) {
535 ASSERT((new_blkid >> epbs) > (blkid >> epbs));
536 skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
537 nblks -= new_blkid - blkid;
540 blkoff = P2PHASE(blkid, epb);
541 tochk = MIN(epb - blkoff, nblks);
543 err = dbuf_hold_impl(dn, 1, blkid >> epbs,
544 FALSE, FALSE, FTAG, &dbuf);
546 txh->txh_tx->tx_err = err;
550 (void) refcount_add_many(&txh->txh_memory_tohold,
551 dbuf->db.db_size, FTAG);
554 * We don't check memory_tohold against DMU_MAX_ACCESS because
555 * memory_tohold is an over-estimation (especially the >L1
556 * indirect blocks), so it could fail. Callers should have
557 * already verified that they will not be holding too much
561 err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
563 txh->txh_tx->tx_err = err;
564 dbuf_rele(dbuf, FTAG);
568 bp = dbuf->db.db_data;
571 for (i = 0; i < tochk; i++) {
572 if (dsl_dataset_block_freeable(ds, &bp[i],
574 dprintf_bp(&bp[i], "can free old%s", "");
575 space += bp_get_dsize(spa, &bp[i]);
577 unref += BP_GET_ASIZE(bp);
579 dbuf_rele(dbuf, FTAG);
585 rw_exit(&dn->dn_struct_rwlock);
588 * Add in memory requirements of higher-level indirects.
589 * This assumes a worst-possible scenario for dn_nlevels and a
590 * worst-possible distribution of l1-blocks over the region to free.
593 uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
596 * Here we don't use DN_MAX_LEVELS, but calculate it with the
597 * given datablkshift and indblkshift. This makes the
598 * difference between 19 and 8 on large files.
600 int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
601 (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
603 while (level++ < maxlevel) {
604 (void) refcount_add_many(&txh->txh_memory_tohold,
605 MAX(MIN(blkcnt, nl1blks), 1) << dn->dn_indblkshift,
607 blkcnt = 1 + (blkcnt >> epbs);
611 /* account for new level 1 indirect blocks that might show up */
613 (void) refcount_add_many(&txh->txh_fudge,
614 skipped << dn->dn_indblkshift, FTAG);
615 skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
616 (void) refcount_add_many(&txh->txh_memory_tohold,
617 skipped << dn->dn_indblkshift, FTAG);
619 (void) refcount_add_many(&txh->txh_space_tofree, space, FTAG);
620 (void) refcount_add_many(&txh->txh_space_tounref, unref, FTAG);
624 * This function marks the transaction as being a "net free". The end
625 * result is that refquotas will be disabled for this transaction, and
626 * this transaction will be able to use half of the pool space overhead
627 * (see dsl_pool_adjustedsize()). Therefore this function should only
628 * be called for transactions that we expect will not cause a net increase
629 * in the amount of space used (but it's OK if that is occasionally not true).
632 dmu_tx_mark_netfree(dmu_tx_t *tx)
636 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
637 DMU_NEW_OBJECT, THT_FREE, 0, 0);
640 * Pretend that this operation will free 1GB of space. This
641 * should be large enough to cancel out the largest write.
642 * We don't want to use something like UINT64_MAX, because that would
643 * cause overflows when doing math with these values (e.g. in
644 * dmu_tx_try_assign()).
646 (void) refcount_add_many(&txh->txh_space_tofree,
647 1024 * 1024 * 1024, FTAG);
648 (void) refcount_add_many(&txh->txh_space_tounref,
649 1024 * 1024 * 1024, FTAG);
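/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a caller might combine dmu_tx_mark_netfree() with the free and bonus
 * holds when deleting an object, in the style of dmu_free_long_object().
 * The helper name and the TXG_WAIT choice are hypothetical.
 */
#if 0
static int
example_free_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_object_free(os, object, tx);
	dmu_tx_commit(tx);
	return (err);
}
#endif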
653 dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
660 ASSERT(tx->tx_txg == 0);
662 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
663 object, THT_FREE, off, len);
667 dmu_tx_count_dnode(txh);
669 if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
671 if (len == DMU_OBJECT_END)
672 len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;
676 * For i/o error checking, we read the first and last level-0
677 * blocks if they are not aligned, and all the level-1 blocks.
679 * Note: dbuf_free_range() assumes that we have not instantiated
680 * any level-0 dbufs that will be completely freed. Therefore we must
681 * exercise care to not read or count the first and last blocks
682 * if they are blocksize-aligned.
684 if (dn->dn_datablkshift == 0) {
685 if (off != 0 || len < dn->dn_datablksz)
686 dmu_tx_count_write(txh, 0, dn->dn_datablksz);
688 /* first block will be modified if it is not aligned */
689 if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
690 dmu_tx_count_write(txh, off, 1);
691 /* last block will be modified if it is not aligned */
692 if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
693 dmu_tx_count_write(txh, off+len, 1);
697 * Check level-1 blocks.
699 if (dn->dn_nlevels > 1) {
700 int shift = dn->dn_datablkshift + dn->dn_indblkshift -
702 uint64_t start = off >> shift;
703 uint64_t end = (off + len) >> shift;
705 ASSERT(dn->dn_indblkshift != 0);
708 * dnode_reallocate() can result in an object with indirect
709 * blocks having an odd data block size. In this case,
710 * just check the single block.
712 if (dn->dn_datablkshift == 0)
715 zio = zio_root(tx->tx_pool->dp_spa,
716 NULL, NULL, ZIO_FLAG_CANFAIL);
717 for (uint64_t i = start; i <= end; i++) {
718 uint64_t ibyte = i << shift;
719 err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
721 if (err == ESRCH || i > end)
728 err = dmu_tx_check_ioerr(zio, dn, 1, i);
741 dmu_tx_count_free(txh, off, len);
745 dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
751 ASSERT(tx->tx_txg == 0);
753 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
754 object, THT_ZAP, add, (uintptr_t)name);
759 dmu_tx_count_dnode(txh);
763 * We will be able to fit a new object's entries into one leaf
764 * block. So there will be at most 2 blocks total,
765 * including the header block.
767 dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
771 ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
773 if (dn->dn_maxblkid == 0 && !add) {
777 * If there is only one block (i.e. this is a micro-zap)
778 * and we are not adding anything, the accounting is simple.
780 err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
787 * Use max block size here, since we don't know how much
788 * the size will change between now and the dbuf dirty call.
790 bp = &dn->dn_phys->dn_blkptr[0];
791 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
792 bp, bp->blk_birth)) {
793 (void) refcount_add_many(&txh->txh_space_tooverwrite,
794 MZAP_MAX_BLKSZ, FTAG);
796 (void) refcount_add_many(&txh->txh_space_towrite,
797 MZAP_MAX_BLKSZ, FTAG);
799 if (!BP_IS_HOLE(bp)) {
800 (void) refcount_add_many(&txh->txh_space_tounref,
801 MZAP_MAX_BLKSZ, FTAG);
806 if (dn->dn_maxblkid > 0 && name) {
808 * access the name in this fat-zap so that we'll check
809 * for i/o errors to the leaf blocks, etc.
811 err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
818 err = zap_count_write_by_dnode(dn, name, add,
819 &txh->txh_space_towrite, &txh->txh_space_tooverwrite);
822 * If the modified blocks are scattered to the four winds,
823 * we'll have to modify an indirect twig for each. We can make
824 * modifications at up to 3 locations:
825 * - header block at the beginning of the object
826 * - target leaf block
827 * - end of the object, where we might need to write:
828 * - a new leaf block if the target block needs to be split
829 * - the new pointer table, if it is growing
830 * - the new cookie table, if it is growing
832 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
833 dsl_dataset_phys_t *ds_phys =
834 dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
835 for (int lvl = 1; lvl < dn->dn_nlevels; lvl++) {
836 uint64_t num_indirects = 1 + (dn->dn_maxblkid >> (epbs * lvl));
837 uint64_t spc = MIN(3, num_indirects) << dn->dn_indblkshift;
838 if (ds_phys->ds_prev_snap_obj != 0) {
839 (void) refcount_add_many(&txh->txh_space_towrite,
842 (void) refcount_add_many(&txh->txh_space_tooverwrite,
849 dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
853 ASSERT(tx->tx_txg == 0);
855 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
856 object, THT_BONUS, 0, 0);
858 dmu_tx_count_dnode(txh);
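/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical caller adding an entry to a directory ZAP holds both the
 * directory's ZAP and the new object's bonus buffer before assigning the
 * tx.  The variable names below are hypothetical.
 */
#if 0
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, dir_object, B_TRUE, name);
	dmu_tx_hold_bonus(tx, new_object);
	/* ... dmu_tx_assign(), zap_add(), dmu_tx_commit() ... */
#endif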
862 dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
865 ASSERT(tx->tx_txg == 0);
867 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
868 DMU_NEW_OBJECT, THT_SPACE, space, 0);
870 (void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
874 dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
880 * By asserting that the tx is assigned, we're counting the
881 * number of dn_tx_holds, which is the same as the number of
882 * dn_holds. Otherwise, we'd be counting dn_holds, but
883 * dn_tx_holds could be 0.
885 ASSERT(tx->tx_txg != 0);
887 /* if (tx->tx_anyobj == TRUE) */
890 for (txh = list_head(&tx->tx_holds); txh;
891 txh = list_next(&tx->tx_holds, txh)) {
892 if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
901 dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
904 int match_object = FALSE, match_offset = FALSE;
909 ASSERT(tx->tx_txg != 0);
910 ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
911 ASSERT3U(dn->dn_object, ==, db->db.db_object);
918 /* XXX No checking on the meta dnode for now */
919 if (db->db.db_object == DMU_META_DNODE_OBJECT) {
924 for (txh = list_head(&tx->tx_holds); txh;
925 txh = list_next(&tx->tx_holds, txh)) {
926 ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
927 if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
929 if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
930 int datablkshift = dn->dn_datablkshift ?
931 dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
932 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
933 int shift = datablkshift + epbs * db->db_level;
934 uint64_t beginblk = shift >= 64 ? 0 :
935 (txh->txh_arg1 >> shift);
936 uint64_t endblk = shift >= 64 ? 0 :
937 ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
938 uint64_t blkid = db->db_blkid;
940 /* XXX txh_arg2 better not be zero... */
942 dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
943 txh->txh_type, beginblk, endblk);
945 switch (txh->txh_type) {
947 if (blkid >= beginblk && blkid <= endblk)
950 * We will let this hold work for the bonus
951 * or spill buffer so that we don't need to
952 * hold it when creating a new object.
954 if (blkid == DMU_BONUS_BLKID ||
955 blkid == DMU_SPILL_BLKID)
958 * They might have to increase nlevels,
959 * thus dirtying the new TLIBs. Or they
960 * might have to change the block size,
961 * thus dirtying the new lvl=0 blk=0.
968 * We will dirty all the level 1 blocks in
969 * the free range and perhaps the first and
970 * last level 0 block.
972 if (blkid >= beginblk && (blkid <= endblk ||
973 txh->txh_arg2 == DMU_OBJECT_END))
977 if (blkid == DMU_SPILL_BLKID)
981 if (blkid == DMU_BONUS_BLKID)
991 ASSERT(!"bad txh_type");
994 if (match_object && match_offset) {
1000 panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
1001 (u_longlong_t)db->db.db_object, db->db_level,
1002 (u_longlong_t)db->db_blkid);
1007 * If we can't do 10 iops, something is wrong. Let us go ahead
1008 * and hit zfs_dirty_data_max.
1010 hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
1011 int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
1014 * We delay transactions when we've determined that the backend storage
1015 * isn't able to accommodate the rate of incoming writes.
1017 * If there is already a transaction waiting, we delay relative to when
1018 * that transaction finishes waiting. This way the calculated min_time
1019 * is independent of the number of threads concurrently executing
1022 * If we are the only waiter, wait relative to when the transaction
1023 * started, rather than the current time. This credits the transaction for
1024 * "time already served", e.g. reading indirect blocks.
1026 * The minimum time for a transaction to take is calculated as:
1027 * min_time = scale * (dirty - min) / (max - dirty)
1028 * min_time is then capped at zfs_delay_max_ns.
1030 * The delay has two degrees of freedom that can be adjusted via tunables.
1031 * The percentage of dirty data at which we start to delay is defined by
1032 * zfs_delay_min_dirty_percent. This should typically be at or above
1033 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
1034 * delay after writing at full speed has failed to keep up with the incoming
1035 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
1036 * speaking, this variable determines the amount of delay at the midpoint of
1040 * [graph elided: delay (0 to 10ms, linear scale) vs. dirty data as a percentage of zfs_dirty_data_max (0% to 100%); zfs_delay_scale sets the delay at the curve's midpoint, 2ms here]
1063 * Note that since the delay is added to the outstanding time remaining on the
1064 * most recent transaction, the delay is effectively the inverse of IOPS.
1065 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
1066 * was chosen such that small changes in the amount of accumulated dirty data
1067 * in the first 3/4 of the curve yield relatively small differences in the
1070 * The effects can be easier to understand when the amount of delay is
1071 * represented on a log scale:
1074 * [graph elided: the same delay curve on a log scale (up to 100ms) vs. dirty data from 0% to 100% of zfs_dirty_data_max]
1097 * Note here that only as the amount of dirty data approaches its limit does
1098 * the delay start to increase rapidly. The goal of a properly tuned system
1099 * should be to keep the amount of dirty data out of that range by first
1100 * ensuring that the appropriate limits are set for the I/O scheduler to reach
1101 * optimal throughput on the backend storage, and then by changing the value
1102 * of zfs_delay_scale to increase the steepness of the curve.
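/*
 * Worked example (editorial addition, with assumed tunable values): take
 * zfs_dirty_data_max = 4GB, zfs_delay_min_dirty_percent = 60 and
 * zfs_delay_scale = 500000.  Then delay_min_bytes = 2.4GB, and halfway
 * between that and the limit (dirty = 3.2GB):
 *
 *	min_tx_time = 500000 * (3.2G - 2.4G) / (4G - 3.2G) = 500000ns = 500us
 *
 * which is the 500us midpoint referred to above.
 */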
1105 dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
1107 dsl_pool_t *dp = tx->tx_pool;
1108 uint64_t delay_min_bytes =
1109 zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
1110 hrtime_t wakeup, min_tx_time, now;
1112 if (dirty <= delay_min_bytes)
1116 * The caller has already waited until we are under the max.
1117 * We make them pass us the amount of dirty data so we don't
1118 * have to handle the case of it being >= the max, which could
1119 * cause a divide-by-zero if it's == the max.
1121 ASSERT3U(dirty, <, zfs_dirty_data_max);
1124 min_tx_time = zfs_delay_scale *
1125 (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
1126 if (now > tx->tx_start + min_tx_time)
1129 min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
1131 DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
1132 uint64_t, min_tx_time);
1134 mutex_enter(&dp->dp_lock);
1135 wakeup = MAX(tx->tx_start + min_tx_time,
1136 dp->dp_last_wakeup + min_tx_time);
1137 dp->dp_last_wakeup = wakeup;
1138 mutex_exit(&dp->dp_lock);
1142 mutex_enter(&curthread->t_delay_lock);
1143 while (cv_timedwait_hires(&curthread->t_delay_cv,
1144 &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
1145 CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
1147 mutex_exit(&curthread->t_delay_lock);
1149 pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
1150 zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
1153 hrtime_t delta = wakeup - gethrtime();
1155 ts.tv_sec = delta / NANOSEC;
1156 ts.tv_nsec = delta % NANOSEC;
1157 (void) nanosleep(&ts, NULL);
1162 dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
1165 spa_t *spa = tx->tx_pool->dp_spa;
1166 uint64_t memory, asize, fsize, usize;
1167 uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;
1169 ASSERT0(tx->tx_txg);
1172 return (tx->tx_err);
1174 if (spa_suspended(spa)) {
1176 * If the user has indicated a blocking failure mode
1177 * then return ERESTART which will block in dmu_tx_wait().
1178 * Otherwise, return EIO so that an error can get
1179 * propagated back to the VOP calls.
1181 * Note that we always honor the txg_how flag regardless
1182 * of the failuremode setting.
1184 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
1185 txg_how != TXG_WAIT)
1186 return (SET_ERROR(EIO));
1188 return (SET_ERROR(ERESTART));
1191 if (!tx->tx_waited &&
1192 dsl_pool_need_dirty_delay(tx->tx_pool)) {
1193 tx->tx_wait_dirty = B_TRUE;
1194 return (SET_ERROR(ERESTART));
1197 tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
1198 tx->tx_needassign_txh = NULL;
1201 * NB: No error returns are allowed after txg_hold_open, but
1202 * before processing the dnode holds, due to the
1203 * dmu_tx_unassign() logic.
1206 towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
1207 for (txh = list_head(&tx->tx_holds); txh;
1208 txh = list_next(&tx->tx_holds, txh)) {
1209 dnode_t *dn = txh->txh_dnode;
1211 mutex_enter(&dn->dn_mtx);
1212 if (dn->dn_assigned_txg == tx->tx_txg - 1) {
1213 mutex_exit(&dn->dn_mtx);
1214 tx->tx_needassign_txh = txh;
1215 return (SET_ERROR(ERESTART));
1217 if (dn->dn_assigned_txg == 0)
1218 dn->dn_assigned_txg = tx->tx_txg;
1219 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1220 (void) refcount_add(&dn->dn_tx_holds, tx);
1221 mutex_exit(&dn->dn_mtx);
1223 towrite += refcount_count(&txh->txh_space_towrite);
1224 tofree += refcount_count(&txh->txh_space_tofree);
1225 tooverwrite += refcount_count(&txh->txh_space_tooverwrite);
1226 tounref += refcount_count(&txh->txh_space_tounref);
1227 tohold += refcount_count(&txh->txh_memory_tohold);
1228 fudge += refcount_count(&txh->txh_fudge);
1232 * If a snapshot has been taken since we made our estimates,
1233 * assume that we won't be able to free or overwrite anything.
1235 if (tx->tx_objset &&
1236 dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
1237 tx->tx_lastsnap_txg) {
1238 towrite += tooverwrite;
1239 tooverwrite = tofree = 0;
1242 /* needed allocation: worst-case estimate of write space */
1243 asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
1244 /* freed space estimate: worst-case overwrite + free estimate */
1245 fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
1246 /* convert unrefd space to worst-case estimate */
1247 usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
1248 /* calculate memory footprint estimate */
1249 memory = towrite + tooverwrite + tohold;
1253 * Add in 'tohold' to account for our dirty holds on this memory
1254 * XXX - the "fudge" factor is to account for skipped blocks that
1255 * we missed because dnode_next_offset() misses in-core-only blocks.
1257 tx->tx_space_towrite = asize +
1258 spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
1259 tx->tx_space_tofree = tofree;
1260 tx->tx_space_tooverwrite = tooverwrite;
1261 tx->tx_space_tounref = tounref;
1264 if (tx->tx_dir && asize != 0) {
1265 int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
1266 asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
1275 dmu_tx_unassign(dmu_tx_t *tx)
1279 if (tx->tx_txg == 0)
1282 txg_rele_to_quiesce(&tx->tx_txgh);
1285 * Walk the transaction's hold list, removing the hold on the
1286 * associated dnode, and notifying waiters if the refcount drops to 0.
1288 for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
1289 txh = list_next(&tx->tx_holds, txh)) {
1290 dnode_t *dn = txh->txh_dnode;
1294 mutex_enter(&dn->dn_mtx);
1295 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1297 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1298 dn->dn_assigned_txg = 0;
1299 cv_broadcast(&dn->dn_notxholds);
1301 mutex_exit(&dn->dn_mtx);
1304 txg_rele_to_sync(&tx->tx_txgh);
1306 tx->tx_lasttried_txg = tx->tx_txg;
1311 * Assign tx to a transaction group. txg_how can be one of:
1313 * (1) TXG_WAIT. If the current open txg is full, waits until there's
1314 * a new one. This should be used when you're not holding locks.
1315 * It will only fail if we're truly out of space (or over quota).
1317 * (2) TXG_NOWAIT. If we can't assign into the current open txg without
1318 * blocking, returns immediately with ERESTART. This should be used
1319 * whenever you're holding locks. On an ERESTART error, the caller
1320 * should drop locks, do a dmu_tx_wait(tx), and try again.
1322 * (3) TXG_WAITED. Like TXG_NOWAIT, but indicates that dmu_tx_wait()
1323 * has already been called on behalf of this operation (though
1324 * most likely on a different tx).
1327 dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
1331 ASSERT(tx->tx_txg == 0);
1332 ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
1333 txg_how == TXG_WAITED);
1334 ASSERT(!dsl_pool_sync_context(tx->tx_pool));
1336 /* If we might wait, we must not hold the config lock. */
1337 ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));
1339 if (txg_how == TXG_WAITED)
1340 tx->tx_waited = B_TRUE;
1342 while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
1343 dmu_tx_unassign(tx);
1345 if (err != ERESTART || txg_how != TXG_WAIT)
1351 txg_rele_to_quiesce(&tx->tx_txgh);
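/*
 * Illustrative sketch (editorial addition, not part of the original file)
 * of the TXG_NOWAIT retry loop described in the comment above
 * dmu_tx_assign().  The function name and the write hold are hypothetical.
 */
#if 0
static int
example_assign_nowait(objset_t *os, uint64_t object, uint64_t off, int len)
{
	boolean_t waited = B_FALSE;
	dmu_tx_t *tx;
	int err;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			/* Drop any locks here, then wait and retry. */
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... dmu_write(os, object, off, len, buf, tx) ... */
	dmu_tx_commit(tx);
	return (0);
}
#endif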
1357 dmu_tx_wait(dmu_tx_t *tx)
1359 spa_t *spa = tx->tx_pool->dp_spa;
1360 dsl_pool_t *dp = tx->tx_pool;
1362 ASSERT(tx->tx_txg == 0);
1363 ASSERT(!dsl_pool_config_held(tx->tx_pool));
1365 if (tx->tx_wait_dirty) {
1367 * dmu_tx_try_assign() has determined that we need to wait
1368 * because we've consumed much or all of the dirty buffer
1371 mutex_enter(&dp->dp_lock);
1372 while (dp->dp_dirty_total >= zfs_dirty_data_max)
1373 cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
1374 uint64_t dirty = dp->dp_dirty_total;
1375 mutex_exit(&dp->dp_lock);
1377 dmu_tx_delay(tx, dirty);
1379 tx->tx_wait_dirty = B_FALSE;
1382 * Note: setting tx_waited only has effect if the caller
1383 * used TXG_WAIT. Otherwise they are going to destroy
1384 * this tx and try again. The common case, zfs_write(),
1387 tx->tx_waited = B_TRUE;
1388 } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
1390 * If the pool is suspended we need to wait until it
1391 * is resumed. Note that it's possible that the pool
1392 * has become active after this thread has tried to
1393 * obtain a tx. If that's the case then tx_lasttried_txg
1394 * would not have been set.
1396 txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
1397 } else if (tx->tx_needassign_txh) {
1399 * A dnode is assigned to the quiescing txg. Wait for its
1400 * transaction to complete.
1402 dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
1404 mutex_enter(&dn->dn_mtx);
1405 while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
1406 cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
1407 mutex_exit(&dn->dn_mtx);
1408 tx->tx_needassign_txh = NULL;
1410 txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
1415 dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
1418 if (tx->tx_dir == NULL || delta == 0)
1422 ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
1423 tx->tx_space_towrite);
1424 (void) refcount_add_many(&tx->tx_space_written, delta, NULL);
1426 (void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
1432 dmu_tx_destroy(dmu_tx_t *tx)
1436 while ((txh = list_head(&tx->tx_holds)) != NULL) {
1437 dnode_t *dn = txh->txh_dnode;
1439 list_remove(&tx->tx_holds, txh);
1440 refcount_destroy_many(&txh->txh_space_towrite,
1441 refcount_count(&txh->txh_space_towrite));
1442 refcount_destroy_many(&txh->txh_space_tofree,
1443 refcount_count(&txh->txh_space_tofree));
1444 refcount_destroy_many(&txh->txh_space_tooverwrite,
1445 refcount_count(&txh->txh_space_tooverwrite));
1446 refcount_destroy_many(&txh->txh_space_tounref,
1447 refcount_count(&txh->txh_space_tounref));
1448 refcount_destroy_many(&txh->txh_memory_tohold,
1449 refcount_count(&txh->txh_memory_tohold));
1450 refcount_destroy_many(&txh->txh_fudge,
1451 refcount_count(&txh->txh_fudge));
1452 kmem_free(txh, sizeof (dmu_tx_hold_t));
1457 list_destroy(&tx->tx_callbacks);
1458 list_destroy(&tx->tx_holds);
1460 refcount_destroy_many(&tx->tx_space_written,
1461 refcount_count(&tx->tx_space_written));
1462 refcount_destroy_many(&tx->tx_space_freed,
1463 refcount_count(&tx->tx_space_freed));
1465 kmem_free(tx, sizeof (dmu_tx_t));
1469 dmu_tx_commit(dmu_tx_t *tx)
1471 ASSERT(tx->tx_txg != 0);
1474 * Go through the transaction's hold list and remove holds on
1475 * associated dnodes, notifying waiters if no holds remain.
1477 for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
1478 txh = list_next(&tx->tx_holds, txh)) {
1479 dnode_t *dn = txh->txh_dnode;
1484 mutex_enter(&dn->dn_mtx);
1485 ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
1487 if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
1488 dn->dn_assigned_txg = 0;
1489 cv_broadcast(&dn->dn_notxholds);
1491 mutex_exit(&dn->dn_mtx);
1494 if (tx->tx_tempreserve_cookie)
1495 dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
1497 if (!list_is_empty(&tx->tx_callbacks))
1498 txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
1500 if (tx->tx_anyobj == FALSE)
1501 txg_rele_to_sync(&tx->tx_txgh);
1504 dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
1505 tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
1506 tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
1512 dmu_tx_abort(dmu_tx_t *tx)
1514 ASSERT(tx->tx_txg == 0);
1517 * Call any registered callbacks with an error code.
1519 if (!list_is_empty(&tx->tx_callbacks))
1520 dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
1526 dmu_tx_get_txg(dmu_tx_t *tx)
1528 ASSERT(tx->tx_txg != 0);
1529 return (tx->tx_txg);
1533 dmu_tx_pool(dmu_tx_t *tx)
1535 ASSERT(tx->tx_pool != NULL);
1536 return (tx->tx_pool);
1541 dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
1543 dmu_tx_callback_t *dcb;
1545 dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
1547 dcb->dcb_func = func;
1548 dcb->dcb_data = data;
1550 list_insert_tail(&tx->tx_callbacks, dcb);
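/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a callback registered with dmu_tx_callback_register() is invoked by
 * dmu_tx_do_callbacks() once the assigned txg has synced (error 0), or
 * with ECANCELED if the tx is aborted first.  The names are hypothetical.
 */
#if 0
static void
example_commit_cb(void *arg, int error)
{
	/* error is 0 after the txg syncs, ECANCELED if the tx was aborted */
	kmem_free(arg, sizeof (uint64_t));
}

static void
example_register_cb(dmu_tx_t *tx)
{
	uint64_t *arg = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);

	dmu_tx_callback_register(tx, example_commit_cb, arg);
}
#endif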
1554 * Call all the commit callbacks on a list, with a given error code.
1557 dmu_tx_do_callbacks(list_t *cb_list, int error)
1559 dmu_tx_callback_t *dcb;
1561 while ((dcb = list_head(cb_list)) != NULL) {
1562 list_remove(cb_list, dcb);
1563 dcb->dcb_func(dcb->dcb_data, error);
1564 kmem_free(dcb, sizeof (dmu_tx_callback_t));
1569 * Interface to hold a bunch of attributes.
1570 * Used for creating new files.
1571 * attrsize is the total size of all attributes
1572 * to be added during object creation.
1574 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
1578 * Hold the necessary attribute name for attribute registration.
1579 * This should be a very rare case where this is needed. If it does
1580 * happen it would only happen on the first write to the file system.
1583 dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
1587 if (!sa->sa_need_attr_registration)
1590 for (i = 0; i != sa->sa_num_attrs; i++) {
1591 if (!sa->sa_attr_table[i].sa_registered) {
1592 if (sa->sa_reg_attr_obj)
1593 dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
1594 B_TRUE, sa->sa_attr_table[i].sa_name);
1596 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
1597 B_TRUE, sa->sa_attr_table[i].sa_name);
1604 dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
1609 txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
1612 dn = txh->txh_dnode;
1617 /* If blkptr doesn't exist then add space to towrite */
1618 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
1619 (void) refcount_add_many(&txh->txh_space_towrite,
1620 SPA_OLD_MAXBLOCKSIZE, FTAG);
1624 bp = &dn->dn_phys->dn_spill;
1625 if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
1626 bp, bp->blk_birth)) {
1627 (void) refcount_add_many(&txh->txh_space_tooverwrite,
1628 SPA_OLD_MAXBLOCKSIZE, FTAG);
1630 (void) refcount_add_many(&txh->txh_space_towrite,
1631 SPA_OLD_MAXBLOCKSIZE, FTAG);
1633 if (!BP_IS_HOLE(bp)) {
1634 (void) refcount_add_many(&txh->txh_space_tounref,
1635 SPA_OLD_MAXBLOCKSIZE, FTAG);
1641 dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
1643 sa_os_t *sa = tx->tx_objset->os_sa;
1645 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1647 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1650 if (tx->tx_objset->os_sa->sa_layout_attr_obj)
1651 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1653 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1654 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1655 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1656 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1659 dmu_tx_sa_registration_hold(sa, tx);
1661 if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
1664 (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
1671 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1673 * may_grow should be set when the caller may add attributes or grow
1674 * variable-sized ones; in that case the SA layout ZAP object and the
1675 * spill block are also held.
1678 dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
1681 sa_os_t *sa = tx->tx_objset->os_sa;
1683 ASSERT(hdl != NULL);
1685 object = sa_handle_object(hdl);
1687 dmu_tx_hold_bonus(tx, object);
1689 if (tx->tx_objset->os_sa->sa_master_obj == 0)
1692 if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
1693 tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
1694 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
1695 dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
1696 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1697 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1700 dmu_tx_sa_registration_hold(sa, tx);
1702 if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
1703 dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
1705 if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
1706 ASSERT(tx->tx_txg == 0);
1707 dmu_tx_hold_spill(tx, object);
1709 dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
1714 if (dn->dn_have_spill) {
1715 ASSERT(tx->tx_txg == 0);
1716 dmu_tx_hold_spill(tx, object);