/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);

	return (tx);
}
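
/*
 * Editorial usage sketch (not part of the original file; object, off,
 * len, and buf are hypothetical): a typical DMU consumer pairs
 * dmu_tx_create() with hold declarations, assignment, and commit/abort.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */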

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os->os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks (if they are not aligned), and all the level-1 blocks.
	 */
	if (dn) {
		if (dn->dn_maxblkid == 0) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err)
				goto out;
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (dn->dn_nlevels > 1) {
				start >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				end >>= dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = start+1; i < end; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
		}
	}

	/*
	 * If there's more than one block, the blocksize can't change,
	 * so we can make a more precise estimate.  Alternatively,
	 * if the dnode's ibs is larger than max_ibs, always use that.
	 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
	 * the code will still work correctly on existing pools.
	 */
	if (dn && (dn->dn_maxblkid != 0 || dn->dn_indblkshift > max_ibs)) {
		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_datablkshift != 0)
			min_bs = max_bs = dn->dn_datablkshift;
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		/*
		 * If we increase the number of levels of indirection,
		 * we'll need new blkid=0 indirect blocks.  If start == 0,
		 * we're already accounting for those blocks; and if end == 0,
		 * we can't increase the number of levels beyond that.
		 */
		if (start != 0 && end != 0)
			txh->txh_space_towrite += 1ULL << max_ibs;
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
	}

	ASSERT(txh->txh_space_towrite < 2 * DMU_MAX_ACCESS);

out:
	if (err)
		txh->txh_tx->tx_err = err;
}
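
/*
 * Editorial worked example (block sizes assumed, not from the original
 * source): for a dnode with 128K data blocks (max_bs = 17) and 16K
 * indirect blocks (max_ibs = 14, so epbs = 14 - SPA_BLKPTRSHIFT = 7),
 * an aligned single-block write charges 2^17 bytes of level-0 space,
 * and then, since start == end at every level of the loop above,
 * roughly one 2^14-byte indirect block per indirect level.
 */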

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
	} else {
		txh->txh_space_towrite += space;
	}
}
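
/*
 * Editorial worked example (figures assumed): the charge above is one
 * meta-dnode data block plus one indirect block per level above it.
 * With a 16K meta-dnode block size, 16K indirects, and nlevels == 3,
 * each held dnode accounts for 16K + 2 * 16K = 48K of write space.
 */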

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks;
	uint64_t space = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int dirty;

	/*
	 * We don't need to use any locking to check for dirtiness
	 * because it's OK if we get stale data -- the dnode may become
	 * dirty immediately after our check anyway.  This is just a
	 * means to avoid the expensive count when we aren't sure we
	 * need it.  We need to be able to deal with a dirty dnode.
	 */
	dirty = list_link_active(&dn->dn_dirty_link[0]) |
	    list_link_active(&dn->dn_dirty_link[1]) |
	    list_link_active(&dn->dn_dirty_link[2]) |
	    list_link_active(&dn->dn_dirty_link[3]);
	if (dirty || dn->dn_assigned_txg || dn->dn_phys->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_phys->dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 *
	 * It's fine to use dn_datablkshift rather than the dn_phys
	 * equivalent because if it is changing, maxblkid==0 and we will
	 * bail out below.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_phys->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (off + len) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_phys->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_phys->dn_maxblkid)
			nblks = dn->dn_phys->dn_maxblkid - blkid;

		/* don't bother counting past 128K (131,072) blocks */
		nblks = MIN(nblks, 128*1024);
	}

	if (dn->dn_phys->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_phys->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dasize(spa, bp);
			}
		}
		nblks = 0;
	}

	while (nblks) {
		dmu_buf_impl_t *dbuf;
		int err, epbs, blkoff, tochk;

		epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
		blkoff = P2PHASE(blkid, 1<<epbs);
		tochk = MIN((1<<epbs) - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, TRUE, FTAG, &dbuf);
		if (err == 0) {
			int i;
			blkptr_t *bp;

			err = dbuf_read(dbuf, NULL,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
				dbuf_rele(dbuf, FTAG);
				break;
			}

			bp = dbuf->db.db_data;
			bp += blkoff;

			for (i = 0; i < tochk; i++) {
				if (dsl_dataset_block_freeable(ds,
				    bp[i].blk_birth)) {
					dprintf_bp(&bp[i],
					    "can free old%s", "");
					space += bp_get_dasize(spa, &bp[i]);
				}
			}
			dbuf_rele(dbuf, FTAG);
		}
		if (err && err != ENOENT) {
			txh->txh_tx->tx_err = err;
			break;
		}

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	txh->txh_space_tofree += space;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * will take care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, FALSE, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_dnode(txh);
	dmu_tx_count_free(txh, off, len);
}
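
/*
 * Editorial usage sketch (object, off, and len are hypothetical): a
 * caller declares the free before performing it under the assigned tx.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = dmu_free_range(os, object, off, len, tx);
 *	dmu_tx_commit(tx);
 */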

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one
		 * leaf block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * Access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	/*
	 * 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks
	 */
	dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz,
	    (3 + (add ? 3 : 0)) << dn->dn_datablkshift);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		txh->txh_space_towrite += 3 << dn->dn_indblkshift;
}
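
/*
 * Editorial usage sketch (zapobj, name, and value are hypothetical):
 * adding a ZAP entry declares the hold with add == TRUE and the entry
 * name, so the estimate above can charge for a possible leaf split.
 *
 *	dmu_tx_hold_zap(tx, zapobj, TRUE, name);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	err = zap_add(os, zapobj, name, 8, 1, &value, tx);
 *	dmu_tx_commit(tx);
 */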

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}
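
/*
 * Editorial usage sketch (object and db are hypothetical): updating a
 * dnode's bonus buffer needs only the bonus hold, since
 * dmu_tx_count_dnode() above charges for rewriting the dnode itself.
 *
 *	dmu_tx_hold_bonus(tx, object);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	VERIFY(0 == dmu_bonus_hold(os, object, FTAG, &db));
 *	dmu_buf_will_dirty(db, tx);
 *	(update db->db_data here)
 *	dmu_buf_rele(db, FTAG);
 *	dmu_tx_commit(tx);
 */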

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				if (blkid == beginblk &&
				    (txh->txh_arg1 != 0 ||
				    dn->dn_maxblkid == 0))
					match_offset = TRUE;
				if (blkid == endblk &&
				    txh->txh_arg2 != DMU_OBJECT_END)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	uint64_t lsize, asize, fsize, towrite, tofree, tooverwrite;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly.
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/*
	 * Convert logical size to worst-case allocated size.
	 */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	lsize = towrite + tooverwrite;
	asize = spa_get_asize(tx->tx_pool->dp_spa, lsize);

#ifdef ZFS_DEBUG
	tx->tx_space_towrite = asize;
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir,
		    lsize, asize, fsize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
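
/*
 * Editorial usage sketch (hypothetical caller): the canonical
 * TXG_NOWAIT pattern described above -- on ERESTART, drop locks,
 * wait, and retry.
 *
 * top:
 *	(take locks)
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		(drop locks)
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		goto top;
 *	}
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 */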

void
dmu_tx_wait(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);
	ASSERT(tx->tx_lasttried_txg != 0);

	if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}