4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
33 #include <sys/resource.h>
35 #include <sys/zil_impl.h>
36 #include <sys/dsl_dataset.h>
38 #include <sys/dmu_tx.h>
41 * The zfs intent log (ZIL) saves transaction records of system calls
42 * that change the file system in memory with enough information
43 * to be able to replay them. These are stored in memory until
44 * either the DMU transaction group (txg) commits them to the stable pool
45 * and they can be discarded, or they are flushed to the stable log
46 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
47 * requirement. In the event of a panic or power failure, those log
48 * records (transactions) are replayed.
50 * There is one ZIL per file system. Its on-disk (pool) format consists
57 * A log record holds a system call transaction. Log blocks can
58 * hold many log records and the blocks are chained together.
59 * Each ZIL block contains a block pointer (blkptr_t) to the next
60 * ZIL block in the chain. The ZIL header points to the first
61 * block in the chain. Note there is not a fixed place in the pool
62 * to hold blocks. They are dynamically allocated and freed as
63 * needed from the blocks available. Figure X shows the ZIL structure:
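*
* A rough sketch of the chain described above:
*
*    zh_log               ZIL block                ZIL block
*   +---------+       +------------------+       +------------------+
*   | ZIL hdr |------>| log records ...  |   +-->| log records ...  |
*   +---------+       | next blkptr -----+---+   | next blkptr -----+---> ...
*                     +------------------+       +------------------+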
67 * This global ZIL switch affects all pools
69 int zil_disable = 0; /* disable intent logging */
70 SYSCTL_DECL(_vfs_zfs);
71 TUNABLE_INT("vfs.zfs.zil_disable", &zil_disable);
72 SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_disable, CTLFLAG_RW, &zil_disable, 0,
73 "Disable ZFS Intent Log (ZIL)");
76 * Tunable parameter for debugging or performance analysis. Setting
77 * zfs_nocacheflush will cause corruption on power loss if a volatile
78 * out-of-order write cache is enabled.
80 boolean_t zfs_nocacheflush = B_FALSE;
81 TUNABLE_INT("vfs.zfs.cache_flush_disable", &zfs_nocacheflush);
82 SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
83 &zfs_nocacheflush, 0, "Disable cache flush");
85 static kmem_cache_t *zil_lwb_cache;
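/*
 * AVL comparator for the DVA tree (zl_dva_tree) used to detect log
 * blocks that have already been claimed or freed: order by vdev id
 * first, then by offset within the vdev.
 */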
88 zil_dva_compare(const void *x1, const void *x2)
90 const dva_t *dva1 = x1;
91 const dva_t *dva2 = x2;
93 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
95 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
98 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
100 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
107 zil_dva_tree_init(avl_tree_t *t)
109 avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
110 offsetof(zil_dva_node_t, zn_node));
114 zil_dva_tree_fini(avl_tree_t *t)
119 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
120 kmem_free(zn, sizeof (zil_dva_node_t));
126 zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
131 if (avl_find(t, dva, &where) != NULL)
134 zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
136 avl_insert(t, zn, where);
141 static zil_header_t *
142 zil_header_in_syncing_context(zilog_t *zilog)
144 return ((zil_header_t *)zilog->zl_header);
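/*
 * Seed the checksum words of a new log chain's first block pointer:
 * two random GUID words identify the chain, followed by the objset id
 * and an initial sequence number of 1. Each block's trailer records
 * the next block pointer, whose checksum must equal the current
 * block's checksum with ZIL_ZC_SEQ incremented; zil_read_log_block()
 * verifies that linkage.
 */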
148 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
150 zio_cksum_t *zc = &bp->blk_cksum;
152 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
153 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
154 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
155 zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
159 * Read a log block, make sure it's valid, and byteswap it if necessary.
162 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, arc_buf_t **abufpp)
166 uint32_t aflags = ARC_WAIT;
169 zb.zb_objset = bp->blk_cksum.zc_word[ZIL_ZC_OBJSET];
172 zb.zb_blkid = bp->blk_cksum.zc_word[ZIL_ZC_SEQ];
177 * We shouldn't be doing any scrubbing while we're doing log
178 * replay, so it's OK not to lock.
180 error = arc_read_nolock(NULL, zilog->zl_spa, &blk,
181 arc_getbuf_func, abufpp, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL |
182 ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB, &aflags, &zb);
185 char *data = (*abufpp)->b_data;
186 uint64_t blksz = BP_GET_LSIZE(bp);
187 zil_trailer_t *ztp = (zil_trailer_t *)(data + blksz) - 1;
188 zio_cksum_t cksum = bp->blk_cksum;
191 * Validate the checksummed log block.
193 * Sequence numbers should be... sequential. The checksum
194 * verifier for the next block should be bp's checksum plus 1.
196 * Also check the log chain linkage and size used.
198 cksum.zc_word[ZIL_ZC_SEQ]++;
200 if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum,
201 sizeof (cksum)) || BP_IS_HOLE(&ztp->zit_next_blk) ||
202 (ztp->zit_nused > (blksz - sizeof (zil_trailer_t)))) {
207 VERIFY(arc_buf_remove_ref(*abufpp, abufpp) == 1);
212 dprintf("error %d on %llu:%llu\n", error, zb.zb_objset, zb.zb_blkid);
218 * Parse the intent log, and call parse_func for each valid record within.
219 * Return the highest sequence number.
222 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
223 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
225 const zil_header_t *zh = zilog->zl_header;
226 uint64_t claim_seq = zh->zh_claim_seq;
228 uint64_t max_seq = 0;
229 blkptr_t blk = zh->zh_log;
235 if (BP_IS_HOLE(&blk))
239 * Starting at the block pointed to by zh_log we read the log chain.
240 * For each block in the chain we strongly check that block to
241 * ensure its validity. We stop when an invalid block is found.
242 * For each block pointer in the chain we call parse_blk_func().
243 * For each record in each valid block we call parse_lr_func().
244 * If the log has been claimed, stop if we encounter a sequence
245 * number greater than the highest claimed sequence number.
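*
* In this file zil_parse() is driven with different callback pairs:
* zil_claim() uses zil_claim_log_block()/zil_claim_log_record(),
* zil_destroy() uses zil_free_log_block()/zil_free_log_record(), and
* zil_replay() uses zil_incr_blks()/zil_replay_log_record().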
247 zil_dva_tree_init(&zilog->zl_dva_tree);
249 seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
251 if (claim_seq != 0 && seq > claim_seq)
254 ASSERT(max_seq < seq);
257 error = zil_read_log_block(zilog, &blk, &abuf);
259 if (parse_blk_func != NULL)
260 parse_blk_func(zilog, &blk, arg, txg);
265 lrbuf = abuf->b_data;
266 ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
267 blk = ztp->zit_next_blk;
269 if (parse_lr_func == NULL) {
270 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
274 for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
275 lr_t *lr = (lr_t *)lrp;
276 reclen = lr->lrc_reclen;
277 ASSERT3U(reclen, >=, sizeof (lr_t));
278 parse_lr_func(zilog, lr, arg, txg);
280 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
282 zil_dva_tree_fini(&zilog->zl_dva_tree);
289 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
291 spa_t *spa = zilog->zl_spa;
295 * Claim log block if not already committed and not already claimed.
297 if (bp->blk_birth >= first_txg &&
298 zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
299 err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL,
300 ZIO_FLAG_MUSTSUCCEED));
306 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
308 if (lrc->lrc_txtype == TX_WRITE) {
309 lr_write_t *lr = (lr_write_t *)lrc;
310 zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
316 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
318 zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
322 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
325 * If we previously claimed it, we need to free it.
327 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
328 lr_write_t *lr = (lr_write_t *)lrc;
329 blkptr_t *bp = &lr->lr_blkptr;
330 if (bp->blk_birth >= claim_txg &&
331 !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
332 (void) arc_free(NULL, zilog->zl_spa,
333 dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
339 * Create an on-disk intent log.
342 zil_create(zilog_t *zilog)
344 const zil_header_t *zh = zilog->zl_header;
352 * Wait for any previous destroy to complete.
354 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
356 ASSERT(zh->zh_claim_txg == 0);
357 ASSERT(zh->zh_replay_seq == 0);
362 * If we don't already have an initial log block or we have one
363 * but it's the wrong endianness then allocate one.
365 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
366 tx = dmu_tx_create(zilog->zl_os);
367 (void) dmu_tx_assign(tx, TXG_WAIT);
368 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
369 txg = dmu_tx_get_txg(tx);
371 if (!BP_IS_HOLE(&blk)) {
372 zio_free_blk(zilog->zl_spa, &blk, txg);
376 error = zio_alloc_blk(zilog->zl_spa, ZIL_MIN_BLKSZ, &blk,
380 zil_init_log_chain(zilog, &blk);
384 * Allocate a log write buffer (lwb) for the first log block.
387 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
388 lwb->lwb_zilog = zilog;
391 lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
392 lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
393 lwb->lwb_max_txg = txg;
396 mutex_enter(&zilog->zl_lock);
397 list_insert_tail(&zilog->zl_lwb_list, lwb);
398 mutex_exit(&zilog->zl_lock);
402 * If we just allocated the first log block, commit our transaction
403 * and wait for zil_sync() to stuff the block pointer into zh_log.
404 * (zh is part of the MOS, so we cannot modify it in open context.)
408 txg_wait_synced(zilog->zl_dmu_pool, txg);
411 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
415 * In one tx, free all log blocks and clear the log header.
416 * If keep_first is set, then we're replaying a log with no content.
417 * We want to keep the first block, however, so that the first
418 * synchronous transaction doesn't require a txg_wait_synced()
419 * in zil_create(). We don't need to txg_wait_synced() here either
420 * when keep_first is set, because both zil_create() and zil_destroy()
421 * will wait for any in-progress destroys to complete.
424 zil_destroy(zilog_t *zilog, boolean_t keep_first)
426 const zil_header_t *zh = zilog->zl_header;
432 * Wait for any previous destroy to complete.
434 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
436 if (BP_IS_HOLE(&zh->zh_log))
439 tx = dmu_tx_create(zilog->zl_os);
440 (void) dmu_tx_assign(tx, TXG_WAIT);
441 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
442 txg = dmu_tx_get_txg(tx);
444 mutex_enter(&zilog->zl_lock);
447 * It is possible for the ZIL to get the previously mounted zilog
448 * structure of the same dataset if quickly remounted and the dbuf
449 * eviction has not completed. In this case we can see a non-empty
450 * lwb list and keep_first will be set. We fix this by clearing
451 * keep_first. This will be slower, but it's very rare.
453 if (!list_is_empty(&zilog->zl_lwb_list) && keep_first)
454 keep_first = B_FALSE;
456 ASSERT3U(zilog->zl_destroy_txg, <, txg);
457 zilog->zl_destroy_txg = txg;
458 zilog->zl_keep_first = keep_first;
460 if (!list_is_empty(&zilog->zl_lwb_list)) {
461 ASSERT(zh->zh_claim_txg == 0);
463 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
464 list_remove(&zilog->zl_lwb_list, lwb);
465 if (lwb->lwb_buf != NULL)
466 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
467 zio_free_blk(zilog->zl_spa, &lwb->lwb_blk, txg);
468 kmem_cache_free(zil_lwb_cache, lwb);
472 (void) zil_parse(zilog, zil_free_log_block,
473 zil_free_log_record, tx, zh->zh_claim_txg);
476 mutex_exit(&zilog->zl_lock);
482 * return true if the initial log block is not valid
485 zil_empty(zilog_t *zilog)
487 const zil_header_t *zh = zilog->zl_header;
488 arc_buf_t *abuf = NULL;
490 if (BP_IS_HOLE(&zh->zh_log))
493 if (zil_read_log_block(zilog, &zh->zh_log, &abuf) != 0)
496 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
501 zil_claim(char *osname, void *txarg)
503 dmu_tx_t *tx = txarg;
504 uint64_t first_txg = dmu_tx_get_txg(tx);
510 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
512 cmn_err(CE_WARN, "can't open objset for %s", osname);
516 zilog = dmu_objset_zil(os);
517 zh = zil_header_in_syncing_context(zilog);
519 if (zilog->zl_spa->spa_log_state == SPA_LOG_CLEAR) {
520 if (!BP_IS_HOLE(&zh->zh_log))
521 zio_free_blk(zilog->zl_spa, &zh->zh_log, first_txg);
522 BP_ZERO(&zh->zh_log);
523 dsl_dataset_dirty(dmu_objset_ds(os), tx);
527 * Record here whether the zil has any records to replay.
528 * If the header block pointer is null or the block points
529 * to the stubby then we know there are no valid log records.
530 * We use the header to store this state as the zilog gets
531 * freed later in dmu_objset_close().
532 * The flags (and the rest of the header fields) are cleared in
533 * zil_sync() as a result of a zil_destroy(), after replaying the log.
535 * Note, the intent log can be empty but still need the
536 * stubby to be claimed.
538 if (!zil_empty(zilog)) {
539 zh->zh_flags |= ZIL_REPLAY_NEEDED;
540 dsl_dataset_dirty(dmu_objset_ds(os), tx);
544 * Claim all log blocks if we haven't already done so, and remember
545 * the highest claimed sequence number. This ensures that if we can
546 * read only part of the log now (e.g. due to a missing device),
547 * but we can read the entire log later, we will not try to replay
548 * or destroy beyond the last block we successfully claimed.
550 ASSERT3U(zh->zh_claim_txg, <=, first_txg);
551 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
552 zh->zh_claim_txg = first_txg;
553 zh->zh_claim_seq = zil_parse(zilog, zil_claim_log_block,
554 zil_claim_log_record, tx, first_txg);
555 dsl_dataset_dirty(dmu_objset_ds(os), tx);
558 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
559 dmu_objset_close(os);
564 * Check the log by walking the log chain.
565 * Checksum errors are ok as they indicate the end of the chain.
566 * Any other error (no device or read failure) returns an error.
570 zil_check_log_chain(char *osname, void *txarg)
581 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
583 cmn_err(CE_WARN, "can't open objset for %s", osname);
587 zilog = dmu_objset_zil(os);
588 zh = zil_header_in_syncing_context(zilog);
590 if (BP_IS_HOLE(&blk)) {
591 dmu_objset_close(os);
592 return (0); /* no chain */
596 error = zil_read_log_block(zilog, &blk, &abuf);
599 lrbuf = abuf->b_data;
600 ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
601 blk = ztp->zit_next_blk;
602 VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
604 dmu_objset_close(os);
606 return (0); /* normal end of chain */
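/*
 * AVL comparator for the per-writer vdev tree (zl_vdev_tree), ordered
 * by vdev id. zil_add_block() records each vdev that a log block was
 * written to, and zil_flush_vdevs() then flushes the write caches of
 * exactly those vdevs.
 */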
611 zil_vdev_compare(const void *x1, const void *x2)
613 uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
614 uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
625 zil_add_block(zilog_t *zilog, blkptr_t *bp)
627 avl_tree_t *t = &zilog->zl_vdev_tree;
629 zil_vdev_node_t *zv, zvsearch;
630 int ndvas = BP_GET_NDVAS(bp);
633 if (zfs_nocacheflush)
636 ASSERT(zilog->zl_writer);
639 * Even though we're zl_writer, we still need a lock because the
640 * zl_get_data() callbacks may have dmu_sync() done callbacks
641 * that will run concurrently.
643 mutex_enter(&zilog->zl_vdev_lock);
644 for (i = 0; i < ndvas; i++) {
645 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
646 if (avl_find(t, &zvsearch, &where) == NULL) {
647 zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
648 zv->zv_vdev = zvsearch.zv_vdev;
649 avl_insert(t, zv, where);
652 mutex_exit(&zilog->zl_vdev_lock);
656 zil_flush_vdevs(zilog_t *zilog)
658 spa_t *spa = zilog->zl_spa;
659 avl_tree_t *t = &zilog->zl_vdev_tree;
664 ASSERT(zilog->zl_writer);
667 * We don't need zl_vdev_lock here because we're the zl_writer,
668 * and all zl_get_data() callbacks are done.
670 if (avl_numnodes(t) == 0)
673 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
675 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
677 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
678 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
681 kmem_free(zv, sizeof (*zv));
685 * Wait for all the flushes to complete. Not all devices actually
686 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
688 (void) zio_wait(zio);
690 spa_config_exit(spa, SCL_STATE, FTAG);
694 * Function called when a log block write completes
697 zil_lwb_write_done(zio_t *zio)
699 lwb_t *lwb = zio->io_private;
700 zilog_t *zilog = lwb->lwb_zilog;
702 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
703 ASSERT(BP_GET_CHECKSUM(zio->io_bp) == ZIO_CHECKSUM_ZILOG);
704 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
705 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
706 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
707 ASSERT(!BP_IS_GANG(zio->io_bp));
708 ASSERT(!BP_IS_HOLE(zio->io_bp));
709 ASSERT(zio->io_bp->blk_fill == 0);
712 * Ensure the lwb buffer pointer is cleared before releasing
713 * the txg. If we have had an allocation failure and
714 * the txg is waiting to sync, then we want zil_sync()
715 * to remove the lwb so that it's not picked up as the next new
716 * one in zil_commit_writer(). zil_sync() will only remove
717 * the lwb if lwb_buf is null.
719 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
720 mutex_enter(&zilog->zl_lock);
723 zilog->zl_log_error = B_TRUE;
726 * Now that we've written this log block, we have a stable pointer
727 * to the next block in the chain, so it's OK to let the txg in
728 * which we allocated the next block sync. We still have the
729 * zl_lock to ensure zil_sync() doesn't kmem_free() the lwb.
731 txg_rele_to_sync(&lwb->lwb_txgh);
732 mutex_exit(&zilog->zl_lock);
736 * Initialize the io for a log block.
739 zil_lwb_write_init(zilog_t *zilog, lwb_t *lwb)
743 zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET];
746 zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
748 if (zilog->zl_root_zio == NULL) {
749 zilog->zl_root_zio = zio_root(zilog->zl_spa, NULL, NULL,
752 if (lwb->lwb_zio == NULL) {
753 lwb->lwb_zio = zio_rewrite(zilog->zl_root_zio, zilog->zl_spa,
754 0, &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz,
755 zil_lwb_write_done, lwb, ZIO_PRIORITY_LOG_WRITE,
756 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb);
761 * Start a log block write and advance to the next log block.
762 * Calls are serialized.
765 zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
768 zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
769 spa_t *spa = zilog->zl_spa;
770 blkptr_t *bp = &ztp->zit_next_blk;
775 ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));
778 * Allocate the next block and save its address in this block
779 * before writing it in order to establish the log chain.
780 * Note that if the allocation of nlwb synced before we wrote
781 * the block that points at it (lwb), we'd leak it if we crashed.
782 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
784 txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
785 txg_rele_to_quiesce(&lwb->lwb_txgh);
788 * Pick a ZIL blocksize. We request a size that is the
789 * maximum of the previous used size, the current used size and
790 * the amount waiting in the queue.
792 zil_blksz = MAX(zilog->zl_prev_used,
793 zilog->zl_cur_used + sizeof (*ztp));
794 zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
795 zil_blksz = P2ROUNDUP_TYPED(zil_blksz, ZIL_MIN_BLKSZ, uint64_t);
796 if (zil_blksz > ZIL_MAX_BLKSZ)
797 zil_blksz = ZIL_MAX_BLKSZ;
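/*
 * For example, assuming ZIL_MIN_BLKSZ is 4KB, a demand of roughly 10KB
 * rounds up to a 12KB block here; demands beyond ZIL_MAX_BLKSZ are
 * capped to ZIL_MAX_BLKSZ.
 */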
800 /* pass the old blkptr in order to spread log blocks across devs */
801 error = zio_alloc_blk(spa, zil_blksz, bp, &lwb->lwb_blk, txg);
803 dmu_tx_t *tx = dmu_tx_create_assigned(zilog->zl_dmu_pool, txg);
806 * We dirty the dataset to ensure that zil_sync() will
807 * be called to remove this lwb from our zl_lwb_list.
808 * Failing to do so, may leave an lwb with a NULL lwb_buf
809 * hanging around on the zl_lwb_list.
811 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
815 * We've just experienced an allocation failure, so we
816 * terminate the current lwb and send it on its way.
819 ztp->zit_nused = lwb->lwb_nused;
820 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
821 zio_nowait(lwb->lwb_zio);
824 * By returning NULL the caller will call txg_wait_synced()
829 ASSERT3U(bp->blk_birth, ==, txg);
831 ztp->zit_nused = lwb->lwb_nused;
832 ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
833 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
834 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
837 * Allocate a new log write buffer (lwb).
839 nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
841 nlwb->lwb_zilog = zilog;
844 nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
845 nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
846 nlwb->lwb_max_txg = txg;
847 nlwb->lwb_zio = NULL;
850 * Put new lwb at the end of the log chain
852 mutex_enter(&zilog->zl_lock);
853 list_insert_tail(&zilog->zl_lwb_list, nlwb);
854 mutex_exit(&zilog->zl_lock);
856 /* Record the block for later vdev flushing */
857 zil_add_block(zilog, &lwb->lwb_blk);
860 * kick off the write for the old log block
862 dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
863 ASSERT(lwb->lwb_zio);
864 zio_nowait(lwb->lwb_zio);
870 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
872 lr_t *lrc = &itx->itx_lr; /* common log record */
873 lr_write_t *lr = (lr_write_t *)lrc;
874 uint64_t txg = lrc->lrc_txg;
875 uint64_t reclen = lrc->lrc_reclen;
880 ASSERT(lwb->lwb_buf != NULL);
882 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY)
883 dlen = P2ROUNDUP_TYPED(
884 lr->lr_length, sizeof (uint64_t), uint64_t);
888 zilog->zl_cur_used += (reclen + dlen);
890 zil_lwb_write_init(zilog, lwb);
893 * If this record won't fit in the current log block, start a new one.
895 if (lwb->lwb_nused + reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
896 lwb = zil_lwb_write_start(zilog, lwb);
899 zil_lwb_write_init(zilog, lwb);
900 ASSERT(lwb->lwb_nused == 0);
901 if (reclen + dlen > ZIL_BLK_DATA_SZ(lwb)) {
902 txg_wait_synced(zilog->zl_dmu_pool, txg);
908 * Update lrc_seq to be the log record sequence number (see zil.h),
909 * then copy the record to the log buffer.
911 lrc->lrc_seq = ++zilog->zl_lr_seq; /* we are single threaded */
912 bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
915 * If it's a write, fetch the data or get its blkptr as appropriate.
917 if (lrc->lrc_txtype == TX_WRITE) {
918 if (txg > spa_freeze_txg(zilog->zl_spa))
919 txg_wait_synced(zilog->zl_dmu_pool, txg);
920 if (itx->itx_wr_state != WR_COPIED) {
924 /* alignment is guaranteed */
925 lr = (lr_write_t *)(lwb->lwb_buf + lwb->lwb_nused);
927 ASSERT(itx->itx_wr_state == WR_NEED_COPY);
928 dbuf = lwb->lwb_buf + lwb->lwb_nused + reclen;
929 lr->lr_common.lrc_reclen += dlen;
931 ASSERT(itx->itx_wr_state == WR_INDIRECT);
934 error = zilog->zl_get_data(
935 itx->itx_private, lr, dbuf, lwb->lwb_zio);
937 txg_wait_synced(zilog->zl_dmu_pool, txg);
941 ASSERT(error == ENOENT || error == EEXIST ||
948 lwb->lwb_nused += reclen + dlen;
949 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
950 ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
951 ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);
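/*
 * Allocate an in-memory intent log transaction (itx) with space for a
 * log record of lrsize bytes (rounded up to 8-byte alignment) embedded
 * at itx_lr.
 */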
957 zil_itx_create(uint64_t txtype, size_t lrsize)
961 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
963 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
964 itx->itx_lr.lrc_txtype = txtype;
965 itx->itx_lr.lrc_reclen = lrsize;
966 itx->itx_sod = lrsize; /* if write & WR_NEED_COPY will be increased */
967 itx->itx_lr.lrc_seq = 0; /* defensive */
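/*
 * Queue an itx on the in-memory list: stamp it with the txg of the
 * caller's dmu transaction and with the next per-zilog sequence
 * number, and account for its size in zl_itx_list_sz.
 */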
973 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
977 ASSERT(itx->itx_lr.lrc_seq == 0);
979 mutex_enter(&zilog->zl_lock);
980 list_insert_tail(&zilog->zl_itx_list, itx);
981 zilog->zl_itx_list_sz += itx->itx_sod;
982 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
983 itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
984 mutex_exit(&zilog->zl_lock);
990 * Free up all in-memory intent log transactions that have now been synced.
993 zil_itx_clean(zilog_t *zilog)
995 uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
996 uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
1000 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1002 mutex_enter(&zilog->zl_lock);
1003 /* wait for a log writer to finish walking list */
1004 while (zilog->zl_writer) {
1005 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1009 * Move the sync'd log transactions to a separate list so we can call
1010 * kmem_free without holding the zl_lock.
1012 * There is no need to set zl_writer as we don't drop zl_lock here
1014 while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
1015 itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
1016 list_remove(&zilog->zl_itx_list, itx);
1017 zilog->zl_itx_list_sz -= itx->itx_sod;
1018 list_insert_tail(&clean_list, itx);
1020 cv_broadcast(&zilog->zl_cv_writer);
1021 mutex_exit(&zilog->zl_lock);
1023 /* destroy sync'd log transactions */
1024 while ((itx = list_head(&clean_list)) != NULL) {
1025 list_remove(&clean_list, itx);
1026 kmem_free(itx, offsetof(itx_t, itx_lr)
1027 + itx->itx_lr.lrc_reclen);
1029 list_destroy(&clean_list);
1033 * If there are any in-memory intent log transactions which have now been
1034 * synced then start up a taskq to free them.
1037 zil_clean(zilog_t *zilog)
1041 mutex_enter(&zilog->zl_lock);
1042 itx = list_head(&zilog->zl_itx_list);
1043 if ((itx != NULL) &&
1044 (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
1045 (void) taskq_dispatch(zilog->zl_clean_taskq,
1046 (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
1048 mutex_exit(&zilog->zl_lock);
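/*
 * Write queued itxs to the on-disk log, up through the given sequence
 * number for the given foid. Entered with zl_lock held; zl_writer is
 * set for the duration, the lock is dropped while log blocks are
 * filled and issued, and it is held again on return.
 */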
1052 zil_commit_writer(zilog_t *zilog, uint64_t seq, uint64_t foid)
1055 uint64_t commit_seq = 0;
1056 itx_t *itx, *itx_next = (itx_t *)-1;
1060 zilog->zl_writer = B_TRUE;
1061 ASSERT(zilog->zl_root_zio == NULL);
1062 spa = zilog->zl_spa;
1064 if (zilog->zl_suspend) {
1067 lwb = list_tail(&zilog->zl_lwb_list);
1070 * Return if there's nothing to flush before we
1071 * dirty the fs by calling zil_create()
1073 if (list_is_empty(&zilog->zl_itx_list)) {
1074 zilog->zl_writer = B_FALSE;
1077 mutex_exit(&zilog->zl_lock);
1079 mutex_enter(&zilog->zl_lock);
1080 lwb = list_tail(&zilog->zl_lwb_list);
1084 /* Loop through in-memory log transactions filling log blocks. */
1085 DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
1088 * Find the next itx to push:
1089 * Push all transactions related to specified foid and all
1090 * other transactions except TX_WRITE, TX_TRUNCATE,
1091 * TX_SETATTR and TX_ACL for all other files.
1093 if (itx_next != (itx_t *)-1)
1096 itx = list_head(&zilog->zl_itx_list);
1097 for (; itx != NULL; itx = list_next(&zilog->zl_itx_list, itx)) {
1098 if (foid == 0) /* push all foids? */
1100 if (itx->itx_sync) /* push all O_[D]SYNC */
1102 switch (itx->itx_lr.lrc_txtype) {
1107 /* lr_foid is same offset for these records */
1108 if (((lr_write_t *)&itx->itx_lr)->lr_foid
1110 continue; /* skip this record */
1118 if ((itx->itx_lr.lrc_seq > seq) &&
1119 ((lwb == NULL) || (lwb->lwb_nused == 0) ||
1120 (lwb->lwb_nused + itx->itx_sod > ZIL_BLK_DATA_SZ(lwb)))) {
1125 * Save the next pointer. Even though we soon drop
1126 * zl_lock, all threads that may change the list
1127 * (another writer or zil_itx_clean) can't do so until
1128 * they have zl_writer.
1130 itx_next = list_next(&zilog->zl_itx_list, itx);
1131 list_remove(&zilog->zl_itx_list, itx);
1132 zilog->zl_itx_list_sz -= itx->itx_sod;
1133 mutex_exit(&zilog->zl_lock);
1134 txg = itx->itx_lr.lrc_txg;
1137 if (txg > spa_last_synced_txg(spa) ||
1138 txg > spa_freeze_txg(spa))
1139 lwb = zil_lwb_commit(zilog, itx, lwb);
1140 kmem_free(itx, offsetof(itx_t, itx_lr)
1141 + itx->itx_lr.lrc_reclen);
1142 mutex_enter(&zilog->zl_lock);
1144 DTRACE_PROBE1(zil__cw2, zilog_t *, zilog);
1145 /* determine commit sequence number */
1146 itx = list_head(&zilog->zl_itx_list);
1148 commit_seq = itx->itx_lr.lrc_seq;
1150 commit_seq = zilog->zl_itx_seq;
1151 mutex_exit(&zilog->zl_lock);
1153 /* write the last block out */
1154 if (lwb != NULL && lwb->lwb_zio != NULL)
1155 lwb = zil_lwb_write_start(zilog, lwb);
1157 zilog->zl_prev_used = zilog->zl_cur_used;
1158 zilog->zl_cur_used = 0;
1161 * Wait if necessary for the log blocks to be on stable storage.
1163 if (zilog->zl_root_zio) {
1164 DTRACE_PROBE1(zil__cw3, zilog_t *, zilog);
1165 (void) zio_wait(zilog->zl_root_zio);
1166 zilog->zl_root_zio = NULL;
1167 DTRACE_PROBE1(zil__cw4, zilog_t *, zilog);
1168 zil_flush_vdevs(zilog);
1171 if (zilog->zl_log_error || lwb == NULL) {
1172 zilog->zl_log_error = 0;
1173 txg_wait_synced(zilog->zl_dmu_pool, 0);
1176 mutex_enter(&zilog->zl_lock);
1177 zilog->zl_writer = B_FALSE;
1179 ASSERT3U(commit_seq, >=, zilog->zl_commit_seq);
1180 zilog->zl_commit_seq = commit_seq;
1184 * Push zfs transactions to stable storage up to the supplied sequence number.
1185 * If foid is 0 push out all transactions, otherwise push only those
1186 * for that file or that might have been used to create that file.
1189 zil_commit(zilog_t *zilog, uint64_t seq, uint64_t foid)
1191 if (zilog == NULL || seq == 0)
1194 mutex_enter(&zilog->zl_lock);
1196 seq = MIN(seq, zilog->zl_itx_seq); /* cap seq at largest itx seq */
1198 while (zilog->zl_writer) {
1199 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1200 if (seq < zilog->zl_commit_seq) {
1201 mutex_exit(&zilog->zl_lock);
1205 zil_commit_writer(zilog, seq, foid); /* drops zl_lock */
1206 /* wake up others waiting on the commit */
1207 cv_broadcast(&zilog->zl_cv_writer);
1208 mutex_exit(&zilog->zl_lock);
1212 * Called in syncing context to free committed log blocks and update log header.
1215 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
1217 zil_header_t *zh = zil_header_in_syncing_context(zilog);
1218 uint64_t txg = dmu_tx_get_txg(tx);
1219 spa_t *spa = zilog->zl_spa;
1223 * We don't zero out zl_destroy_txg, so make sure we don't try
1224 * to destroy it twice.
1226 if (spa_sync_pass(spa) != 1)
1229 mutex_enter(&zilog->zl_lock);
1231 ASSERT(zilog->zl_stop_sync == 0);
1233 zh->zh_replay_seq = zilog->zl_replayed_seq[txg & TXG_MASK];
1235 if (zilog->zl_destroy_txg == txg) {
1236 blkptr_t blk = zh->zh_log;
1238 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
1240 bzero(zh, sizeof (zil_header_t));
1241 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
1243 if (zilog->zl_keep_first) {
1245 * If this block was part of a log chain that couldn't
1246 * be claimed because a device was missing during
1247 * zil_claim(), but that device later returns,
1248 * then this block could erroneously appear valid.
1249 * To guard against this, assign a new GUID to the new
1250 * log chain so it doesn't matter what blk points to.
1252 zil_init_log_chain(zilog, &blk);
1257 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1258 zh->zh_log = lwb->lwb_blk;
1259 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
1261 list_remove(&zilog->zl_lwb_list, lwb);
1262 zio_free_blk(spa, &lwb->lwb_blk, txg);
1263 kmem_cache_free(zil_lwb_cache, lwb);
1266 * If we don't have anything left in the lwb list then
1267 * we've had an allocation failure and we need to zero
1268 * out the zil_header blkptr so that we don't end
1269 * up freeing the same block twice.
1271 if (list_head(&zilog->zl_lwb_list) == NULL)
1272 BP_ZERO(&zh->zh_log);
1274 mutex_exit(&zilog->zl_lock);
1280 zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
1281 sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0);
1287 kmem_cache_destroy(zil_lwb_cache);
1291 zil_alloc(objset_t *os, zil_header_t *zh_phys)
1295 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
1297 zilog->zl_header = zh_phys;
1299 zilog->zl_spa = dmu_objset_spa(os);
1300 zilog->zl_dmu_pool = dmu_objset_pool(os);
1301 zilog->zl_destroy_txg = TXG_INITIAL - 1;
1303 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
1305 list_create(&zilog->zl_itx_list, sizeof (itx_t),
1306 offsetof(itx_t, itx_node));
1308 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
1309 offsetof(lwb_t, lwb_node));
1311 mutex_init(&zilog->zl_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
1313 avl_create(&zilog->zl_vdev_tree, zil_vdev_compare,
1314 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
1316 cv_init(&zilog->zl_cv_writer, NULL, CV_DEFAULT, NULL);
1317 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
1323 zil_free(zilog_t *zilog)
1327 zilog->zl_stop_sync = 1;
1329 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
1330 list_remove(&zilog->zl_lwb_list, lwb);
1331 if (lwb->lwb_buf != NULL)
1332 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
1333 kmem_cache_free(zil_lwb_cache, lwb);
1335 list_destroy(&zilog->zl_lwb_list);
1337 avl_destroy(&zilog->zl_vdev_tree);
1338 mutex_destroy(&zilog->zl_vdev_lock);
1340 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1341 list_destroy(&zilog->zl_itx_list);
1342 mutex_destroy(&zilog->zl_lock);
1344 cv_destroy(&zilog->zl_cv_writer);
1345 cv_destroy(&zilog->zl_cv_suspend);
1347 kmem_free(zilog, sizeof (zilog_t));
1351 * Open an intent log.
1354 zil_open(objset_t *os, zil_get_data_t *get_data)
1356 zilog_t *zilog = dmu_objset_zil(os);
1358 zilog->zl_get_data = get_data;
1359 zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
1360 2, 2, TASKQ_PREPOPULATE);
1366 * Close an intent log.
1369 zil_close(zilog_t *zilog)
1372 * If the log isn't already committed, mark the objset dirty
1373 * (so zil_sync() will be called) and wait for that txg to sync.
1375 if (!zil_is_committed(zilog)) {
1377 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
1378 (void) dmu_tx_assign(tx, TXG_WAIT);
1379 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1380 txg = dmu_tx_get_txg(tx);
1382 txg_wait_synced(zilog->zl_dmu_pool, txg);
1385 taskq_destroy(zilog->zl_clean_taskq);
1386 zilog->zl_clean_taskq = NULL;
1387 zilog->zl_get_data = NULL;
1389 zil_itx_clean(zilog);
1390 ASSERT(list_head(&zilog->zl_itx_list) == NULL);
1394 * Suspend an intent log. While in suspended mode, we still honor
1395 * synchronous semantics, but we rely on txg_wait_synced() to do it.
1396 * We suspend the log briefly when taking a snapshot so that the snapshot
1397 * contains all the data it's supposed to, and has an empty intent log.
1400 zil_suspend(zilog_t *zilog)
1402 const zil_header_t *zh = zilog->zl_header;
1404 mutex_enter(&zilog->zl_lock);
1405 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
1406 mutex_exit(&zilog->zl_lock);
1409 if (zilog->zl_suspend++ != 0) {
1411 * Someone else already began a suspend.
1412 * Just wait for them to finish.
1414 while (zilog->zl_suspending)
1415 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
1416 mutex_exit(&zilog->zl_lock);
1419 zilog->zl_suspending = B_TRUE;
1420 mutex_exit(&zilog->zl_lock);
1422 zil_commit(zilog, UINT64_MAX, 0);
1425 * Wait for any in-flight log writes to complete.
1427 mutex_enter(&zilog->zl_lock);
1428 while (zilog->zl_writer)
1429 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1430 mutex_exit(&zilog->zl_lock);
1432 zil_destroy(zilog, B_FALSE);
1434 mutex_enter(&zilog->zl_lock);
1435 zilog->zl_suspending = B_FALSE;
1436 cv_broadcast(&zilog->zl_cv_suspend);
1437 mutex_exit(&zilog->zl_lock);
1443 zil_resume(zilog_t *zilog)
1445 mutex_enter(&zilog->zl_lock);
1446 ASSERT(zilog->zl_suspend != 0);
1447 zilog->zl_suspend--;
1448 mutex_exit(&zilog->zl_lock);
1452 * Read in the data for the dmu_sync()ed block, and change the log
1453 * record to write this whole block.
1456 zil_get_replay_data(zilog_t *zilog, lr_write_t *lr)
1458 blkptr_t *wbp = &lr->lr_blkptr;
1459 char *wbuf = (char *)(lr + 1); /* data follows lr_write_t */
1462 if (BP_IS_HOLE(wbp)) { /* compressed to a hole */
1463 blksz = BP_GET_LSIZE(&lr->lr_blkptr);
1465 * If the blksz is zero then we must be replaying a log
1466 * from a version prior to setting the blksize of null blocks.
1467 * So we just zero the actual write size requested.
1470 bzero(wbuf, lr->lr_length);
1476 * A subsequent write may have overwritten this block, in which
1477 * case wbp may have been freed and reallocated, and our
1478 * read of wbp may fail with a checksum error. We can safely
1479 * ignore this because the later write will provide the
1484 zb.zb_objset = dmu_objset_id(zilog->zl_os);
1485 zb.zb_object = lr->lr_foid;
1487 zb.zb_blkid = -1; /* unknown */
1489 blksz = BP_GET_LSIZE(&lr->lr_blkptr);
1490 (void) zio_wait(zio_read(NULL, zilog->zl_spa, wbp, wbuf, blksz,
1491 NULL, NULL, ZIO_PRIORITY_SYNC_READ,
1492 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
1494 lr->lr_offset -= lr->lr_offset % blksz;
1495 lr->lr_length = blksz;
1498 typedef struct zil_replay_arg {
1500 zil_replay_func_t **zr_replay;
1502 boolean_t zr_byteswap;
1507 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
1509 zil_replay_arg_t *zr = zra;
1510 const zil_header_t *zh = zilog->zl_header;
1511 uint64_t reclen = lr->lrc_reclen;
1512 uint64_t txtype = lr->lrc_txtype;
1516 if (!zilog->zl_replay) /* giving up */
1519 if (lr->lrc_txg < claim_txg) /* already committed */
1522 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
1525 /* Strip case-insensitive bit, still present in log record */
1528 if (txtype == 0 || txtype >= TX_MAX_TYPE) {
1534 * Make a copy of the data so we can revise and extend it.
1536 bcopy(lr, zr->zr_lrbuf, reclen);
1539 * The log block containing this lr may have been byteswapped
1540 * so that we can easily examine common fields like lrc_txtype.
1541 * However, the log is a mix of different data types, and only the
1542 * replay vectors know how to byteswap their records. Therefore, if
1543 * the lr was byteswapped, undo it before invoking the replay vector.
1545 if (zr->zr_byteswap)
1546 byteswap_uint64_array(zr->zr_lrbuf, reclen);
1549 * We must now do two things atomically: replay this log record,
1550 * and update the log header sequence number to reflect the fact that
1551 * we did so. At the end of each replay function the sequence number
1552 * is updated if we are in replay mode.
1554 for (pass = 1; pass <= 2; pass++) {
1555 zilog->zl_replaying_seq = lr->lrc_seq;
1556 /* Only byteswap (if needed) on the 1st pass. */
1557 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
1558 zr->zr_byteswap && pass == 1);
1564 * The DMU's dnode layer doesn't see removes until the txg
1565 * commits, so a subsequent claim can spuriously fail with
1566 * EEXIST. So if we receive any error we try syncing out
1567 * any removes, then retry the transaction.
1570 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
1575 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1576 dmu_objset_name(zr->zr_os, name);
1577 cmn_err(CE_WARN, "ZFS replay transaction error %d, "
1578 "dataset %s, seq 0x%llx, txtype %llu %s\n",
1579 error, name, (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype,
1580 (lr->lrc_txtype & TX_CI) ? "CI" : "");
1581 zilog->zl_replay = B_FALSE;
1582 kmem_free(name, MAXNAMELEN);
1587 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
1589 zilog->zl_replay_blks++;
1593 * If this dataset has a non-empty intent log, replay it and destroy it.
1596 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
1598 zilog_t *zilog = dmu_objset_zil(os);
1599 const zil_header_t *zh = zilog->zl_header;
1600 zil_replay_arg_t zr;
1602 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
1603 zil_destroy(zilog, B_TRUE);
1606 //printf("ZFS: Replaying ZIL on %s...\n", os->os->os_spa->spa_name);
1609 zr.zr_replay = replay_func;
1611 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
1612 zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
1615 * Wait for in-progress removes to sync before starting replay.
1617 txg_wait_synced(zilog->zl_dmu_pool, 0);
1619 zilog->zl_replay = B_TRUE;
1620 zilog->zl_replay_time = LBOLT;
1621 ASSERT(zilog->zl_replay_blks == 0);
1622 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
1624 kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);
1626 zil_destroy(zilog, B_FALSE);
1627 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
1628 zilog->zl_replay = B_FALSE;
1629 //printf("ZFS: Replay of ZIL on %s finished.\n", os->os->os_spa->spa_name);
1633 * Report whether all transactions are committed
1636 zil_is_committed(zilog_t *zilog)
1641 mutex_enter(&zilog->zl_lock);
1642 while (zilog->zl_writer)
1643 cv_wait(&zilog->zl_cv_writer, &zilog->zl_lock);
1645 /* recent unpushed intent log transactions? */
1646 if (!list_is_empty(&zilog->zl_itx_list)) {
1651 /* intent log never used? */
1652 lwb = list_head(&zilog->zl_lwb_list);
1659 * more than 1 log buffer means zil_sync() hasn't yet freed
1660 * entries after a txg has committed
1662 if (list_next(&zilog->zl_lwb_list, lwb)) {
1667 ASSERT(zil_empty(zilog));
1670 cv_broadcast(&zilog->zl_cv_writer);
1671 mutex_exit(&zilog->zl_lock);
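/*
 * Per-objset callback in the same style as zil_claim() above (suitable
 * for dmu_objset_find()): open the objset and try to suspend its ZIL,
 * e.g. so that a log device can be taken offline safely.
 */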
1677 zil_vdev_offline(char *osname, void *arg)
1683 error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_USER, &os);
1687 zilog = dmu_objset_zil(os);
1688 if (zil_suspend(zilog) != 0)
1692 dmu_objset_close(os);