4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Integros [integros.com]
27 /* Portions Copyright 2010 Robert Milkowski */
29 #include <sys/zfs_context.h>
35 #include <sys/resource.h>
37 #include <sys/zil_impl.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/vdev_impl.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/dsl_pool.h>
45 * The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
46 * calls that change the file system. Each itx has enough information to
47 * be able to replay it after a system crash, power loss, or
48 * equivalent failure mode. These are stored in memory until either:
50 * 1. they are committed to the pool by the DMU transaction group
51 * (txg), at which point they can be discarded; or
52 * 2. they are committed to the on-disk ZIL for the dataset being
53 * modified (e.g. due to an fsync, O_DSYNC, or other synchronous
56 * In the event of a crash or power loss, the itxs contained by each
57 * dataset's on-disk ZIL will be replayed when that dataset is first
58 * instantiated (e.g. if the dataset is a normal filesystem, when it is
61 * As hinted at above, there is one ZIL per dataset (both the in-memory
62 * representation, and the on-disk representation). The on-disk format
63 * consists of 3 parts:
65 * - a single, per-dataset, ZIL header; which points to a chain of
66 * - zero or more ZIL blocks; each of which contains
67 * - zero or more ZIL records
69 * A ZIL record holds the information necessary to replay a single
70 * system call transaction. A ZIL block can hold many ZIL records, and
71 * the blocks are chained together, similarly to a singly linked list.
73 * Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
74 * block in the chain, and the ZIL header points to the first block in
77 * Note, there is not a fixed place in the pool to hold these ZIL
78 * blocks; they are dynamically allocated and freed as needed from the
79 * blocks available on the pool, though they can be preferentially
80 * allocated from a dedicated "log" vdev.
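 *
 * For illustration, the on-disk layout described above can be pictured
 * roughly as:
 *
 *   zil_header_t --> [ZIL block: records..., blkptr] --> [ZIL block] --> (hole)
 *
 * where walking the chain (see zil_parse()) stops at a hole block pointer.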
84 * This controls the amount of time that a ZIL block (lwb) will remain
85 * "open" when it isn't "full", and it has a thread waiting for it to be
86 * committed to stable storage. Please refer to the zil_commit_waiter()
87 * function (and the comments within it) for more details.
89 int zfs_commit_timeout_pct = 5;
92 * Disable intent logging replay. This global ZIL switch affects all pools.
94 int zil_replay_disable = 0;
95 SYSCTL_DECL(_vfs_zfs);
96 SYSCTL_INT(_vfs_zfs, OID_AUTO, zil_replay_disable, CTLFLAG_RWTUN,
97 &zil_replay_disable, 0, "Disable intent logging replay");
100 * Tunable parameter for debugging or performance analysis. Setting
101 * zfs_nocacheflush will cause corruption on power loss if a volatile
102 * out-of-order write cache is enabled.
104 boolean_t zfs_nocacheflush = B_FALSE;
105 SYSCTL_INT(_vfs_zfs, OID_AUTO, cache_flush_disable, CTLFLAG_RDTUN,
106 &zfs_nocacheflush, 0, "Disable cache flush");
107 boolean_t zfs_trim_enabled = B_TRUE;
108 SYSCTL_DECL(_vfs_zfs_trim);
109 SYSCTL_INT(_vfs_zfs_trim, OID_AUTO, enabled, CTLFLAG_RDTUN, &zfs_trim_enabled, 0,
113 * Limit SLOG write size per commit executed with synchronous priority.
114 * Any writes above that will be executed with lower (asynchronous) priority
115 * to limit potential SLOG device abuse by single active ZIL writer.
117 uint64_t zil_slog_bulk = 768 * 1024;
118 SYSCTL_QUAD(_vfs_zfs, OID_AUTO, zil_slog_bulk, CTLFLAG_RWTUN,
119 &zil_slog_bulk, 0, "Maximal SLOG commit size with sync priority");
121 static kmem_cache_t *zil_lwb_cache;
122 static kmem_cache_t *zil_zcw_cache;
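/*
 * LWB_EMPTY() evaluates to true when no log records have been written
 * into the lwb's buffer yet; i.e. the space consumed so far (lwb_nused)
 * covers at most the zil_chain_t header.
 */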
124 #define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
125 sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
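/*
 * AVL comparison function for the per-zilog tree of claimed log block
 * DVAs (zl_bp_tree); nodes are ordered by vdev and then by offset.
 * The tree lets zil_bp_tree_add() detect blocks that have already been
 * visited while parsing or claiming the log.
 */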
128 zil_bp_compare(const void *x1, const void *x2)
130 const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
131 const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
133 if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
135 if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
138 if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
140 if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
147 zil_bp_tree_init(zilog_t *zilog)
149 avl_create(&zilog->zl_bp_tree, zil_bp_compare,
150 sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
154 zil_bp_tree_fini(zilog_t *zilog)
156 avl_tree_t *t = &zilog->zl_bp_tree;
160 while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
161 kmem_free(zn, sizeof (zil_bp_node_t));
167 zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
169 avl_tree_t *t = &zilog->zl_bp_tree;
174 if (BP_IS_EMBEDDED(bp))
177 dva = BP_IDENTITY(bp);
179 if (avl_find(t, dva, &where) != NULL)
180 return (SET_ERROR(EEXIST));
182 zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
184 avl_insert(t, zn, where);
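/*
 * Return a writable view of the ZIL header. The header is part of the
 * MOS, so it may only be modified from syncing context (see zil_claim()).
 */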
189 static zil_header_t *
190 zil_header_in_syncing_context(zilog_t *zilog)
192 return ((zil_header_t *)zilog->zl_header);
196 zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
198 zio_cksum_t *zc = &bp->blk_cksum;
200 zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
201 zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
202 zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
203 zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
207 * Read a log block and make sure it's valid.
210 zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
213 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
214 arc_flags_t aflags = ARC_FLAG_WAIT;
215 arc_buf_t *abuf = NULL;
219 if (zilog->zl_header->zh_claim_txg == 0)
220 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
222 if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
223 zio_flags |= ZIO_FLAG_SPECULATIVE;
225 SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
226 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
228 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
229 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
232 zio_cksum_t cksum = bp->blk_cksum;
235 * Validate the checksummed log block.
237 * Sequence numbers should be... sequential. The checksum
238 * verifier for the next block should be bp's checksum plus 1.
240 * Also check the log chain linkage and size used.
242 cksum.zc_word[ZIL_ZC_SEQ]++;
244 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
245 zil_chain_t *zilc = abuf->b_data;
246 char *lr = (char *)(zilc + 1);
247 uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
249 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
250 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
251 error = SET_ERROR(ECKSUM);
253 ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
255 *end = (char *)dst + len;
256 *nbp = zilc->zc_next_blk;
259 char *lr = abuf->b_data;
260 uint64_t size = BP_GET_LSIZE(bp);
261 zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
263 if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
264 sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
265 (zilc->zc_nused > (size - sizeof (*zilc)))) {
266 error = SET_ERROR(ECKSUM);
268 ASSERT3U(zilc->zc_nused, <=,
269 SPA_OLD_MAXBLOCKSIZE);
270 bcopy(lr, dst, zilc->zc_nused);
271 *end = (char *)dst + zilc->zc_nused;
272 *nbp = zilc->zc_next_blk;
276 arc_buf_destroy(abuf, &abuf);
283 * Read a TX_WRITE log data block.
286 zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
288 enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
289 const blkptr_t *bp = &lr->lr_blkptr;
290 arc_flags_t aflags = ARC_FLAG_WAIT;
291 arc_buf_t *abuf = NULL;
295 if (BP_IS_HOLE(bp)) {
297 bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
301 if (zilog->zl_header->zh_claim_txg == 0)
302 zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
304 SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
305 ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
307 error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
308 ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
312 bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
313 arc_buf_destroy(abuf, &abuf);
320 * Parse the intent log, and call parse_func for each valid record within.
323 zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
324 zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
326 const zil_header_t *zh = zilog->zl_header;
327 boolean_t claimed = !!zh->zh_claim_txg;
328 uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
329 uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
330 uint64_t max_blk_seq = 0;
331 uint64_t max_lr_seq = 0;
332 uint64_t blk_count = 0;
333 uint64_t lr_count = 0;
334 blkptr_t blk, next_blk;
339 * Old logs didn't record the maximum zh_claim_lr_seq.
341 if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
342 claim_lr_seq = UINT64_MAX;
345 * Starting at the block pointed to by zh_log we read the log chain.
346 * For each block in the chain we strongly check that block to
347 * ensure its validity. We stop when an invalid block is found.
348 * For each block pointer in the chain we call parse_blk_func().
349 * For each record in each valid block we call parse_lr_func().
350 * If the log has been claimed, stop if we encounter a sequence
351 * number greater than the highest claimed sequence number.
353 lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
354 zil_bp_tree_init(zilog);
356 for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
357 uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
361 if (blk_seq > claim_blk_seq)
363 if ((error = parse_blk_func(zilog, &blk, arg, txg)) != 0)
365 ASSERT3U(max_blk_seq, <, blk_seq);
366 max_blk_seq = blk_seq;
369 if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
372 error = zil_read_log_block(zilog, &blk, &next_blk, lrbuf, &end);
376 for (lrp = lrbuf; lrp < end; lrp += reclen) {
377 lr_t *lr = (lr_t *)lrp;
378 reclen = lr->lrc_reclen;
379 ASSERT3U(reclen, >=, sizeof (lr_t));
380 if (lr->lrc_seq > claim_lr_seq)
382 if ((error = parse_lr_func(zilog, lr, arg, txg)) != 0)
384 ASSERT3U(max_lr_seq, <, lr->lrc_seq);
385 max_lr_seq = lr->lrc_seq;
390 zilog->zl_parse_error = error;
391 zilog->zl_parse_blk_seq = max_blk_seq;
392 zilog->zl_parse_lr_seq = max_lr_seq;
393 zilog->zl_parse_blk_count = blk_count;
394 zilog->zl_parse_lr_count = lr_count;
396 ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
397 (max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq));
399 zil_bp_tree_fini(zilog);
400 zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
406 zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
409 * Claim log block if not already committed and not already claimed.
410 * If tx == NULL, just verify that the block is claimable.
412 if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
413 zil_bp_tree_add(zilog, bp) != 0)
416 return (zio_wait(zio_claim(NULL, zilog->zl_spa,
417 tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
418 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
422 zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
424 lr_write_t *lr = (lr_write_t *)lrc;
427 if (lrc->lrc_txtype != TX_WRITE)
431 * If the block is not readable, don't claim it. This can happen
432 * in normal operation when a log block is written to disk before
433 * some of the dmu_sync() blocks it points to. In this case, the
434 * transaction cannot have been committed to anyone (we would have
435 * waited for all writes to be stable first), so it is semantically
436 * correct to declare this the end of the log.
438 if (lr->lr_blkptr.blk_birth >= first_txg &&
439 (error = zil_read_log_data(zilog, lr, NULL)) != 0)
441 return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
446 zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
448 zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
454 zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
456 lr_write_t *lr = (lr_write_t *)lrc;
457 blkptr_t *bp = &lr->lr_blkptr;
460 * If we previously claimed it, we need to free it.
462 if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
463 bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
465 zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
471 zil_lwb_vdev_compare(const void *x1, const void *x2)
473 const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
474 const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
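/*
 * Allocate a new log write block (lwb) for the given on-disk block
 * pointer and insert it at the tail of the zilog's zl_lwb_list. The
 * lwb starts out in the "closed" state, with no zios attached.
 */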
485 zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg)
489 lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
490 lwb->lwb_zilog = zilog;
492 lwb->lwb_slog = slog;
493 lwb->lwb_state = LWB_STATE_CLOSED;
494 lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
495 lwb->lwb_max_txg = txg;
496 lwb->lwb_write_zio = NULL;
497 lwb->lwb_root_zio = NULL;
499 lwb->lwb_issued_timestamp = 0;
500 if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
501 lwb->lwb_nused = sizeof (zil_chain_t);
502 lwb->lwb_sz = BP_GET_LSIZE(bp);
505 lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
508 mutex_enter(&zilog->zl_lock);
509 list_insert_tail(&zilog->zl_lwb_list, lwb);
510 mutex_exit(&zilog->zl_lock);
512 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
513 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
514 ASSERT(list_is_empty(&lwb->lwb_waiters));
520 zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
522 ASSERT(MUTEX_HELD(&zilog->zl_lock));
523 ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
524 ASSERT(list_is_empty(&lwb->lwb_waiters));
526 if (lwb->lwb_state == LWB_STATE_OPENED) {
527 avl_tree_t *t = &lwb->lwb_vdev_tree;
531 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
532 kmem_free(zv, sizeof (*zv));
534 ASSERT3P(lwb->lwb_root_zio, !=, NULL);
535 ASSERT3P(lwb->lwb_write_zio, !=, NULL);
537 zio_cancel(lwb->lwb_root_zio);
538 zio_cancel(lwb->lwb_write_zio);
540 lwb->lwb_root_zio = NULL;
541 lwb->lwb_write_zio = NULL;
543 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
546 ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
547 ASSERT3P(lwb->lwb_write_zio, ==, NULL);
548 ASSERT3P(lwb->lwb_root_zio, ==, NULL);
551 * Clear the zilog's field to indicate this lwb is no longer
552 * valid, and prevent use-after-free errors.
554 if (zilog->zl_last_lwb_opened == lwb)
555 zilog->zl_last_lwb_opened = NULL;
557 kmem_cache_free(zil_lwb_cache, lwb);
561 * Called when we create in-memory log transactions so that we know
562 * to cleanup the itxs at the end of spa_sync().
565 zilog_dirty(zilog_t *zilog, uint64_t txg)
567 dsl_pool_t *dp = zilog->zl_dmu_pool;
568 dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
570 ASSERT(spa_writeable(zilog->zl_spa));
572 if (ds->ds_is_snapshot)
573 panic("dirtying snapshot!");
575 if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
576 /* up the hold count until we can be written out */
577 dmu_buf_add_ref(ds->ds_dbuf, zilog);
579 zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
584 * Determine if the zil is dirty in the specified txg. Callers wanting to
585 * ensure that the dirty state does not change must hold the itxg_lock for
586 * the specified txg. Holding the lock will ensure that the zil cannot be
587 * dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
591 zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
593 dsl_pool_t *dp = zilog->zl_dmu_pool;
595 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
601 * Determine if the zil is dirty. The zil is considered dirty if it has
602 * any pending itx records that have not been cleaned by zil_clean().
605 zilog_is_dirty(zilog_t *zilog)
607 dsl_pool_t *dp = zilog->zl_dmu_pool;
609 for (int t = 0; t < TXG_SIZE; t++) {
610 if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
617 * Create an on-disk intent log.
620 zil_create(zilog_t *zilog)
622 const zil_header_t *zh = zilog->zl_header;
628 boolean_t slog = FALSE;
631 * Wait for any previous destroy to complete.
633 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
635 ASSERT(zh->zh_claim_txg == 0);
636 ASSERT(zh->zh_replay_seq == 0);
641 * Allocate an initial log block if:
642 * - there isn't one already
643 * - the existing block is the wrong endianness
645 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
646 tx = dmu_tx_create(zilog->zl_os);
647 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
648 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
649 txg = dmu_tx_get_txg(tx);
651 if (!BP_IS_HOLE(&blk)) {
652 zio_free_zil(zilog->zl_spa, txg, &blk);
656 error = zio_alloc_zil(zilog->zl_spa, txg, &blk, NULL,
657 ZIL_MIN_BLKSZ, &slog);
660 zil_init_log_chain(zilog, &blk);
664 * Allocate a log write block (lwb) for the first log block.
667 lwb = zil_alloc_lwb(zilog, &blk, slog, txg);
670 * If we just allocated the first log block, commit our transaction
671 * and wait for zil_sync() to stuff the block pointer into zh_log.
672 * (zh is part of the MOS, so we cannot modify it in open context.)
676 txg_wait_synced(zilog->zl_dmu_pool, txg);
679 ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
685 * In one tx, free all log blocks and clear the log header. If keep_first
686 * is set, then we're replaying a log with no content. We want to keep the
687 * first block, however, so that the first synchronous transaction doesn't
688 * require a txg_wait_synced() in zil_create(). We don't need to
689 * txg_wait_synced() here either when keep_first is set, because both
690 * zil_create() and zil_destroy() will wait for any in-progress destroys
694 zil_destroy(zilog_t *zilog, boolean_t keep_first)
696 const zil_header_t *zh = zilog->zl_header;
702 * Wait for any previous destroy to complete.
704 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
706 zilog->zl_old_header = *zh; /* debugging aid */
708 if (BP_IS_HOLE(&zh->zh_log))
711 tx = dmu_tx_create(zilog->zl_os);
712 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
713 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
714 txg = dmu_tx_get_txg(tx);
716 mutex_enter(&zilog->zl_lock);
718 ASSERT3U(zilog->zl_destroy_txg, <, txg);
719 zilog->zl_destroy_txg = txg;
720 zilog->zl_keep_first = keep_first;
722 if (!list_is_empty(&zilog->zl_lwb_list)) {
723 ASSERT(zh->zh_claim_txg == 0);
725 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
726 list_remove(&zilog->zl_lwb_list, lwb);
727 if (lwb->lwb_buf != NULL)
728 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
729 zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
730 zil_free_lwb(zilog, lwb);
732 } else if (!keep_first) {
733 zil_destroy_sync(zilog, tx);
735 mutex_exit(&zilog->zl_lock);
741 zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
743 ASSERT(list_is_empty(&zilog->zl_lwb_list));
744 (void) zil_parse(zilog, zil_free_log_block,
745 zil_free_log_record, tx, zilog->zl_header->zh_claim_txg);
749 zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
751 dmu_tx_t *tx = txarg;
752 uint64_t first_txg = dmu_tx_get_txg(tx);
758 error = dmu_objset_own_obj(dp, ds->ds_object,
759 DMU_OST_ANY, B_FALSE, FTAG, &os);
762 * EBUSY indicates that the objset is inconsistent, in which
763 * case it cannot have a ZIL.
765 if (error != EBUSY) {
766 cmn_err(CE_WARN, "can't open objset for %llu, error %u",
767 (unsigned long long)ds->ds_object, error);
772 zilog = dmu_objset_zil(os);
773 zh = zil_header_in_syncing_context(zilog);
775 if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
776 if (!BP_IS_HOLE(&zh->zh_log))
777 zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
778 BP_ZERO(&zh->zh_log);
779 dsl_dataset_dirty(dmu_objset_ds(os), tx);
780 dmu_objset_disown(os, FTAG);
785 * Claim all log blocks if we haven't already done so, and remember
786 * the highest claimed sequence number. This ensures that if we can
787 * read only part of the log now (e.g. due to a missing device),
788 * but we can read the entire log later, we will not try to replay
789 * or destroy beyond the last block we successfully claimed.
791 ASSERT3U(zh->zh_claim_txg, <=, first_txg);
792 if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
793 (void) zil_parse(zilog, zil_claim_log_block,
794 zil_claim_log_record, tx, first_txg);
795 zh->zh_claim_txg = first_txg;
796 zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
797 zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
798 if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
799 zh->zh_flags |= ZIL_REPLAY_NEEDED;
800 zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
801 dsl_dataset_dirty(dmu_objset_ds(os), tx);
804 ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
805 dmu_objset_disown(os, FTAG);
810 * Check the log by walking the log chain.
811 * Checksum errors are ok as they indicate the end of the chain.
812 * Any other error (no device or read failure) returns an error.
816 zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
825 error = dmu_objset_from_ds(ds, &os);
827 cmn_err(CE_WARN, "can't open objset %llu, error %d",
828 (unsigned long long)ds->ds_object, error);
832 zilog = dmu_objset_zil(os);
833 bp = (blkptr_t *)&zilog->zl_header->zh_log;
836 * Check the first block and determine if it's on a log device
837 * which may have been removed or faulted prior to loading this
838 * pool. If so, there's no point in checking the rest of the log
839 * as its content should have already been synced to the pool.
841 if (!BP_IS_HOLE(bp)) {
843 boolean_t valid = B_TRUE;
845 spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
846 vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
847 if (vd->vdev_islog && vdev_is_dead(vd))
848 valid = vdev_log_state_valid(vd);
849 spa_config_exit(os->os_spa, SCL_STATE, FTAG);
856 * Because tx == NULL, zil_claim_log_block() will not actually claim
857 * any blocks, but just determine whether it is possible to do so.
858 * In addition to checking the log chain, zil_claim_log_block()
859 * will invoke zio_claim() with a done func of spa_claim_notify(),
860 * which will update spa_max_claim_txg. See spa_load() for details.
862 error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
863 zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa));
865 return ((error == ECKSUM || error == ENOENT) ? 0 : error);
869 * When an itx is "skipped", this function is used to properly mark the
870 * waiter as "done", and signal any thread(s) waiting on it. An itx can
871 * be skipped (and not committed to an lwb) for a variety of reasons,
872 * one of them being that the itx was committed via spa_sync(), prior to
873 * it being committed to an lwb; this can happen if a thread calling
874 * zil_commit() is racing with spa_sync().
877 zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
879 mutex_enter(&zcw->zcw_lock);
880 ASSERT3B(zcw->zcw_done, ==, B_FALSE);
881 zcw->zcw_done = B_TRUE;
882 cv_broadcast(&zcw->zcw_cv);
883 mutex_exit(&zcw->zcw_lock);
887 * This function is used when the given waiter is to be linked into an
888 * lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
889 * At this point, the waiter will no longer be referenced by the itx,
890 * and instead, will be referenced by the lwb.
893 zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
895 mutex_enter(&zcw->zcw_lock);
896 ASSERT(!list_link_active(&zcw->zcw_node));
897 ASSERT3P(zcw->zcw_lwb, ==, NULL);
898 ASSERT3P(lwb, !=, NULL);
899 ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
900 lwb->lwb_state == LWB_STATE_ISSUED);
902 list_insert_tail(&lwb->lwb_waiters, zcw);
904 mutex_exit(&zcw->zcw_lock);
908 * This function is used when zio_alloc_zil() fails to allocate a ZIL
909 * block, and the given waiter must be linked to the "nolwb waiters"
910 * list inside of zil_process_commit_list().
913 zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
915 mutex_enter(&zcw->zcw_lock);
916 ASSERT(!list_link_active(&zcw->zcw_node));
917 ASSERT3P(zcw->zcw_lwb, ==, NULL);
918 list_insert_tail(nolwb, zcw);
919 mutex_exit(&zcw->zcw_lock);
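/*
 * Record the vdevs referenced by "bp" in the lwb's vdev tree, so that
 * their write caches can be flushed once the lwb's write completes.
 * This is a no-op when zfs_nocacheflush is set.
 */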
923 zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
925 avl_tree_t *t = &lwb->lwb_vdev_tree;
927 zil_vdev_node_t *zv, zvsearch;
928 int ndvas = BP_GET_NDVAS(bp);
931 if (zfs_nocacheflush)
934 mutex_enter(&lwb->lwb_vdev_lock);
935 for (i = 0; i < ndvas; i++) {
936 zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
937 if (avl_find(t, &zvsearch, &where) == NULL) {
938 zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
939 zv->zv_vdev = zvsearch.zv_vdev;
940 avl_insert(t, zv, where);
943 mutex_exit(&lwb->lwb_vdev_lock);
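/*
 * Track the highest txg that has itx data committed to this lwb.
 */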
947 zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
949 lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
953 * This function is called after all VDEVs associated with a given lwb
954 * write have completed their DKIOCFLUSHWRITECACHE command; or as soon
955 * as the lwb write completes, if "zfs_nocacheflush" is set.
957 * The intention is for this function to be called as soon as the
958 * contents of an lwb are considered "stable" on disk, and will survive
959 * any sudden loss of power. At this point, any threads waiting for the
960 * lwb to reach this state are signalled, and the "waiter" structures
964 zil_lwb_flush_vdevs_done(zio_t *zio)
966 lwb_t *lwb = zio->io_private;
967 zilog_t *zilog = lwb->lwb_zilog;
968 dmu_tx_t *tx = lwb->lwb_tx;
969 zil_commit_waiter_t *zcw;
971 spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
973 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
975 mutex_enter(&zilog->zl_lock);
978 * Ensure the lwb buffer pointer is cleared before releasing the
979 * txg. If we have had an allocation failure and the txg is
980 * waiting to sync then we want zil_sync() to remove the lwb so
981 * that it's not picked up as the next new one in
982 * zil_process_commit_list(). zil_sync() will only remove the
983 * lwb if lwb_buf is null.
988 ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
989 zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
991 lwb->lwb_root_zio = NULL;
992 lwb->lwb_state = LWB_STATE_DONE;
994 if (zilog->zl_last_lwb_opened == lwb) {
996 * Remember the highest committed log sequence number
997 * for ztest. We only update this value when all the log
998 * writes succeeded, because ztest wants to ASSERT that
999 * it got the whole log chain.
1001 zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
1004 while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
1005 mutex_enter(&zcw->zcw_lock);
1007 ASSERT(list_link_active(&zcw->zcw_node));
1008 list_remove(&lwb->lwb_waiters, zcw);
1010 ASSERT3P(zcw->zcw_lwb, ==, lwb);
1011 zcw->zcw_lwb = NULL;
1013 zcw->zcw_zio_error = zio->io_error;
1015 ASSERT3B(zcw->zcw_done, ==, B_FALSE);
1016 zcw->zcw_done = B_TRUE;
1017 cv_broadcast(&zcw->zcw_cv);
1019 mutex_exit(&zcw->zcw_lock);
1022 mutex_exit(&zilog->zl_lock);
1025 * Now that we've written this log block, we have a stable pointer
1026 * to the next block in the chain, so it's OK to let the txg in
1027 * which we allocated the next block sync.
1033 * This is called when an lwb write completes. This means this specific
1034 * lwb was written to disk, and all dependent lwbs have also been
1037 * At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
1038 * the VDEVs involved in writing out this specific lwb. The lwb will be
1039 * "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
1040 * zio completion callback for the lwb's root zio.
1043 zil_lwb_write_done(zio_t *zio)
1045 lwb_t *lwb = zio->io_private;
1046 spa_t *spa = zio->io_spa;
1047 zilog_t *zilog = lwb->lwb_zilog;
1048 avl_tree_t *t = &lwb->lwb_vdev_tree;
1049 void *cookie = NULL;
1050 zil_vdev_node_t *zv;
1052 ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
1054 ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
1055 ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
1056 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
1057 ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
1058 ASSERT(!BP_IS_GANG(zio->io_bp));
1059 ASSERT(!BP_IS_HOLE(zio->io_bp));
1060 ASSERT(BP_GET_FILL(zio->io_bp) == 0);
1062 abd_put(zio->io_abd);
1064 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
1066 mutex_enter(&zilog->zl_lock);
1067 lwb->lwb_write_zio = NULL;
1068 mutex_exit(&zilog->zl_lock);
1070 if (avl_numnodes(t) == 0)
1074 * If there was an IO error, we're not going to call zio_flush()
1075 * on these vdevs, so we simply empty the tree and free the
1076 * nodes. We avoid calling zio_flush() since there isn't any
1077 * good reason for doing so, after the lwb block failed to be
1080 if (zio->io_error != 0) {
1081 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
1082 kmem_free(zv, sizeof (*zv));
1086 while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
1087 vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
1089 zio_flush(lwb->lwb_root_zio, vd);
1090 kmem_free(zv, sizeof (*zv));
1095 * This function's purpose is to "open" an lwb such that it is ready to
1096 * accept new itxs being committed to it. To do this, the lwb's zio
1097 * structures are created, and linked to the lwb. This function is
1098 * idempotent; if the passed in lwb has already been opened, this
1099 * function is essentially a no-op.
1102 zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
1104 zbookmark_phys_t zb;
1105 zio_priority_t prio;
1107 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1108 ASSERT3P(lwb, !=, NULL);
1109 EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
1110 EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
1112 SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
1113 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
1114 lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
1116 if (lwb->lwb_root_zio == NULL) {
1117 abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
1118 BP_GET_LSIZE(&lwb->lwb_blk));
1120 if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
1121 prio = ZIO_PRIORITY_SYNC_WRITE;
1123 prio = ZIO_PRIORITY_ASYNC_WRITE;
1125 lwb->lwb_root_zio = zio_root(zilog->zl_spa,
1126 zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
1127 ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1129 lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
1130 zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
1131 BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
1132 prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE, &zb);
1133 ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1135 lwb->lwb_state = LWB_STATE_OPENED;
1137 mutex_enter(&zilog->zl_lock);
1140 * The zilog's "zl_last_lwb_opened" field is used to
1141 * build the lwb/zio dependency chain, which is used to
1142 * preserve the ordering of lwb completions that is
1143 * required by the semantics of the ZIL. Each new lwb
1144 * zio becomes a parent of the "previous" lwb zio, such
1145 * that the new lwb's zio cannot complete until the
1146 * "previous" lwb's zio completes.
1148 * This is required by the semantics of zil_commit();
1149 * the commit waiters attached to the lwbs will be woken
1150 * in the lwb zio's completion callback, so this zio
1151 * dependency graph ensures the waiters are woken in the
1152 * correct order (the same order the lwbs were created).
1154 lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
1155 if (last_lwb_opened != NULL &&
1156 last_lwb_opened->lwb_state != LWB_STATE_DONE) {
1157 ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
1158 last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
1159 ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
1160 zio_add_child(lwb->lwb_root_zio,
1161 last_lwb_opened->lwb_root_zio);
1163 zilog->zl_last_lwb_opened = lwb;
1165 mutex_exit(&zilog->zl_lock);
1168 ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1169 ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1170 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1174 * Define a limited set of intent log block sizes.
1176 * These must be a multiple of 4KB. Note only the amount used (again
1177 * aligned to 4KB) actually gets written. However, we can't always just
1178 * allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
1180 uint64_t zil_block_buckets[] = {
1181 4096, /* non TX_WRITE */
1182 8192+4096, /* data base */
1183 32*1024 + 4096, /* NFS writes */
1188 * Start a log block write and advance to the next log block.
1189 * Calls are serialized.
1192 zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
1196 spa_t *spa = zilog->zl_spa;
1200 uint64_t zil_blksz, wsz;
1204 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1205 ASSERT3P(lwb->lwb_root_zio, !=, NULL);
1206 ASSERT3P(lwb->lwb_write_zio, !=, NULL);
1207 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
1209 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1210 zilc = (zil_chain_t *)lwb->lwb_buf;
1211 bp = &zilc->zc_next_blk;
1213 zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
1214 bp = &zilc->zc_next_blk;
1217 ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
1220 * Allocate the next block and save its address in this block
1221 * before writing it in order to establish the log chain.
1222 * Note that if the allocation of nlwb synced before we wrote
1223 * the block that points at it (lwb), we'd leak it if we crashed.
1224 * Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
1225 * We dirty the dataset to ensure that zil_sync() will be called
1226 * to clean up in the event of allocation failure or I/O failure.
1229 tx = dmu_tx_create(zilog->zl_os);
1232 * Since we are not going to create any new dirty data and we can even
1233 * help with clearing the existing dirty data, we should not be subject
1234 * to the dirty data based delays.
1235 * We (ab)use TXG_WAITED to bypass the delay mechanism.
1236 * One side effect from using TXG_WAITED is that dmu_tx_assign() can
1237 * fail if the pool is suspended. Those are dramatic circumstances,
1238 * so we return NULL to signal that the normal ZIL processing is not
1239 * possible and txg_wait_synced() should be used to ensure that the data
1242 error = dmu_tx_assign(tx, TXG_WAITED);
1244 ASSERT3S(error, ==, EIO);
1248 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
1249 txg = dmu_tx_get_txg(tx);
1254 * Log blocks are pre-allocated. Here we select the size of the next
1255 * block, based on size used in the last block.
1256 * - first find the smallest bucket that will fit the block from a
1257 * limited set of block sizes. This is because it's faster to write
1258 * blocks allocated from the same metaslab as they are adjacent or
1260 * - next find the maximum from the new suggested size and an array of
1261 * previous sizes. This lessens a picket fence effect of wrongly
1262 * guessing the size if we have a stream of say 2k, 64k, 2k, 64k
1265 * Note we only write what is used, but we can't just allocate
1266 * the maximum block size because we can exhaust the available
1269 zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
1270 for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
1272 zil_blksz = zil_block_buckets[i];
1273 if (zil_blksz == UINT64_MAX)
1274 zil_blksz = SPA_OLD_MAXBLOCKSIZE;
1275 zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
1276 for (i = 0; i < ZIL_PREV_BLKS; i++)
1277 zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
1278 zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
1282 /* pass the old blkptr in order to spread log blocks across devs */
1283 error = zio_alloc_zil(spa, txg, bp, &lwb->lwb_blk, zil_blksz, &slog);
1285 ASSERT3U(bp->blk_birth, ==, txg);
1286 bp->blk_cksum = lwb->lwb_blk.blk_cksum;
1287 bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
1290 * Allocate a new log write block (lwb).
1292 nlwb = zil_alloc_lwb(zilog, bp, slog, txg);
1295 if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
1296 /* For Slim ZIL only write what is used. */
1297 wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
1298 ASSERT3U(wsz, <=, lwb->lwb_sz);
1299 zio_shrink(lwb->lwb_write_zio, wsz);
1306 zilc->zc_nused = lwb->lwb_nused;
1307 zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
1310 * clear unused data for security
1312 bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
1314 spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
1316 zil_lwb_add_block(lwb, &lwb->lwb_blk);
1317 lwb->lwb_issued_timestamp = gethrtime();
1318 lwb->lwb_state = LWB_STATE_ISSUED;
1320 zio_nowait(lwb->lwb_root_zio);
1321 zio_nowait(lwb->lwb_write_zio);
1324 * If there was an allocation failure then nlwb will be null which
1325 * forces a txg_wait_synced().
1331 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
1334 lr_write_t *lrwb, *lrw;
1336 uint64_t dlen, dnow, lwb_sp, reclen, txg;
1338 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1339 ASSERT3P(lwb, !=, NULL);
1340 ASSERT3P(lwb->lwb_buf, !=, NULL);
1342 zil_lwb_write_open(zilog, lwb);
1345 lrw = (lr_write_t *)lrc;
1348 * A commit itx doesn't represent any on-disk state; instead
1349 * it's simply used as a place holder on the commit list, and
1350 * provides a mechanism for attaching a "commit waiter" onto the
1351 * correct lwb (such that the waiter can be signalled upon
1352 * completion of that lwb). Thus, we don't process this itx's
1353 * log record if it's a commit itx (these itx's don't have log
1354 * records), and instead link the itx's waiter onto the lwb's
1357 * For more details, see the comment above zil_commit().
1359 if (lrc->lrc_txtype == TX_COMMIT) {
1360 zil_commit_waiter_link_lwb(itx->itx_private, lwb);
1361 itx->itx_private = NULL;
1365 if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
1366 dlen = P2ROUNDUP_TYPED(
1367 lrw->lr_length, sizeof (uint64_t), uint64_t);
1371 reclen = lrc->lrc_reclen;
1372 zilog->zl_cur_used += (reclen + dlen);
1375 ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
1379 * If this record won't fit in the current log block, start a new one.
1380 * For WR_NEED_COPY optimize layout for minimal number of chunks.
1382 lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1383 if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
1384 lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
1385 lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
1386 lwb = zil_lwb_write_issue(zilog, lwb);
1389 zil_lwb_write_open(zilog, lwb);
1390 ASSERT(LWB_EMPTY(lwb));
1391 lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
1392 ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
1395 dnow = MIN(dlen, lwb_sp - reclen);
1396 lr_buf = lwb->lwb_buf + lwb->lwb_nused;
1397 bcopy(lrc, lr_buf, reclen);
1398 lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
1399 lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
1402 * If it's a write, fetch the data or get its blkptr as appropriate.
1404 if (lrc->lrc_txtype == TX_WRITE) {
1405 if (txg > spa_freeze_txg(zilog->zl_spa))
1406 txg_wait_synced(zilog->zl_dmu_pool, txg);
1407 if (itx->itx_wr_state != WR_COPIED) {
1411 if (itx->itx_wr_state == WR_NEED_COPY) {
1412 dbuf = lr_buf + reclen;
1413 lrcb->lrc_reclen += dnow;
1414 if (lrwb->lr_length > dnow)
1415 lrwb->lr_length = dnow;
1416 lrw->lr_offset += dnow;
1417 lrw->lr_length -= dnow;
1419 ASSERT(itx->itx_wr_state == WR_INDIRECT);
1424 * We pass in the "lwb_write_zio" rather than
1425 * "lwb_root_zio" so that the "lwb_write_zio"
1426 * becomes the parent of any zio's created by
1427 * the "zl_get_data" callback. The vdevs are
1428 * flushed after the "lwb_write_zio" completes,
1429 * so we want to make sure that completion
1430 * callback waits for these additional zio's,
1431 * such that the vdevs used by those zio's will
1432 * be included in the lwb's vdev tree, and those
1433 * vdevs will be properly flushed. If we passed
1434 * in "lwb_root_zio" here, then these additional
1435 * vdevs may not be flushed; e.g. if these zio's
1436 * completed after "lwb_write_zio" completed.
1438 error = zilog->zl_get_data(itx->itx_private,
1439 lrwb, dbuf, lwb, lwb->lwb_write_zio);
1442 txg_wait_synced(zilog->zl_dmu_pool, txg);
1446 ASSERT(error == ENOENT || error == EEXIST ||
1454 * We're actually making an entry, so update lrc_seq to be the
1455 * log record sequence number. Note that this is generally not
1456 * equal to the itx sequence number because not all transactions
1457 * are synchronous, and sometimes spa_sync() gets there first.
1459 lrcb->lrc_seq = ++zilog->zl_lr_seq;
1460 lwb->lwb_nused += reclen + dnow;
1462 zil_lwb_add_txg(lwb, txg);
1464 ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
1465 ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
1469 zilog->zl_cur_used += reclen;
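/*
 * Allocate an in-memory intent log transaction (itx) with room for a
 * log record of "lrsize" bytes, rounded up to an 8-byte boundary.
 * Itxs are synchronous by default.
 */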
1477 zil_itx_create(uint64_t txtype, size_t lrsize)
1481 lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
1483 itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
1484 itx->itx_lr.lrc_txtype = txtype;
1485 itx->itx_lr.lrc_reclen = lrsize;
1486 itx->itx_lr.lrc_seq = 0; /* defensive */
1487 itx->itx_sync = B_TRUE; /* default is synchronous */
1493 zil_itx_destroy(itx_t *itx)
1495 kmem_free(itx, offsetof(itx_t, itx_lr) + itx->itx_lr.lrc_reclen);
1499 * Free up the sync and async itxs. The itxs_t has already been detached
1500 * so no locks are needed.
1503 zil_itxg_clean(itxs_t *itxs)
1509 itx_async_node_t *ian;
1511 list = &itxs->i_sync_list;
1512 while ((itx = list_head(list)) != NULL) {
1514 * In the general case, commit itxs will not be found
1515 * here, as they'll be committed to an lwb via
1516 * zil_lwb_commit(), and free'd in that function. Having
1517 * said that, it is still possible for commit itxs to be
1518 * found here, due to the following race:
1520 * - a thread calls zil_commit() which assigns the
1521 * commit itx to a per-txg i_sync_list
1522 * - zil_itxg_clean() is called (e.g. via spa_sync())
1523 * while the waiter is still on the i_sync_list
1525 * There's nothing to prevent syncing the txg while the
1526 * waiter is on the i_sync_list. This normally doesn't
1527 * happen because spa_sync() is slower than zil_commit(),
1528 * but if zil_commit() calls txg_wait_synced() (e.g.
1529 * because zil_create() or zil_commit_writer_stall() is
1530 * called) we will hit this case.
1532 if (itx->itx_lr.lrc_txtype == TX_COMMIT)
1533 zil_commit_waiter_skip(itx->itx_private);
1535 list_remove(list, itx);
1536 zil_itx_destroy(itx);
1540 t = &itxs->i_async_tree;
1541 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1542 list = &ian->ia_list;
1543 while ((itx = list_head(list)) != NULL) {
1544 list_remove(list, itx);
1545 /* commit itxs should never be on the async lists. */
1546 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
1547 zil_itx_destroy(itx);
1550 kmem_free(ian, sizeof (itx_async_node_t));
1554 kmem_free(itxs, sizeof (itxs_t));
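/*
 * AVL comparison function for the per-txg tree of async itx nodes
 * (i_async_tree); nodes are ordered by object id (ia_foid).
 */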
1558 zil_aitx_compare(const void *x1, const void *x2)
1560 const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
1561 const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
1572 * Remove all async itx with the given oid.
1575 zil_remove_async(zilog_t *zilog, uint64_t oid)
1578 itx_async_node_t *ian;
1585 list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
1587 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1590 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1592 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1593 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1595 mutex_enter(&itxg->itxg_lock);
1596 if (itxg->itxg_txg != txg) {
1597 mutex_exit(&itxg->itxg_lock);
1602 * Locate the object node and append its list.
1604 t = &itxg->itxg_itxs->i_async_tree;
1605 ian = avl_find(t, &oid, &where);
1607 list_move_tail(&clean_list, &ian->ia_list);
1608 mutex_exit(&itxg->itxg_lock);
1610 while ((itx = list_head(&clean_list)) != NULL) {
1611 list_remove(&clean_list, itx);
1612 /* commit itxs should never be on the async lists. */
1613 ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
1614 zil_itx_destroy(itx);
1616 list_destroy(&clean_list);
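/*
 * Assign an itx to the in-memory queues for the txg it was created in.
 * Synchronous itxs are appended to the per-txg i_sync_list; asynchronous
 * itxs are appended to a per-object list within the per-txg i_async_tree.
 */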
1620 zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
1624 itxs_t *itxs, *clean = NULL;
1627 * Object ids can be re-instantiated in the next txg so
1628 * remove any async transactions to avoid future leaks.
1629 * This can happen if a fsync occurs on the re-instantiated
1630 * object for a WR_INDIRECT or WR_NEED_COPY write, which gets
1631 * the new file data and flushes a write record for the old object.
1633 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
1634 zil_remove_async(zilog, itx->itx_oid);
1637 * Ensure the data of a renamed file is committed before the rename.
1639 if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
1640 zil_async_to_sync(zilog, itx->itx_oid);
1642 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
1645 txg = dmu_tx_get_txg(tx);
1647 itxg = &zilog->zl_itxg[txg & TXG_MASK];
1648 mutex_enter(&itxg->itxg_lock);
1649 itxs = itxg->itxg_itxs;
1650 if (itxg->itxg_txg != txg) {
1653 * The zil_clean callback hasn't got around to cleaning
1654 * this itxg. Save the itxs for release below.
1655 * This should be rare.
1657 zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
1658 "txg %llu", itxg->itxg_txg);
1659 clean = itxg->itxg_itxs;
1661 itxg->itxg_txg = txg;
1662 itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_SLEEP);
1664 list_create(&itxs->i_sync_list, sizeof (itx_t),
1665 offsetof(itx_t, itx_node));
1666 avl_create(&itxs->i_async_tree, zil_aitx_compare,
1667 sizeof (itx_async_node_t),
1668 offsetof(itx_async_node_t, ia_node));
1670 if (itx->itx_sync) {
1671 list_insert_tail(&itxs->i_sync_list, itx);
1673 avl_tree_t *t = &itxs->i_async_tree;
1674 uint64_t foid = ((lr_ooo_t *)&itx->itx_lr)->lr_foid;
1675 itx_async_node_t *ian;
1678 ian = avl_find(t, &foid, &where);
1680 ian = kmem_alloc(sizeof (itx_async_node_t), KM_SLEEP);
1681 list_create(&ian->ia_list, sizeof (itx_t),
1682 offsetof(itx_t, itx_node));
1683 ian->ia_foid = foid;
1684 avl_insert(t, ian, where);
1686 list_insert_tail(&ian->ia_list, itx);
1689 itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
1692 * We don't want to dirty the ZIL using ZILTEST_TXG, because
1693 * zil_clean() will never be called using ZILTEST_TXG. Thus, we
1694 * need to be careful to always dirty the ZIL using the "real"
1695 * TXG (not itxg_txg) even when the SPA is frozen.
1697 zilog_dirty(zilog, dmu_tx_get_txg(tx));
1698 mutex_exit(&itxg->itxg_lock);
1700 /* Release the old itxs now we've dropped the lock */
1702 zil_itxg_clean(clean);
1706 * If there are any in-memory intent log transactions which have now been
1707 * synced then start up a taskq to free them. We should only do this after we
1708 * have written out the uberblocks (i.e. txg has been committed) so that we
1709 * don't inadvertently clean out in-memory log records that would be required
1713 zil_clean(zilog_t *zilog, uint64_t synced_txg)
1715 itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
1718 ASSERT3U(synced_txg, <, ZILTEST_TXG);
1720 mutex_enter(&itxg->itxg_lock);
1721 if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
1722 mutex_exit(&itxg->itxg_lock);
1725 ASSERT3U(itxg->itxg_txg, <=, synced_txg);
1726 ASSERT3U(itxg->itxg_txg, !=, 0);
1727 clean_me = itxg->itxg_itxs;
1728 itxg->itxg_itxs = NULL;
1730 mutex_exit(&itxg->itxg_lock);
1732 * Preferably start a task queue to free up the old itxs but
1733 * if taskq_dispatch can't allocate resources to do that then
1734 * free it in-line. This should be rare. Note, using TQ_SLEEP
1735 * created a bad performance problem.
1737 ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
1738 ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
1739 if (taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
1740 (void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP) == 0)
1741 zil_itxg_clean(clean_me);
1745 * This function will traverse the queue of itxs that need to be
1746 * committed, and move them onto the ZIL's zl_itx_commit_list.
1749 zil_get_commit_list(zilog_t *zilog)
1752 list_t *commit_list = &zilog->zl_itx_commit_list;
1754 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1756 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1759 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1762 * This is inherently racy, since there is nothing to prevent
1763 * the last synced txg from changing. That's okay since we'll
1764 * only commit things in the future.
1766 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1767 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1769 mutex_enter(&itxg->itxg_lock);
1770 if (itxg->itxg_txg != txg) {
1771 mutex_exit(&itxg->itxg_lock);
1776 * If we're adding itx records to the zl_itx_commit_list,
1777 * then the zil better be dirty in this "txg". We can assert
1778 * that here since we're holding the itxg_lock which will
1779 * prevent spa_sync from cleaning it. Once we add the itxs
1780 * to the zl_itx_commit_list we must commit it to disk even
1781 * if it's unnecessary (i.e. the txg was synced).
1783 ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
1784 spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
1785 list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
1787 mutex_exit(&itxg->itxg_lock);
1792 * Move the async itxs for a specified object to commit into sync lists.
1795 zil_async_to_sync(zilog_t *zilog, uint64_t foid)
1798 itx_async_node_t *ian;
1802 if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
1805 otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
1808 * This is inherently racy, since there is nothing to prevent
1809 * the last synced txg from changing.
1811 for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
1812 itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
1814 mutex_enter(&itxg->itxg_lock);
1815 if (itxg->itxg_txg != txg) {
1816 mutex_exit(&itxg->itxg_lock);
1821 * If a foid is specified then find that node and append its
1822 * list. Otherwise walk the tree appending all the lists
1823 * to the sync list. We add to the end rather than the
1824 * beginning to ensure the create has happened.
1826 t = &itxg->itxg_itxs->i_async_tree;
1828 ian = avl_find(t, &foid, &where);
1830 list_move_tail(&itxg->itxg_itxs->i_sync_list,
1834 void *cookie = NULL;
1836 while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
1837 list_move_tail(&itxg->itxg_itxs->i_sync_list,
1839 list_destroy(&ian->ia_list);
1840 kmem_free(ian, sizeof (itx_async_node_t));
1843 mutex_exit(&itxg->itxg_lock);
1848 * This function will prune commit itxs that are at the head of the
1849 * commit list (it won't prune past the first non-commit itx), and
1850 * either: a) attach them to the last lwb that's still pending
1851 * completion, or b) skip them altogether.
1853 * This is used as a performance optimization to prevent commit itxs
1854 * from generating new lwbs when it's unnecessary to do so.
1857 zil_prune_commit_list(zilog_t *zilog)
1861 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1863 while (itx = list_head(&zilog->zl_itx_commit_list)) {
1864 lr_t *lrc = &itx->itx_lr;
1865 if (lrc->lrc_txtype != TX_COMMIT)
1868 mutex_enter(&zilog->zl_lock);
1870 lwb_t *last_lwb = zilog->zl_last_lwb_opened;
1871 if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
1873 * All of the itxs this waiter was waiting on
1874 * must have already completed (or there were
1875 * never any itx's for it to wait on), so it's
1876 * safe to skip this waiter and mark it done.
1878 zil_commit_waiter_skip(itx->itx_private);
1880 zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
1881 itx->itx_private = NULL;
1884 mutex_exit(&zilog->zl_lock);
1886 list_remove(&zilog->zl_itx_commit_list, itx);
1887 zil_itx_destroy(itx);
1890 IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
1894 zil_commit_writer_stall(zilog_t *zilog)
1897 * When zio_alloc_zil() fails to allocate the next lwb block on
1898 * disk, we must call txg_wait_synced() to ensure all of the
1899 * lwbs in the zilog's zl_lwb_list are synced and then freed (in
1900 * zil_sync()), such that any subsequent ZIL writer (i.e. a call
1901 * to zil_process_commit_list()) will have to call zil_create(),
1902 * and start a new ZIL chain.
1904 * Since zio_alloc_zil() failed, the lwb that was previously
1905 * issued does not have a pointer to the "next" lwb on disk.
1906 * Thus, if another ZIL writer thread was to allocate the "next"
1907 * on-disk lwb, that block could be leaked in the event of a
1908 * crash (because the previous lwb on-disk would not point to
1911 * We must hold the zilog's zl_writer_lock while we do this, to
1912 * ensure no new threads enter zil_process_commit_list() until
1913 * all lwb's in the zl_lwb_list have been synced and freed
1914 * (which is achieved via the txg_wait_synced() call).
1916 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1917 txg_wait_synced(zilog->zl_dmu_pool, 0);
1918 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
1922 * This function will traverse the commit list, creating new lwbs as
1923 * needed, and committing the itxs from the commit list to these newly
1924 * created lwbs. Additionally, as a new lwb is created, the previous
1925 * lwb will be issued to the zio layer to be written to disk.
1928 zil_process_commit_list(zilog_t *zilog)
1930 spa_t *spa = zilog->zl_spa;
1931 list_t nolwb_waiters;
1935 ASSERT(MUTEX_HELD(&zilog->zl_writer_lock));
1938 * Return if there's nothing to commit before we dirty the fs by
1939 * calling zil_create().
1941 if (list_head(&zilog->zl_itx_commit_list) == NULL)
1944 list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
1945 offsetof(zil_commit_waiter_t, zcw_node));
1947 lwb = list_tail(&zilog->zl_lwb_list);
1949 lwb = zil_create(zilog);
1951 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
1952 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
1955 while (itx = list_head(&zilog->zl_itx_commit_list)) {
1956 lr_t *lrc = &itx->itx_lr;
1957 uint64_t txg = lrc->lrc_txg;
1959 ASSERT3U(txg, !=, 0);
1961 if (lrc->lrc_txtype == TX_COMMIT) {
1962 DTRACE_PROBE2(zil__process__commit__itx,
1963 zilog_t *, zilog, itx_t *, itx);
1965 DTRACE_PROBE2(zil__process__normal__itx,
1966 zilog_t *, zilog, itx_t *, itx);
1970 * This is inherently racy and may result in us writing
1971 * out a log block for a txg that was just synced. This
1972 * is ok since we'll end up cleaning up that log block the
1973 * next time we call zil_sync().
1975 boolean_t synced = txg <= spa_last_synced_txg(spa);
1976 boolean_t frozen = txg > spa_freeze_txg(spa);
1978 if (!synced || frozen) {
1980 lwb = zil_lwb_commit(zilog, itx, lwb);
1981 } else if (lrc->lrc_txtype == TX_COMMIT) {
1982 ASSERT3P(lwb, ==, NULL);
1983 zil_commit_waiter_link_nolwb(
1984 itx->itx_private, &nolwb_waiters);
1986 } else if (lrc->lrc_txtype == TX_COMMIT) {
1987 ASSERT3B(synced, ==, B_TRUE);
1988 ASSERT3B(frozen, ==, B_FALSE);
1991 * If this is a commit itx, then there will be a
1992 * thread that is either: already waiting for
1993 * it, or soon will be waiting.
1995 * This itx has already been committed to disk
1996 * via spa_sync() so we don't bother committing
1997 * it to an lwb. As a result, we cannot use the
1998 * lwb zio callback to signal the waiter and
1999 * mark it as done, so we must do that here.
2001 zil_commit_waiter_skip(itx->itx_private);
2004 list_remove(&zilog->zl_itx_commit_list, itx);
2005 zil_itx_destroy(itx);
2010 * This indicates zio_alloc_zil() failed to allocate the
2011 * "next" lwb on-disk. When this happens, we must stall
2012 * the ZIL write pipeline; see the comment within
2013 * zil_commit_writer_stall() for more details.
2015 zil_commit_writer_stall(zilog);
2018 * Additionally, we have to signal and mark the "nolwb"
2019 * waiters as "done" here, since without an lwb, we
2020 * can't do this via zil_lwb_flush_vdevs_done() like
2023 zil_commit_waiter_t *zcw;
2024 while (zcw = list_head(&nolwb_waiters)) {
2025 zil_commit_waiter_skip(zcw);
2026 list_remove(&nolwb_waiters, zcw);
2029 ASSERT(list_is_empty(&nolwb_waiters));
2030 ASSERT3P(lwb, !=, NULL);
2031 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
2032 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
2035 * At this point, the ZIL block pointed at by the "lwb"
2036 * variable is in one of the following states: "closed"
2039 * If it's "closed", then no itxs have been committed to
2040 * it, so there's no point in issuing its zio (i.e.
2043 * If it's in the "open" state, then it contains one or more
2044 * itxs that eventually need to be committed to stable
2045 * storage. In this case we intentionally do not issue
2046 * the lwb's zio to disk yet, and instead rely on one of
2047 * the following two mechanisms for issuing the zio:
2049 * 1. Ideally, there will be more ZIL activity occurring
2050 * on the system, such that this function will be
2051 * immediately called again (not necessarily by the same
2052 * thread) and this lwb's zio will be issued via
2053 * zil_lwb_commit(). This way, the lwb is guaranteed to
2054 * be "full" when it is issued to disk, and we'll make the
2055 * best use of the lwb's size.
2057 * 2. If there isn't sufficient ZIL activity occurring on
2058 * the system, such that this lwb's zio isn't issued via
2059 * zil_lwb_commit(), zil_commit_waiter() will issue the
2060 * lwb's zio. If this occurs, the lwb is not guaranteed
2061 * to be "full" by the time its zio is issued, which means
2062 * the size of the lwb was "too large" given the amount
2063 * of ZIL activity occurring on the system at that time.
2065 * We do this for a couple of reasons:
2067 * 1. To try and reduce the number of IOPs needed to
2068 * write the same number of itxs. If an lwb has space
2069 * available in its buffer for more itxs, and more itxs
2070 * will be committed relatively soon (relative to the
2071 * latency of performing a write), then it's beneficial
2072 * to wait for these "next" itxs. This way, more itxs
2073 * can be committed to stable storage with fewer writes.
2075 * 2. To try and use the largest lwb block size that the
2076 * incoming rate of itxs can support. Again, this is to
2077 * try and pack as many itxs into as few lwbs as
2078 * possible, without significantly impacting the latency
2079 * of each individual itx.
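/*
 * The IOPs reasoning above can be made concrete with a small, purely
 * illustrative sketch (a hypothetical helper, not part of the ZIL):
 * given an assumed average itx size and lwb buffer capacity, packing
 * itxs into shared lwbs cuts the number of device writes roughly by the
 * ratio of lwb capacity to itx size, at the cost of the extra latency
 * spent waiting for the "next" itxs to arrive.
 */
static uint64_t
zil_sketch_writes_saved(uint64_t nitxs, uint64_t avg_itx_sz, uint64_t lwb_cap)
{
	/* One lwb (and thus one device write) per itx, with no batching. */
	uint64_t unbatched = nitxs;

	/* Roughly how many itxs fit into a single lwb buffer. */
	ASSERT3U(avg_itx_sz, !=, 0);
	uint64_t per_lwb = MAX(lwb_cap / avg_itx_sz, 1);

	/* Device writes needed when itxs are packed until an lwb fills. */
	uint64_t batched = (nitxs + per_lwb - 1) / per_lwb;

	return (unbatched - batched);
}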
2085 * This function is responsible for ensuring the passed in commit waiter
2086 * (and associated commit itx) is committed to an lwb. If the waiter is
2087 * not already committed to an lwb, all itxs in the zilog's queue of
2088 * itxs will be processed. The assumption is that the passed in waiter's
2089 * commit itx will be found in the queue just like the other non-commit
2090 * itxs, such that when the entire queue is processed, the waiter will
2091 * have been committed to an lwb.
2093 * The lwb associated with the passed in waiter is not guaranteed to
2094 * have been issued by the time this function completes. If the lwb is
2095 * not issued, we rely on future calls to zil_commit_writer() to issue
2096 * the lwb, or the timeout mechanism found in zil_commit_waiter().
2099 zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
2101 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2102 ASSERT(spa_writeable(zilog->zl_spa));
2103 ASSERT0(zilog->zl_suspend);
2105 mutex_enter(&zilog->zl_writer_lock);
2107 if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
2109 * It's possible that, while we were waiting to acquire
2110 * the "zl_writer_lock", another thread committed this
2111 * waiter to an lwb. If that occurs, we bail out early,
2112 * without processing any of the zilog's queue of itxs.
2114 * On certain workloads and system configurations, the
2115 * "zl_writer_lock" can become highly contended. In an
2116 * attempt to reduce this contention, we immediately drop
2117 * the lock if the waiter has already been processed.
2119 * We've measured this optimization to reduce CPU spent
2120 * contending on this lock by up to 5%, using a system
2121 * with 32 CPUs, low latency storage (~50 usec writes),
2122 * and 1024 threads performing sync writes.
2127 zil_get_commit_list(zilog);
2128 zil_prune_commit_list(zilog);
2129 zil_process_commit_list(zilog);
2132 mutex_exit(&zilog->zl_writer_lock);
2136 zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
2138 ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
2139 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2140 ASSERT3B(zcw->zcw_done, ==, B_FALSE);
2142 lwb_t *lwb = zcw->zcw_lwb;
2143 ASSERT3P(lwb, !=, NULL);
2144 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
2147 * If the lwb has already been issued by another thread, we can
2148 * immediately return since there's no work to be done (the
2149 * point of this function is to issue the lwb). Additionally, we
2150 * do this prior to acquiring the zl_writer_lock, to avoid
2151 * acquiring it when it's not necessary to do so.
2153 if (lwb->lwb_state == LWB_STATE_ISSUED ||
2154 lwb->lwb_state == LWB_STATE_DONE)
2158 * In order to call zil_lwb_write_issue() we must hold the
2159 * zilog's "zl_writer_lock". We can't simply acquire that lock,
2160 * since we're already holding the commit waiter's "zcw_lock",
2161 * and those two locks are acquired in the opposite order
2164 mutex_exit(&zcw->zcw_lock);
2165 mutex_enter(&zilog->zl_writer_lock);
2166 mutex_enter(&zcw->zcw_lock);
2169 * Since we just dropped and re-acquired the commit waiter's
2170 * lock, we have to re-check to see if the waiter was marked
2171 * "done" during that process. If the waiter was marked "done",
2172 * the "lwb" pointer is no longer valid (it can be free'd after
2173 * the waiter is marked "done"), so without this check we could
2174 * wind up with a use-after-free error below.
2179 ASSERT3P(lwb, ==, zcw->zcw_lwb);
2182 * We've already checked this above, but since we hadn't
2183 * acquired the zilog's zl_writer_lock, we have to perform this
2184 * check a second time while holding the lock. We can't call
2185 * zil_lwb_write_issue() if the lwb had already been issued.
2187 if (lwb->lwb_state == LWB_STATE_ISSUED ||
2188 lwb->lwb_state == LWB_STATE_DONE)
2191 ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
2194 * As described in the comments above zil_commit_waiter() and
2195 * zil_process_commit_list(), we need to issue this lwb's zio
2196 * since we've reached the commit waiter's timeout and it still
2197 * hasn't been issued.
2199 lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
2201 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
2204 * Since the lwb's zio hadn't been issued by the time this thread
2205 * reached its timeout, we reset the zilog's "zl_cur_used" field
2206 * to influence the zil block size selection algorithm.
2208 * By having to issue the lwb's zio here, it means the size of the
2209 * lwb was too large, given the incoming throughput of itxs. By
2210 * setting "zl_cur_used" to zero, we communicate this fact to the
2211 * block size selection algorithm, so it can take this information
2212 * into account, and potentially select a smaller size for the
2213 * next lwb block that is allocated.
2215 zilog->zl_cur_used = 0;
2219 * When zil_lwb_write_issue() returns NULL, this
2220 * indicates zio_alloc_zil() failed to allocate the
2221 * "next" lwb on-disk. When this occurs, the ZIL write
2222 * pipeline must be stalled; see the comment within the
2223 * zil_commit_writer_stall() function for more details.
2225 * We must drop the commit waiter's lock prior to
2226 * calling zil_commit_writer_stall() or else we can wind
2227 * up with the following deadlock:
2229 * - This thread is waiting for the txg to sync while
2230 * holding the waiter's lock; txg_wait_synced() is
2231 * used within zil_commit_writer_stall().
2233 * - The txg can't sync because it is waiting for this
2234 * lwb's zio callback to call dmu_tx_commit().
2236 * - The lwb's zio callback can't call dmu_tx_commit()
2237 * because it's blocked trying to acquire the waiter's
2238 * lock, which occurs prior to calling dmu_tx_commit()
2240 mutex_exit(&zcw->zcw_lock);
2241 zil_commit_writer_stall(zilog);
2242 mutex_enter(&zcw->zcw_lock);
2246 mutex_exit(&zilog->zl_writer_lock);
2247 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
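/*
 * The drop-and-reacquire dance used above generalizes to the following
 * pattern; this is a hypothetical sketch (not part of the ZIL), using the
 * same kernel mutex primitives: when the lock ordering requires a "coarse"
 * lock to be taken while a "fine" lock is already held, the fine lock is
 * dropped, both locks are taken in the documented order, and any state
 * observed under the fine lock is re-validated before it is used again,
 * since it may have changed (or been freed) while the fine lock was
 * dropped. Like zil_commit_waiter_timeout(), the sketch returns with the
 * fine lock still held.
 */
static void
lock_order_sketch(kmutex_t *coarse, kmutex_t *fine, boolean_t *donep)
{
	ASSERT(MUTEX_HELD(fine));

	mutex_exit(fine);
	mutex_enter(coarse);
	mutex_enter(fine);

	if (!*donep) {
		/* Only now is it safe to touch state guarded by "fine". */
	}

	mutex_exit(coarse);
}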
2251 * This function is responsible for performing the following two tasks:
2253 * 1. its primary responsibility is to block until the given "commit
2254 * waiter" is considered "done".
2256 * 2. its secondary responsibility is to issue the zio for the lwb that
2257 * the given "commit waiter" is waiting on, if this function has
2258 * waited "long enough" and the lwb is still in the "open" state.
2260 * Given a sufficient amount of itxs being generated and written using
2261 * the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
2262 * function. If this does not occur, this secondary responsibility will
2263 * ensure the lwb is issued even if there is no other synchronous
2264 * activity on the system.
2266 * For more details, see zil_process_commit_list(); more specifically,
2267 * the comment at the bottom of that function.
2270 zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
2272 ASSERT(!MUTEX_HELD(&zilog->zl_lock));
2273 ASSERT(!MUTEX_HELD(&zilog->zl_writer_lock));
2274 ASSERT(spa_writeable(zilog->zl_spa));
2275 ASSERT0(zilog->zl_suspend);
2277 mutex_enter(&zcw->zcw_lock);
2280 * The timeout is scaled based on the lwb latency to avoid
2281 * significantly impacting the latency of each individual itx.
2282 * For more details, see the comment at the bottom of the
2283 * zil_process_commit_list() function.
2285 int pct = MAX(zfs_commit_timeout_pct, 1);
2286 #if defined(illumos) || !defined(_KERNEL)
2287 hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
2288 hrtime_t wakeup = gethrtime() + sleep;
2290 sbintime_t sleep = nstosbt((zilog->zl_last_lwb_latency * pct) / 100);
2291 sbintime_t wakeup = getsbinuptime() + sleep;
2293 boolean_t timedout = B_FALSE;
2295 while (!zcw->zcw_done) {
2296 ASSERT(MUTEX_HELD(&zcw->zcw_lock));
2298 lwb_t *lwb = zcw->zcw_lwb;
2301 * Usually, the waiter will have a non-NULL lwb field here,
2302 * but it's possible for it to be NULL as a result of
2303 * zil_commit() racing with spa_sync().
2305 * When zil_clean() is called, it's possible for the itxg
2306 * list (which may be cleaned via a taskq) to contain
2307 * commit itxs. When this occurs, the commit waiters linked
2308 * off of these commit itxs will not be committed to an
2309 * lwb. Additionally, these commit waiters will not be
2310 * marked done until zil_commit_waiter_skip() is called via
2313 * Thus, it's possible for this commit waiter (i.e. the
2314 * "zcw" variable) to be found in this "in between" state;
2315 * where its "zcw_lwb" field is NULL, and it hasn't yet
2316 * been skipped, so its "zcw_done" field is still B_FALSE.
2318 IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
2320 if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
2321 ASSERT3B(timedout, ==, B_FALSE);
2324 * If the lwb hasn't been issued yet, then we
2325 * need to wait with a timeout, in case this
2326 * function needs to issue the lwb after the
2327 * timeout is reached; responsibility (2) from
2328 * the comment above this function.
2330 #if defined(illumos) || !defined(_KERNEL)
2331 clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv,
2332 &zcw->zcw_lock, wakeup, USEC2NSEC(1),
2333 CALLOUT_FLAG_ABSOLUTE);
2335 if (timeleft >= 0 || zcw->zcw_done)
2338 int wait_err = cv_timedwait_sbt(&zcw->zcw_cv,
2339 &zcw->zcw_lock, wakeup, SBT_1NS, C_ABSOLUTE);
2340 if (wait_err != EWOULDBLOCK || zcw->zcw_done)
2345 zil_commit_waiter_timeout(zilog, zcw);
2347 if (!zcw->zcw_done) {
2349 * If the commit waiter has already been
2350 * marked "done", it's possible for the
2351 * waiter's lwb structure to have already
2352 * been freed. Thus, we can only reliably
2353 * make these assertions if the waiter isn't done.
2356 ASSERT3P(lwb, ==, zcw->zcw_lwb);
2357 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
2361 * If the lwb isn't open, then it must have already
2362 * been issued. In that case, there's no need to
2363 * use a timeout when waiting for the lwb to
2366 * Additionally, if the lwb is NULL, the waiter
2367 * will soon be signalled and marked done via
2368 * zil_clean() and zil_itxg_clean(), so no timeout
2373 lwb->lwb_state == LWB_STATE_ISSUED ||
2374 lwb->lwb_state == LWB_STATE_DONE);
2375 cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
2379 mutex_exit(&zcw->zcw_lock);
2382 static zil_commit_waiter_t *
2383 zil_alloc_commit_waiter()
2385 zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
2387 cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
2388 mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
2389 list_link_init(&zcw->zcw_node);
2390 zcw->zcw_lwb = NULL;
2391 zcw->zcw_done = B_FALSE;
2392 zcw->zcw_zio_error = 0;
2398 zil_free_commit_waiter(zil_commit_waiter_t *zcw)
2400 ASSERT(!list_link_active(&zcw->zcw_node));
2401 ASSERT3P(zcw->zcw_lwb, ==, NULL);
2402 ASSERT3B(zcw->zcw_done, ==, B_TRUE);
2403 mutex_destroy(&zcw->zcw_lock);
2404 cv_destroy(&zcw->zcw_cv);
2405 kmem_cache_free(zil_zcw_cache, zcw);
2409 * This function is used to create a TX_COMMIT itx and assign it. This
2410 * way, it will be linked into the ZIL's list of synchronous itxs, and
2411 * then later committed to an lwb (or skipped) when
2412 * zil_process_commit_list() is called.
2415 zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
2417 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
2418 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
2420 itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
2421 itx->itx_sync = B_TRUE;
2422 itx->itx_private = zcw;
2424 zil_itx_assign(zilog, itx, tx);
2430 * Commit ZFS Intent Log transactions (itxs) to stable storage.
2432 * When writing ZIL transactions to the on-disk representation of the
2433 * ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
2434 * itxs can be committed to a single lwb. Once an lwb is written and
2435 * committed to stable storage (i.e. the lwb is written, and vdevs have
2436 * been flushed), each itx that was committed to that lwb is also
2437 * considered to be committed to stable storage.
2439 * When an itx is committed to an lwb, the log record (lr_t) contained
2440 * by the itx is copied into the lwb's zio buffer, and once this buffer
2441 * is written to disk, it becomes an on-disk ZIL block.
2443 * As itxs are generated, they're inserted into the ZIL's queue of
2444 * uncommitted itxs. The semantics of zil_commit() are such that it will
2445 * block until all itxs that were in the queue when it was called, are
2446 * committed to stable storage.
2448 * If "foid" is zero, this means all "synchronous" and "asynchronous"
2449 * itxs, for all objects in the dataset, will be committed to stable
2450 * storage prior to zil_commit() returning. If "foid" is non-zero, all
2451 * "synchronous" itxs for all objects, but only "asynchronous" itxs
2452 * that correspond to the foid passed in, will be committed to stable
2453 * storage prior to zil_commit() returning.
2455 * Generally speaking, when zil_commit() is called, the consumer doesn't
2456 * actually care about _all_ of the uncommitted itxs. Instead, they're
2457 * simply trying to wait for a specific itx to be committed to disk,
2458 * but the interface(s) for interacting with the ZIL don't allow such
2459 * fine-grained communication. A better interface would allow a consumer
2460 * to create and assign an itx, and then pass a reference to this itx to
2461 * zil_commit(); such that zil_commit() would return as soon as that
2462 * specific itx was committed to disk (instead of waiting for _all_
2463 * itxs to be committed).
2465 * When a thread calls zil_commit(), a special "commit itx" will be
2466 * generated, along with a corresponding "waiter" for this commit itx.
2467 * zil_commit() will wait on this waiter's CV, such that when the waiter
2468 * is marked done, and signalled, zil_commit() will return.
2470 * This commit itx is inserted into the queue of uncommitted itxs. This
2471 * provides an easy mechanism for determining which itxs were in the
2472 * queue prior to zil_commit() having been called, and which itxs were
2473 * added after zil_commit() was called.
2475 * The commit itx is special; it doesn't have any on-disk representation.
2476 * When a commit itx is "committed" to an lwb, the waiter associated
2477 * with it is linked onto the lwb's list of waiters. Then, when that lwb
2478 * completes, each waiter on the lwb's list is marked done and signalled
2479 * -- allowing the thread waiting on the waiter to return from zil_commit().
2481 * It's important to point out a few critical factors that allow us
2482 * to make use of the commit itxs, commit waiters, per-lwb lists of
2483 * commit waiters, and zio completion callbacks like we're doing:
2485 * 1. The list of waiters for each lwb is traversed, and each commit
2486 * waiter is marked "done" and signalled, in the zio completion
2487 * callback of the lwb's zio[*].
2489 * * Actually, the waiters are signalled in the zio completion
2490 * callback of the root zio for the DKIOCFLUSHWRITECACHE commands
2491 * that are sent to the vdevs upon completion of the lwb zio.
2493 * 2. When the itxs are inserted into the ZIL's queue of uncommitted
2494 * itxs, the order in which they are inserted is preserved[*]; as
2495 * itxs are added to the queue, they are added to the tail of
2496 * in-memory linked lists.
2498 * When committing the itxs to lwbs (to be written to disk), they
2499 * are committed in the same order in which the itxs were added to
2500 * the uncommitted queue's linked list(s); i.e. the linked list of
2501 * itxs to commit is traversed from head to tail, and each itx is
2502 * committed to an lwb in that order.
2506 * - the order of "sync" itxs is preserved w.r.t. other
2507 * "sync" itxs, regardless of the corresponding objects.
2508 * - the order of "async" itxs is preserved w.r.t. other
2509 * "async" itxs corresponding to the same object.
2510 * - the order of "async" itxs is *not* preserved w.r.t. other
2511 * "async" itxs corresponding to different objects.
2512 * - the order of "sync" itxs w.r.t. "async" itxs (or vice
2513 * versa) is *not* preserved, even for itxs that correspond
2514 * to the same object.
2516 * For more details, see: zil_itx_assign(), zil_async_to_sync(),
2517 * zil_get_commit_list(), and zil_process_commit_list().
2519 * 3. The lwbs represent a linked list of blocks on disk. Thus, any
2520 * lwb cannot be considered committed to stable storage, until its
2521 * "previous" lwb is also committed to stable storage. This fact,
2522 * coupled with the fact described above, means that itxs are
2523 * committed in (roughly) the order in which they were generated.
2524 * This is essential because itxs are dependent on prior itxs.
2525 * Thus, we *must not* deem an itx as being committed to stable
2526 * storage, until *all* prior itxs have also been committed to
2529 * To enforce this ordering of lwb zio's, while still leveraging as
2530 * much of the underlying storage performance as possible, we rely
2531 * on two fundamental concepts:
2533 * 1. The creation and issuance of lwb zio's is protected by
2534 * the zilog's "zl_writer_lock", which ensures only a single
2535 * thread is creating and/or issuing lwb's at a time
2536 * 2. The "previous" lwb is a child of the "current" lwb
2537 * (leveraging the zio parent-child dependency graph)
2539 * By relying on this parent-child zio relationship, we can have
2540 * many lwb zio's concurrently issued to the underlying storage,
2541 * but the order in which they complete will be the same order in
2542 * which they were created.
2545 zil_commit(zilog_t *zilog, uint64_t foid)
2548 * We should never attempt to call zil_commit on a snapshot for
2549 * a couple of reasons:
2551 * 1. A snapshot may never be modified, thus it cannot have any
2552 * in-flight itxs that would have modified the dataset.
2554 * 2. By design, when zil_commit() is called, a commit itx will
2555 * be assigned to this zilog; as a result, the zilog will be
2556 * dirtied. We must not dirty the zilog of a snapshot; there are
2557 * checks in the code that enforce this invariant, and will
2558 * cause a panic if it's not upheld.
2560 ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
2562 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
2565 if (!spa_writeable(zilog->zl_spa)) {
2567 * If the SPA is not writable, there should never be any
2568 * pending itxs waiting to be committed to disk. If that
2569 * weren't true, we'd skip writing those itxs out, and
2570 * would break the semantics of zil_commit(); thus, we're
2571 * verifying that truth before we return to the caller.
2573 ASSERT(list_is_empty(&zilog->zl_lwb_list));
2574 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
2575 for (int i = 0; i < TXG_SIZE; i++)
2576 ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
2581 * If the ZIL is suspended, we don't want to dirty it by calling
2582 * zil_commit_itx_assign() below, nor can we write out
2583 * lwbs as would be done in zil_commit_writer(). Thus, we
2584 * simply rely on txg_wait_synced() to maintain the necessary
2585 * semantics, and avoid calling those functions altogether.
2587 if (zilog->zl_suspend > 0) {
2588 txg_wait_synced(zilog->zl_dmu_pool, 0);
2593 * Move the "async" itxs for the specified foid to the "sync"
2594 * queues, such that they will be later committed (or skipped)
2595 * to an lwb when zil_process_commit_list() is called.
2597 * Since these "async" itxs must be committed prior to this
2598 * call to zil_commit returning, we must perform this operation
2599 * before we call zil_commit_itx_assign().
2601 zil_async_to_sync(zilog, foid);
2604 * We allocate a new "waiter" structure which will initially be
2605 * linked to the commit itx using the itx's "itx_private" field.
2606 * Since the commit itx doesn't represent any on-disk state,
2607 * when it's committed to an lwb, rather than copying its
2608 * lr_t into the lwb's buffer, the commit itx's "waiter" will be
2609 * added to the lwb's list of waiters. Then, when the lwb is
2610 * committed to stable storage, each waiter in the lwb's list of
2611 * waiters will be marked "done", and signalled.
2613 * We must create the waiter and assign the commit itx prior to
2614 * calling zil_commit_writer(), or else our specific commit itx
2615 * is not guaranteed to be committed to an lwb prior to calling
2616 * zil_commit_waiter().
2618 zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
2619 zil_commit_itx_assign(zilog, zcw);
2621 zil_commit_writer(zilog, zcw);
2622 zil_commit_waiter(zilog, zcw);
2624 if (zcw->zcw_zio_error != 0) {
2626 * If there was an error writing out the ZIL blocks that
2627 * this thread is waiting on, then we fall back to
2628 * relying on spa_sync() to write out the data this
2629 * thread is waiting on. Obviously this has performance
2630 * implications, but the expectation is for this to be
2631 * an exceptional case, and shouldn't occur often.
2633 DTRACE_PROBE2(zil__commit__io__error,
2634 zilog_t *, zilog, zil_commit_waiter_t *, zcw);
2635 txg_wait_synced(zilog->zl_dmu_pool, 0);
2638 zil_free_commit_waiter(zcw);
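/*
 * Hypothetical consumer sketch (not part of this file): a filesystem's
 * synchronous code path logs its changes as itxs while the operations
 * happen, and then needs only a single zil_commit() call to obtain the
 * durability guarantee described above. The function name and object
 * number below are illustrative.
 */
static void
zil_fsync_sketch(zilog_t *zilog, uint64_t object)
{
	/*
	 * Blocks until every itx queued at the time of the call -- all
	 * "sync" itxs, plus "async" itxs for this object -- is on stable
	 * storage, either via an lwb write or via spa_sync().
	 */
	zil_commit(zilog, object);
}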
2642 * Called in syncing context to free committed log blocks and update log header.
2645 zil_sync(zilog_t *zilog, dmu_tx_t *tx)
2647 zil_header_t *zh = zil_header_in_syncing_context(zilog);
2648 uint64_t txg = dmu_tx_get_txg(tx);
2649 spa_t *spa = zilog->zl_spa;
2650 uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
2654 * We don't zero out zl_destroy_txg, so make sure we don't try
2655 * to destroy it twice.
2657 if (spa_sync_pass(spa) != 1)
2660 mutex_enter(&zilog->zl_lock);
2662 ASSERT(zilog->zl_stop_sync == 0);
2664 if (*replayed_seq != 0) {
2665 ASSERT(zh->zh_replay_seq < *replayed_seq);
2666 zh->zh_replay_seq = *replayed_seq;
2670 if (zilog->zl_destroy_txg == txg) {
2671 blkptr_t blk = zh->zh_log;
2673 ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
2675 bzero(zh, sizeof (zil_header_t));
2676 bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
2678 if (zilog->zl_keep_first) {
2680 * If this block was part of a log chain that couldn't
2681 * be claimed because a device was missing during
2682 * zil_claim(), but that device later returns,
2683 * then this block could erroneously appear valid.
2684 * To guard against this, assign a new GUID to the new
2685 * log chain so it doesn't matter what blk points to.
2687 zil_init_log_chain(zilog, &blk);
2692 while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
2693 zh->zh_log = lwb->lwb_blk;
2694 if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
2696 list_remove(&zilog->zl_lwb_list, lwb);
2697 zio_free(spa, txg, &lwb->lwb_blk);
2698 zil_free_lwb(zilog, lwb);
2701 * If we don't have anything left in the lwb list then
2702 * we've had an allocation failure and we need to zero
2703 * out the zil_header blkptr so that we don't end
2704 * up freeing the same block twice.
2706 if (list_head(&zilog->zl_lwb_list) == NULL)
2707 BP_ZERO(&zh->zh_log);
2709 mutex_exit(&zilog->zl_lock);
2714 zil_lwb_cons(void *vbuf, void *unused, int kmflag)
2717 list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
2718 offsetof(zil_commit_waiter_t, zcw_node));
2719 avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
2720 sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
2721 mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
2727 zil_lwb_dest(void *vbuf, void *unused)
2730 mutex_destroy(&lwb->lwb_vdev_lock);
2731 avl_destroy(&lwb->lwb_vdev_tree);
2732 list_destroy(&lwb->lwb_waiters);
2738 zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
2739 sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
2741 zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
2742 sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
2748 kmem_cache_destroy(zil_zcw_cache);
2749 kmem_cache_destroy(zil_lwb_cache);
2753 zil_set_sync(zilog_t *zilog, uint64_t sync)
2755 zilog->zl_sync = sync;
2759 zil_set_logbias(zilog_t *zilog, uint64_t logbias)
2761 zilog->zl_logbias = logbias;
2765 zil_alloc(objset_t *os, zil_header_t *zh_phys)
2769 zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
2771 zilog->zl_header = zh_phys;
2773 zilog->zl_spa = dmu_objset_spa(os);
2774 zilog->zl_dmu_pool = dmu_objset_pool(os);
2775 zilog->zl_destroy_txg = TXG_INITIAL - 1;
2776 zilog->zl_logbias = dmu_objset_logbias(os);
2777 zilog->zl_sync = dmu_objset_syncprop(os);
2778 zilog->zl_dirty_max_txg = 0;
2779 zilog->zl_last_lwb_opened = NULL;
2780 zilog->zl_last_lwb_latency = 0;
2782 mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
2783 mutex_init(&zilog->zl_writer_lock, NULL, MUTEX_DEFAULT, NULL);
2785 for (int i = 0; i < TXG_SIZE; i++) {
2786 mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
2787 MUTEX_DEFAULT, NULL);
2790 list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
2791 offsetof(lwb_t, lwb_node));
2793 list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
2794 offsetof(itx_t, itx_node));
2796 cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
2802 zil_free(zilog_t *zilog)
2804 zilog->zl_stop_sync = 1;
2806 ASSERT0(zilog->zl_suspend);
2807 ASSERT0(zilog->zl_suspending);
2809 ASSERT(list_is_empty(&zilog->zl_lwb_list));
2810 list_destroy(&zilog->zl_lwb_list);
2812 ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
2813 list_destroy(&zilog->zl_itx_commit_list);
2815 for (int i = 0; i < TXG_SIZE; i++) {
2817 * It's possible for an itx to be generated that doesn't dirty
2818 * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
2819 * callback to remove the entry. We remove those here.
2821 * Also free up the ziltest itxs.
2823 if (zilog->zl_itxg[i].itxg_itxs)
2824 zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
2825 mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
2828 mutex_destroy(&zilog->zl_writer_lock);
2829 mutex_destroy(&zilog->zl_lock);
2831 cv_destroy(&zilog->zl_cv_suspend);
2833 kmem_free(zilog, sizeof (zilog_t));
2837 * Open an intent log.
2840 zil_open(objset_t *os, zil_get_data_t *get_data)
2842 zilog_t *zilog = dmu_objset_zil(os);
2844 ASSERT3P(zilog->zl_get_data, ==, NULL);
2845 ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
2846 ASSERT(list_is_empty(&zilog->zl_lwb_list));
2848 zilog->zl_get_data = get_data;
2854 * Close an intent log.
2857 zil_close(zilog_t *zilog)
2862 if (!dmu_objset_is_snapshot(zilog->zl_os)) {
2863 zil_commit(zilog, 0);
2865 ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
2866 ASSERT0(zilog->zl_dirty_max_txg);
2867 ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
2870 mutex_enter(&zilog->zl_lock);
2871 lwb = list_tail(&zilog->zl_lwb_list);
2873 txg = zilog->zl_dirty_max_txg;
2875 txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
2876 mutex_exit(&zilog->zl_lock);
2879 * We need to use txg_wait_synced() to wait long enough for the
2880 * ZIL to be clean, and to wait for all pending lwbs to be
2884 txg_wait_synced(zilog->zl_dmu_pool, txg);
2886 if (zilog_is_dirty(zilog))
2887 zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
2888 VERIFY(!zilog_is_dirty(zilog));
2890 zilog->zl_get_data = NULL;
2893 * We should have only one lwb left on the list; remove it now.
2895 mutex_enter(&zilog->zl_lock);
2896 lwb = list_head(&zilog->zl_lwb_list);
2898 ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
2899 ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
2900 list_remove(&zilog->zl_lwb_list, lwb);
2901 zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
2902 zil_free_lwb(zilog, lwb);
2904 mutex_exit(&zilog->zl_lock);
2907 static char *suspend_tag = "zil suspending";
2910 * Suspend an intent log. While in suspended mode, we still honor
2911 * synchronous semantics, but we rely on txg_wait_synced() to do it.
2912 * On old version pools, we suspend the log briefly when taking a
2913 * snapshot so that it will have an empty intent log.
2915 * Long holds are not really intended to be used the way we do here --
2916 * held for such a short time. A concurrent caller of dsl_dataset_long_held()
2917 * could fail. Therefore we take pains to only put a long hold if it is
2918 * actually necessary. Fortunately, it will only be necessary if the
2919 * objset is currently mounted (or the ZVOL equivalent). In that case it
2920 * will already have a long hold, so we are not really making things any worse.
2922 * Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
2923 * zvol_state_t), and use their mechanism to prevent their hold from being
2924 * dropped (e.g. VFS_HOLD()). However, that would be even more pain for
2927 * if cookiep == NULL, this does both the suspend & resume.
2928 * Otherwise, it returns with the dataset "long held", and the cookie
2929 * should be passed into zil_resume().
2932 zil_suspend(const char *osname, void **cookiep)
2936 const zil_header_t *zh;
2939 error = dmu_objset_hold(osname, suspend_tag, &os);
2942 zilog = dmu_objset_zil(os);
2944 mutex_enter(&zilog->zl_lock);
2945 zh = zilog->zl_header;
2947 if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
2948 mutex_exit(&zilog->zl_lock);
2949 dmu_objset_rele(os, suspend_tag);
2950 return (SET_ERROR(EBUSY));
2954 * Don't put a long hold in the cases where we can avoid it. This
2955 * is when there is no cookie so we are doing a suspend & resume
2956 * (i.e. called from zil_vdev_offline()), and there's nothing to do
2957 * for the suspend because it's already suspended, or there's no ZIL.
2959 if (cookiep == NULL && !zilog->zl_suspending &&
2960 (zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
2961 mutex_exit(&zilog->zl_lock);
2962 dmu_objset_rele(os, suspend_tag);
2966 dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
2967 dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
2969 zilog->zl_suspend++;
2971 if (zilog->zl_suspend > 1) {
2973 * Someone else is already suspending it.
2974 * Just wait for them to finish.
2977 while (zilog->zl_suspending)
2978 cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
2979 mutex_exit(&zilog->zl_lock);
2981 if (cookiep == NULL)
2989 * If there is no pointer to an on-disk block, this ZIL must not
2990 * be active (e.g. filesystem not mounted), so there's nothing
2993 if (BP_IS_HOLE(&zh->zh_log)) {
2994 ASSERT(cookiep != NULL); /* fast path already handled */
2997 mutex_exit(&zilog->zl_lock);
3001 zilog->zl_suspending = B_TRUE;
3002 mutex_exit(&zilog->zl_lock);
3004 zil_commit(zilog, 0);
3006 zil_destroy(zilog, B_FALSE);
3008 mutex_enter(&zilog->zl_lock);
3009 zilog->zl_suspending = B_FALSE;
3010 cv_broadcast(&zilog->zl_cv_suspend);
3011 mutex_exit(&zilog->zl_lock);
3013 if (cookiep == NULL)
3021 zil_resume(void *cookie)
3023 objset_t *os = cookie;
3024 zilog_t *zilog = dmu_objset_zil(os);
3026 mutex_enter(&zilog->zl_lock);
3027 ASSERT(zilog->zl_suspend != 0);
3028 zilog->zl_suspend--;
3029 mutex_exit(&zilog->zl_lock);
3030 dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
3031 dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
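/*
 * Hypothetical usage sketch (not part of this file) of the cookie-based
 * interface described above zil_suspend(): when a cookie pointer is
 * supplied, zil_suspend() returns with the dataset long held and the ZIL
 * suspended, and the caller later hands the cookie back to zil_resume().
 * Passing a NULL cookie pointer instead performs the suspend and resume
 * in a single call.
 */
static int
zil_suspend_resume_sketch(const char *osname)
{
	void *cookie;
	int error;

	error = zil_suspend(osname, &cookie);
	if (error != 0)
		return (error);

	/* ... do whatever work required the ZIL to be quiesced ... */

	zil_resume(cookie);
	return (0);
}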
3034 typedef struct zil_replay_arg {
3035 zil_replay_func_t **zr_replay;
3037 boolean_t zr_byteswap;
3042 zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
3044 char name[ZFS_MAX_DATASET_NAME_LEN];
3046 zilog->zl_replaying_seq--; /* didn't actually replay this one */
3048 dmu_objset_name(zilog->zl_os, name);
3050 cmn_err(CE_WARN, "ZFS replay transaction error %d, "
3051 "dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
3052 (u_longlong_t)lr->lrc_seq,
3053 (u_longlong_t)(lr->lrc_txtype & ~TX_CI),
3054 (lr->lrc_txtype & TX_CI) ? "CI" : "");
3060 zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
3062 zil_replay_arg_t *zr = zra;
3063 const zil_header_t *zh = zilog->zl_header;
3064 uint64_t reclen = lr->lrc_reclen;
3065 uint64_t txtype = lr->lrc_txtype;
3068 zilog->zl_replaying_seq = lr->lrc_seq;
3070 if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
3073 if (lr->lrc_txg < claim_txg) /* already committed */
3076 /* Strip case-insensitive bit, still present in log record */
3079 if (txtype == 0 || txtype >= TX_MAX_TYPE)
3080 return (zil_replay_error(zilog, lr, EINVAL));
3083 * If this record type can be logged out of order, the object
3084 * (lr_foid) may no longer exist. That's legitimate, not an error.
3086 if (TX_OOO(txtype)) {
3087 error = dmu_object_info(zilog->zl_os,
3088 ((lr_ooo_t *)lr)->lr_foid, NULL);
3089 if (error == ENOENT || error == EEXIST)
3094 * Make a copy of the data so we can revise and extend it.
3096 bcopy(lr, zr->zr_lr, reclen);
3099 * If this is a TX_WRITE with a blkptr, suck in the data.
3101 if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
3102 error = zil_read_log_data(zilog, (lr_write_t *)lr,
3103 zr->zr_lr + reclen);
3105 return (zil_replay_error(zilog, lr, error));
3109 * The log block containing this lr may have been byteswapped
3110 * so that we can easily examine common fields like lrc_txtype.
3111 * However, the log is a mix of different record types, and only the
3112 * replay vectors know how to byteswap their records. Therefore, if
3113 * the lr was byteswapped, undo it before invoking the replay vector.
3115 if (zr->zr_byteswap)
3116 byteswap_uint64_array(zr->zr_lr, reclen);
3119 * We must now do two things atomically: replay this log record,
3120 * and update the log header sequence number to reflect the fact that
3121 * we did so. At the end of each replay function the sequence number
3122 * is updated if we are in replay mode.
3124 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
3127 * The DMU's dnode layer doesn't see removes until the txg
3128 * commits, so a subsequent claim can spuriously fail with
3129 * EEXIST. So if we receive any error we try syncing out
3130 * any removes then retry the transaction. Note that we
3131 * specify B_FALSE for byteswap now, so we don't do it twice.
3133 txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
3134 error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
3136 return (zil_replay_error(zilog, lr, error));
3143 zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
3145 zilog->zl_replay_blks++;
3151 * If this dataset has a non-empty intent log, replay it and destroy it.
3154 zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
3156 zilog_t *zilog = dmu_objset_zil(os);
3157 const zil_header_t *zh = zilog->zl_header;
3158 zil_replay_arg_t zr;
3160 if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
3161 zil_destroy(zilog, B_TRUE);
3165 zr.zr_replay = replay_func;
3167 zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
3168 zr.zr_lr = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
3171 * Wait for in-progress removes to sync before starting replay.
3173 txg_wait_synced(zilog->zl_dmu_pool, 0);
3175 zilog->zl_replay = B_TRUE;
3176 zilog->zl_replay_time = ddi_get_lbolt();
3177 ASSERT(zilog->zl_replay_blks == 0);
3178 (void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
3180 kmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
3182 zil_destroy(zilog, B_FALSE);
3183 txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
3184 zilog->zl_replay = B_FALSE;
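/*
 * Illustrative sketch (hypothetical consumer, not part of this file): a
 * caller of zil_replay() supplies a TX_MAX_TYPE-sized table of replay
 * callbacks, indexed by log record type, which zil_replay_log_record()
 * invokes via zr_replay[txtype]. The callback names are made up; a real
 * consumer fills in every supported slot.
 */
static void
zil_replay_usage_sketch(objset_t *os, void *fs_private)
{
	static zil_replay_func_t *replay_table[TX_MAX_TYPE] = {
		NULL,	/* index 0 is unused; there is no such txtype */
		/* [TX_CREATE] = fs_replay_create, ... and so on */
	};

	/*
	 * Replays any unapplied log records against "os" and then
	 * destroys the on-disk log chain.
	 */
	zil_replay(os, fs_private, replay_table);
}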
3188 zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
3190 if (zilog->zl_sync == ZFS_SYNC_DISABLED)
3193 if (zilog->zl_replay) {
3194 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
3195 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
3196 zilog->zl_replaying_seq;
3205 zil_vdev_offline(const char *osname, void *arg)
3209 error = zil_suspend(osname, NULL);
3211 return (SET_ERROR(EEXIST));