/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org>
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
/*
 * Pool-wide transaction groups.
 */
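/*
 * Each txg passes through three stages: open (new transactions may be
 * assigned), quiescing (waiting for in-flight transactions to complete),
 * and syncing (being written out by spa_sync()).  Because up to TXG_SIZE
 * txgs may be in flight at once, per-txg state throughout this file is
 * indexed by (txg & TXG_MASK).
 */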
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG");
TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_txg_timeout);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RW, &zfs_txg_timeout, 0,
    "Maximum seconds worth of delta per txg");
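/*
 * Usage note (FreeBSD): because the OID above is CTLFLAG_RW and a loader
 * tunable is registered, the timeout can be set at boot via loader.conf
 * ("vfs.zfs.txg.timeout=10") or at runtime with
 * "sysctl vfs.zfs.txg.timeout=10".
 */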
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;

	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
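/*
 * A minimal sketch of the hold/release protocol above, as driven by the
 * DMU (hedged; the actual call sites are in dmu_tx.c):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	// join the open txg
 *	txg_rele_to_quiesce(&th);		// drop tc_open_lock; the
 *						// txg may begin quiescing
 *	... modify in-memory state for this txg ...
 *	txg_rele_to_sync(&th);			// drop the count; quiescing
 *						// can now complete
 */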
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to txg_exit().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}
static void
txg_do_callbacks(void *arg)
{
	list_t *cb_list = arg;

	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
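/*
 * Callbacks reach tc_callbacks[] via txg_register_callbacks() and fire
 * only after their txg has been synced.  An illustrative sketch (the
 * callback name and argument are hypothetical):
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 *	dmu_tx_commit(tx);	// my_commit_cb(my_arg, 0) runs after sync
 */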
static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}
static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout - ddi_get_lbolt());

	mutex_exit(&tx->tx_sync_lock);
}
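/*
 * Illustrative only: a throttled writer might call
 * txg_delay(dp, tx->tx_txg, hz) to back off for up to one second,
 * returning early if its txg begins quiescing or the pipeline stalls.
 */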
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
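/*
 * Passing txg == 0 waits for the currently open txg (plus the
 * TXG_DEFER_SIZE window of deferred frees) to reach stable storage:
 *
 *	txg_wait_synced(dp, 0);		// "push out everything dirty now"
 */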
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
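/*
 * Similarly, txg_wait_open(dp, 0) blocks until a new txg opens, which
 * forces the currently open txg to begin quiescing first.
 */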
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}
596 * Per-txg object lists.
599 txg_list_create(txg_list_t *tl, size_t offset)
603 mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);
605 tl->tl_offset = offset;
607 for (t = 0; t < TXG_SIZE; t++)
608 tl->tl_head[t] = NULL;
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}
boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t] != 0);
}
/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
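/*
 * A typical walk over a per-txg list, sketched for illustration
 * (process() is a hypothetical consumer; the walk is only safe while
 * the list is not changing):
 *
 *	void *obj;
 *	for (obj = txg_list_head(tl, txg); obj != NULL;
 *	    obj = txg_list_next(tl, obj, txg))
 *		process(obj);
 */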