/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org>
 */
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>
/*
 * Pool-wide transaction groups.
 */
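/*
 * In brief: every transaction group (txg) moves through three states --
 * open (accepting new assignments), quiescing (waiting for in-flight
 * assignments to drain), and syncing (being written to disk).  At most
 * one txg may be in each state at a time, and per-txg bookkeeping is
 * kept in arrays of TXG_SIZE entries indexed by (txg & TXG_MASK).
 * The quiesce and sync threads below pump txgs through this pipeline.
 */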
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);
int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG");
TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_txg_timeout);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RDTUN, &zfs_txg_timeout, 0,
    "Maximum seconds worth of delta per txg");
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}
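/*
 * Get a handle on the currently open txg and keep it open until
 * txg_rele_to_sync() is called.  Note that this routine returns with
 * the per-CPU tc_lock still held; the caller releases it with
 * txg_rele_to_quiesce().
 */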
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
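/*
 * Taken together, the hold/release routines above give a txg handle
 * the following lifecycle.  A sketch of a typical consumer (the exact
 * sequence is normally driven by dmu_tx_assign() and dmu_tx_commit();
 * the "modify" step below is illustrative):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	// join open txg, tc_lock held
 *	txg_rele_to_quiesce(&th);		// drop tc_lock, keep the hold
 *	...modify in-core state for 'txg'...
 *	txg_rele_to_sync(&th);			// allow 'txg' to quiesce
 */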
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}
static void
txg_do_callbacks(void *arg)
{
	list_t *cb_list = arg;

	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
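/*
 * Callbacks arrive on tc_callbacks[] via txg_register_callbacks(),
 * normally through the DMU.  A sketch of how a consumer registers one
 * (cb_done and its argument are illustrative, not part of this file):
 *
 *	static void
 *	cb_done(void *arg, int error)	// nonzero error means the tx aborted
 *	{
 *		...
 *	}
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	...assign and dirty the tx...
 *	dmu_tx_callback_register(tx, cb_done, arg);
 *	dmu_tx_commit(tx);	// cb_done runs after the txg syncs
 */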
static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, when there's someone
		 * waiting on us, when the quiesce thread has handed
		 * off a txg to us, or when we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}
static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}
/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout - ddi_get_lbolt());

	mutex_exit(&tx->tx_sync_lock);
}
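/*
 * Block until the requested txg has been synced to disk.  A txg of 0
 * means "the currently open txg plus TXG_DEFER_SIZE more", which
 * guarantees that everything dirty at the time of the call, including
 * deferred frees, has made it to stable storage.
 */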
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
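/*
 * Block until the requested txg is open.  As above, a txg of 0 is a
 * shorthand, here for "one past the currently open txg", so the call
 * returns once at least one new txg has opened.
 */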
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}
boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}
/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}
/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}
int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}
/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
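/*
 * A sketch of the intended iteration pattern (the 'process' consumer
 * is illustrative, not part of this file):
 *
 *	void *obj;
 *
 *	for (obj = txg_list_head(tl, txg); obj != NULL;
 *	    obj = txg_list_next(tl, obj, txg))
 *		process(obj);
 */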