/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dsl_pool.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

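/*
 * At any moment a pool has one open txg accepting new transactions, at
 * most one txg quiescing (waiting for outstanding holds to be released),
 * and at most one txg syncing to disk.  The quiesce and sync threads
 * below move each txg through those stages in pipeline fashion, so a
 * new txg can open while its predecessors drain and sync.
 */
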
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 30;	/* max seconds worth of delta per txg */
extern int zfs_txg_synctime;
extern uint64_t zfs_write_limit_override;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0,
    "ZFS transaction groups (TXG)");
TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_txg_timeout);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RDTUN, &zfs_txg_timeout, 0,
    "Maximum seconds worth of delta per txg");
TUNABLE_INT("vfs.zfs.txg.synctime", &zfs_txg_synctime);
SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, synctime, CTLFLAG_RDTUN, &zfs_txg_synctime,
    0, "Target seconds to sync a txg");
TUNABLE_QUAD("vfs.zfs.txg.write_limit_override", &zfs_write_limit_override);
SYSCTL_QUAD(_vfs_zfs_txg, OID_AUTO, write_limit_override, CTLFLAG_RW,
    &zfs_write_limit_override, 0,
    "Override maximum size of a txg to this size in bytes, "
    "value of 0 means don't override");

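/*
 * The CTLFLAG_RDTUN knobs above are boot-time tunables, e.g. in
 * loader.conf:
 *
 *	vfs.zfs.txg.timeout="30"
 *
 * write_limit_override is CTLFLAG_RW and can also be changed on a
 * running system (the value below is purely illustrative):
 *
 *	sysctl vfs.zfs.txg.write_limit_override=268435456
 */
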
/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
		}
	}

	rw_init(&tx->tx_suspend, NULL, RW_DEFAULT, NULL);
	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	rw_destroy(&tx->tx_suspend);
	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++)
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
	}

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 12<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);

	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);
	txg_wait_synced(dp, 0);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

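/*
 * Note on indexing: tc_count[] (like the other per-txg arrays here) is
 * indexed by txg & TXG_MASK, a slot in a ring of TXG_SIZE entries.
 * Since at most TXG_SIZE txgs are ever in flight, a slot is free for
 * reuse by the time its txg number comes around again.  Keeping the
 * hold counts per CPU means the open-txg hot path touches only its own
 * CPU's tc_lock; txg_quiesce() below visits every CPU to drain them.
 */
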
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

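/*
 * Typical caller sequence (a sketch; dmu_tx.c is the main consumer):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	(returns with tc_lock held)
 *	txg_rele_to_quiesce(&th);		(drop tc_lock, keep the hold)
 *	... dirty data in the open txg ...
 *	txg_rele_to_sync(&th);			(drop the hold; the txg may
 *						 now finish quiescing)
 */
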
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}

static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scrubbing, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while ((dp->dp_scrub_func == SCRUB_FUNC_NONE ||
		    spa_shutting_down(dp->dp_spa)) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = LBOLT - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		rw_enter(&tx->tx_suspend, RW_WRITER);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);
		rw_exit(&tx->tx_suspend);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);

		mutex_exit(&tx->tx_sync_lock);

		start = LBOLT;
		spa_sync(dp->dp_spa, txg);
		delta = LBOLT - start;

		mutex_enter(&tx->tx_sync_lock);
		rw_enter(&tx->tx_suspend, RW_WRITER);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		rw_exit(&tx->tx_suspend);
		cv_broadcast(&tx->tx_sync_done_cv);
	}
}

static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

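/*
 * Handshake summary: the quiesce thread publishes a fully quiesced txg
 * in tx_quiesced_txg and signals tx_sync_more_cv/tx_quiesce_done_cv;
 * the sync thread consumes it (clearing tx_quiesced_txg and setting
 * tx_syncing_txg), then signals tx_quiesce_more_cv so the next txg can
 * start quiescing while the current one is being written out.
 */
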
/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	int timeout = LBOLT + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (LBOLT < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout - LBOLT);

	mutex_exit(&tx->tx_sync_lock);
}

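/*
 * txg_delay() is the write-throttle hook: when dirty data is piling
 * into the open txg faster than it can sync, the DMU write path (e.g.
 * the dsl_pool space-reservation code) can call this to stall the
 * writer briefly rather than let the txg grow without bound.
 */
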
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

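/*
 * Passing txg == 0 means "the currently open txg", so
 * txg_wait_synced(dp, 0) pushes out everything dirty right now;
 * txg_sync_stop() above relies on this to drain the pool before the
 * threads exit.
 */
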
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

void
txg_suspend(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	/* XXX some code paths suspend when they are already suspended! */
	rw_enter(&tx->tx_suspend, RW_READER);
}

void
txg_resume(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	rw_exit(&tx->tx_suspend);
}

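/*
 * tx_suspend is taken as reader by txg_suspend() callers and as writer
 * by the sync thread around its state transitions above, so a caller
 * holding a suspend reference sees tx_syncing_txg and tx_synced_txg
 * frozen until it calls txg_resume().
 */
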
/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

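/*
 * 'offset' is the byte offset of a txg_node_t embedded in the listed
 * objects.  A hypothetical caller looks like:
 *
 *	typedef struct foo {
 *		txg_node_t	foo_node;
 *		...
 *	} foo_t;
 *
 *	txg_list_t foo_list;
 *	txg_list_create(&foo_list, offsetof(foo_t, foo_node));
 *	(void) txg_list_add(&foo_list, fp, txg);
 */
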
void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

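/*
 * Example walk (hypothetical caller; only safe while the list is not
 * changing, per the comment on txg_list_head() above):
 *
 *	foo_t *fp;
 *
 *	for (fp = txg_list_head(&foo_list, txg); fp != NULL;
 *	    fp = txg_list_next(&foo_list, fp, txg))
 *		process(fp);
 */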