/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 * Copyright 2019 Joyent, Inc.
 */
#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time.  In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host.  There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time
 * resulting in pool damage.  This implementation does not attempt to detect
 * those cases.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and fail the import when they are detected.  This
 * functionality is enabled by setting the 'multihost' pool property to on.
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N - MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and mmp_config are frequently
 * updated.  Like all other uberblocks, the slot is written with an embedded
 * checksum, and slots with invalid checksums are ignored.  This provides the
 * "heartbeat", with no risk of overwriting good uberblocks that must be
 * preserved, e.g. previous txgs and associated block pointers.
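 *
 * As a sketch of the layout (assuming MMP_BLOCKS_PER_LABEL == 1 and N
 * uberblock slots per label): slots 0 through N-2 are filled by txg_sync as
 * txgs complete, while slot N-1 is repeatedly overwritten by the MMP thread
 * with a copy of the last synced uberblock whose timestamp and mmp_config
 * have been refreshed.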
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay.  The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid.  The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported.  These determine how long a host must wait to detect
 * activity in the pool, before concluding the pool is not in use.  The
 * ub_mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds.  It indicates whether
 * MMP is enabled.
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use.  The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and,
 * if ub_mmp_magic is valid, sequence number from ub_mmp_config).  It then
 * waits some time, and finds the "best" uberblock again.  If any of the
 * mentioned fields have different values in the newly read uberblock, the
 * pool is in use by another host and the import fails.  To ensure the
 * accuracy of the activity test, the default values result in an activity
 * test duration of 20x the mmp write interval.
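 *
 * A sketch of that check, using hypothetical helper names rather than the
 * actual import code:
 *
 *	best = find_best_uberblock(pool);
 *	wait(activity_test_duration);
 *	again = find_best_uberblock(pool);
 *	if (best.txg != again.txg ||
 *	    best.timestamp != again.timestamp ||
 *	    best.mmp_seq != again.mmp_seq)
 *		fail the import: the pool is in use by another host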
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock:
 *
 * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 *    In this case, a weak guarantee is provided.  Since the host which last
 *    had the pool imported will suspend the pool if no mmp writes land within
 *    fail_intervals * multihost_interval ms, the absence of writes during that
 *    time means either the pool is not imported, or it is imported but the
 *    pool is suspended and no further writes will occur.
 *
 *    Note that resuming the suspended pool on the remote host would invalidate
 *    this guarantee, and so it is not allowed.
 *
 *    The factor of 2 provides a conservative safety factor and derives from
 *    MMP_IMPORT_SAFETY_FACTOR.
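 *
 *    For example, if the last writer ran with fail_intervals = 10 and a
 *    1000 ms multihost_interval, the importer waits 10 * 1000 ms * 2 =
 *    20 seconds of inactivity before concluding the pool is unused.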
 *
 * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *    zfs_multihost_import_intervals
 *
 *    In this case no guarantee can be provided.  However, as long as some
 *    devices are healthy and connected, it is likely that at least one write
 *    will land within (multihost_interval + mmp_delay) because
 *    multihost_interval is enough time for a write to be attempted to each
 *    leaf vdev, and mmp_delay is enough for one to land, based on past delays.
 *    Multiplying by zfs_multihost_import_intervals provides a conservative
 *    safety factor.
 *
 * 3) If uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 *    The same logic as case #2 applies, but we do not know remote tunables.
 *
 *    We use the local value for zfs_multihost_interval because the original
 *    MMP did not record this value in the uberblock.
 *
 *    ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 *    has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 *    that.  We will have waited enough time for zfs_multihost_import_intervals
 *    writes to be issued and all but one to land.
 *
 *    single device pool example delays
 *
 *      import_delay = (1 + 1) * 20   =  40s  #defaults, no I/O delay
 *      import_delay = (1 + 10) * 20  = 220s  #defaults, 10s I/O delay
 *      import_delay = (10 + 10) * 20 = 400s  #10s multihost_interval,
 *                                             no I/O delay
 *
 *    100 device pool example delays
 *
 *      import_delay = (1 + .01) * 20 =  20s  #defaults, no I/O delay
 *      import_delay = (1 + 10) * 20  = 220s  #defaults, 10s I/O delay
 *      import_delay = (10 + .1) * 20 = 202s  #10s multihost_interval,
 *                                             no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 *    In this case local tunables are used.  By default this product is 10s,
 *    long enough for a pool with any activity at all to write at least one
 *    uberblock.  No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, e.g. when both partner hosts are rebooted at
 * the same time and automatically attempt to import the pool.
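 *
 * For example, a computed 20 s activity test may actually run for anywhere
 * between 20 s and 25 s, so two hosts importing simultaneously are unlikely
 * to finish their tests at the same moment.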
 */

/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on.  This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds.  In practice, the observed period can
 * vary with the I/O load and this observed value is the ub_mmp_delay which is
 * stored in the uberblock.  The minimum allowed value is 100 ms.
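 *
 * For example, with 8 healthy leaf vdevs and the default 1000 ms interval,
 * some leaf receives an mmp write roughly every 1000 / 8 = 125 ms on
 * average.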
 */
ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, multihost_interval, CTLFLAG_RWTUN,
    &zfs_multihost_interval, 0, "Interval between MMP writes, milliseconds");

/*
 * Used to control the duration of the activity test on import.  Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool.  The total activity check time
 * is never allowed to drop below one second.  A value of 0 is ignored and
 * treated as if it was set to 1.
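 *
 * For example, halving it from the default of 20 to 10 roughly halves the
 * activity check duration, at the cost of a smaller window in which to
 * observe a heartbeat from an active remote host.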
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;

SYSCTL_UINT(_vfs_zfs, OID_AUTO, multihost_import_intervals, CTLFLAG_RWTUN,
    &zfs_multihost_import_intervals, 0,
    "MMP activity check period for pool import, "
    "in units of multihost_interval");

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored.  The failures will still be reported to the ZED which, depending on
 * its configuration, may take action such as suspending the pool or taking a
 * device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write.  This guarantees the activity test will see
 * mmp writes if the pool is imported.  A value of 1 is ignored and treated as
 * if it was set to 2, because a single leaf vdev pool will issue a write once
 * per multihost_interval and thus any variation in latency would cause the
 * pool to be suspended.
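 *
 * For example, with zfs_multihost_fail_intervals = 10 and the default
 * 1000 ms interval, the pool is suspended once 10 seconds pass without a
 * completed mmp write.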
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;

SYSCTL_UINT(_vfs_zfs, OID_AUTO, multihost_fail_intervals, CTLFLAG_RWTUN,
    &zfs_multihost_fail_intervals, 0,
    "How long to tolerate MMP write failures before suspending a pool, "
    "in units of multihost_interval");

char *mmp_tag = "mmp_write_uberblock";
static void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;

	/*
	 * mmp_write_done() calculates mmp_delay based on prior mmp_delay and
	 * the elapsed time since the last write.  For the first mmp write,
	 * there is no "last write", so we start with fake non-zero values.
	 */
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
	thread_exit();
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, minclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
 * mmp write (if so a new write will also likely block).  If there is no usable
 * leaf, a nonzero error value is returned.  The error value returned is a bit
 * field.
 *
 * MMP_FAIL_WRITE_PENDING	One or more leaf vdevs are writeable, but have
 *				an outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE	One or more leaf vdevs are not writeable.
 */
static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL)
			leaf = list_head(&spa->spa_leaf_list);

		if (!vdev_writeable(leaf)) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals.  The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small.  Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to get lower than the
 * rate expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
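 *
 * As a worked example of the decaying average below: with mmp_delay at
 * 128 ms and a new inter-write delay of 64 ms, the update computes
 * (64 + 128 * 127) / 128 = 127.5 ms, then clamps the result to no less
 * than multihost_interval / leaves.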
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * strictly less than, in case delay was changed above.
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}

static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available.  See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			/*
			 * ZoL porting note: the following is TBD
			 * spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
			 */
		} else {
			mmp->mmp_skip_error = error;
			/*
			 * ZoL porting note: the following is TBD
			 * spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			 * gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			 * mmp->mmp_kstat_id++, error);
			 */
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %#llu",
		    spa_name(spa), gethrtime(), vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * Want to reset mmp_seq when timestamp advances because after
		 * an mmp_seq wrap new values will not be chosen by
		 * uberblock_compare() as the "best".
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
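
	/*
	 * A sketch of what the importer sees: the current sequence number,
	 * write interval, and fail_intervals setting are all packed into the
	 * single 64-bit ub_mmp_config word by the MMP_*_SET macros above, so
	 * a host running the activity test can recover the writer's tunables
	 * from the best uberblock alone.
	 */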
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;
	mutex_exit(&mmp->mmp_io_lock);
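
	/*
	 * A sketch of the slot math below: assuming MMP_BLOCKS_PER_LABEL is 1
	 * and the label holds a 128-slot uberblock ring, the write always
	 * lands in slot 127 (the MMP-reserved tail of the ring) of a randomly
	 * chosen label on the chosen leaf.
	 */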
	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));

	label = spa_get_random(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	/*
	 * ZoL porting note: the following is TBD
	 * (void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	 * ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);
	 */

	zio_nowait(zio);
}

static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended = suspended;
	boolean_t last_spa_multihost = multihost;
	uint64_t last_mmp_interval = mmp_interval;
	uint32_t last_mmp_fail_intervals = mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns = mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */
		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/* Smooth so pool is not suspended when reducing tunables */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals * mmp_interval;
		}
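
		/*
		 * A worked example of the smoothing above: if mmp_fail_ns is
		 * 10s and the tunables now imply 1s, each pass moves
		 * mmp_fail_ns 1/32 of the way toward the new product (10s,
		 * ~9.7s, ~9.4s, ...) rather than dropping at once, so a pool
		 * whose in-flight writes were paced for the old window is not
		 * immediately suspended.
		 */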

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly as
			 * possible.  Write out at higher frequency than usual.
			 */
			skip_wait += leaves;
		}

		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), gethrtime(), last_mmp_interval,
			    mmp_interval, last_mmp_fail_intervals,
			    mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves,
			    next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently.  Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
			    "last_spa_multihost %u multihost %u "
			    "last_spa_suspended %u suspended %u",
			    spa_name(spa), gethrtime(), last_spa_multihost,
			    multihost, last_spa_suspended, suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);

		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
#if defined(illumos)
		(void) cv_timedwait_sig_hrtime(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time);
#elif defined(_KERNEL)
		(void) cv_timedwait_sig_sbt(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, nstosbt(next_time),
		    100 * SBT_1US, C_ABSOLUTE);
#else
		(void) cv_timedwait_sig_hires(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
		    CALLOUT_FLAG_ABSOLUTE);
#endif
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}

/*
 * Signal the MMP thread to wake it, when it is sleeping on
 * its cv.  Used when some module parameter has changed and
 * we want the thread to know about it.
 * Only signal if the pool is active and the mmp thread is
 * running, otherwise there is no thread to wake.
 */
void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}