/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/eventhandler.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <geom/geom.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
int g_mirror_debug = 0;
SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_mirror_sync_period = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN,
    &g_mirror_sync_period, 0,
    "Metadata update period during synchronization, in seconds");
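
/*
 * Example (sketch of the admin interface, not code from this file): the
 * knobs above live under kern.geom.mirror and can be set at runtime or,
 * for tunables, at boot:
 *
 *	# sysctl kern.geom.mirror.debug=2
 *	# echo kern.geom.mirror.sync_requests=4 >> /boot/loader.conf
 *
 * sync_requests is CTLFLAG_RDTUN, so it can only be changed as a tunable.
 */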

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)
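
/*
 * Note that callers pass PRIBIO | PDROP, so msleep() drops the given mutex
 * before returning.  A typical caller therefore locks the mutex itself and
 * does not unlock it afterwards, e.g.:
 *
 *	mtx_lock(&sc->sc_events_mtx);
 *	MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", hz * 5);
 */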

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.providergone = g_mirror_providergone,
	.resize = g_mirror_resize
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, bool force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_reinit(const struct g_mirror_disk *disk,
    struct bio *bp, off_t offset);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct g_mirror_softc *sc,
    struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Events handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}
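
/*
 * Usage sketch (illustrative): most callers post events asynchronously and
 * let the worker thread apply the state change:
 *
 *	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
 *	    G_MIRROR_EVENT_DONTWAIT);
 *
 * Without G_MIRROR_EVENT_DONTWAIT the caller drops sc_lock and sleeps until
 * the worker marks the event G_MIRROR_EVENT_DONE, then returns e_error.
 */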

static struct g_mirror_event *
g_mirror_event_first(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	if ((pp->geom->flags & G_GEOM_WITHER) == 0) {
		G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
		    -cp->acw, -cp->ace, 0);
		if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
		/*
		 * After the retaste event is sent (inside g_access()), we can
		 * post an event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how retaste events are ignored when consumers opened
		 * for writing are closed: the consumer is detached and
		 * destroyed after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
	} else {
		G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_update_ts = time_uptime;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	g_topology_lock();
	LIST_REMOVE(disk, d_next);
	g_topology_unlock();
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_free_device(struct g_mirror_softc *sc)
{

	g_topology_assert();

	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_destroy(&sc->sc_lock);
	free(sc, M_MIRROR);
}

static void
g_mirror_providergone(struct g_provider *pp)
{
	struct g_mirror_softc *sc = pp->private;

	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_first(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	sx_xunlock(&sc->sc_lock);
	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
	g_topology_unlock();
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}
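
/*
 * Illustration (not upstream code): with disks A(ACTIVE) -> B(STALE) ->
 * C(ACTIVE) on sc_disks, g_mirror_find_next(sc, A) skips B and returns C,
 * and g_mirror_find_next(sc, C) wraps around to the list head and returns
 * A.  With a single active disk the walk wraps and returns that same disk.
 */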

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * was reduced.
		 */
		if (offset < md->md_mediasize)
			error = EINVAL;
		else
			mirror_metadata_encode(md, sector);
	}
	KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

void
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}
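
/*
 * Worked example (illustrative): with the default g_mirror_idletime of 5
 * seconds, a mirror open for writing is only marked clean once
 * time_uptime - sc_last_write >= 5.  Until then g_mirror_idle() returns
 * the number of remaining seconds, so the worker can sleep exactly that
 * long before trying again.
 */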

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request_error(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct bio *bp)
{

	if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == EOPNOTSUPP)
		return;

	if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
		disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
		G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
	} else {
		G_MIRROR_LOGREQ(1, bp, "Request failed (error=%d).",
		    bp->bio_error);
	}
	if (g_mirror_disconnect_on_failure &&
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
		if (bp->bio_error == ENXIO &&
		    bp->bio_cmd == BIO_READ)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		else if (bp->bio_error == ENXIO)
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW;
		else
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
		g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
	}
}

static void
g_mirror_regular_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider == bp->bio_parent->bio_to,
	    ("regular request %p with unexpected origin", bp));

	pbp = bp->bio_parent;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
		    bp->bio_error);
		break;
	case BIO_WRITE:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
		    bp->bio_error);
		break;
	case BIO_DELETE:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_delete,
		    bp->bio_error);
		break;
	case BIO_FLUSH:
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_flush,
		    bp->bio_error);
		break;
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE ||
			    pbp->bio_cmd == BIO_DELETE) {
				TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL)
			g_mirror_regular_request_error(sc, disk, bp);
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
		case BIO_FLUSH:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
	case BIO_FLUSH:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		if (pbp->bio_cmd == BIO_WRITE || pbp->bio_cmd == BIO_DELETE) {
			TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue);
			/* Release delayed sync requests if possible. */
			g_mirror_sync_release(sc);
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_candelete(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int *val;

	sc = bp->bio_to->private;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
			break;
	}
	val = (int *)bp->bio_data;
	*val = (disk != NULL);
	g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because it is the
	 * component that the 'prefer' balance algorithm reads from.
	 * If the component with the highest priority is disconnected at dump
	 * time, we would not be able to read the dump after reboot once that
	 * component is reconnected and synchronized.  Can we do something
	 * better?
	 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	if (bp->bio_to->error != 0) {
		mtx_unlock(&sc->sc_queue_mtx);
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static bool
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (true);
		}
	}
	return (false);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static bool
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (true);
	}
	return (false);
}
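
/*
 * Both collision tests treat [start, end) as half-open intervals: two
 * requests collide iff rend > sstart && rstart < send.  For example, a
 * regular write at [64K, 128K) collides with a sync request at
 * [96K, 224K), but not with one at [128K, 256K): there rend == sstart
 * and the ranges merely touch.
 */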

/*
 * Put a regular request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	TAILQ_INSERT_TAIL(&sc->sc_regular_delayed, bp, bio_queue);
}

/*
 * Put a synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue);
}

/*
 * Requeue delayed regular requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp;

	if ((bp = TAILQ_FIRST(&sc->sc_regular_delayed)) == NULL)
		return;
	if (g_mirror_sync_collision(sc, bp))
		return;

	G_MIRROR_DEBUG(2, "Requeuing regular requests after collision.");
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_CONCAT(&sc->sc_regular_delayed, &sc->sc_queue, bio_queue);
	TAILQ_SWAP(&sc->sc_regular_delayed, &sc->sc_queue, bio, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Release delayed sync requests which no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Free a synchronization request and clear its slot in the array.
 */
static void
g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp)
{
	int idx;

	if (disk != NULL && disk->d_sync.ds_bios != NULL) {
		idx = (int)(uintptr_t)bp->bio_caller1;
		KASSERT(disk->d_sync.ds_bios[idx] == bp,
		    ("unexpected sync BIO at %p:%d", disk, idx));
		disk->d_sync.ds_bios[idx] = NULL;
	}
	free(bp->bio_data, M_MIRROR);
	g_destroy_bio(bp);
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a read request
 * is sent to the mirror provider via the sync consumer. If that request
 * completes successfully, it is converted to a write and sent to the disk
 * being synchronized. If the write also completes successfully, the
 * synchronization offset is advanced and a new read request is submitted.
 */
static void
g_mirror_sync_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_mirror_disk_sync *sync;

	KASSERT((bp->bio_cmd == BIO_READ &&
	    bp->bio_from->geom == sc->sc_sync.ds_geom) ||
	    (bp->bio_cmd == BIO_WRITE && bp->bio_from->geom == sc->sc_geom),
	    ("Sync BIO %p with unexpected origin", bp));

	bp->bio_from->index--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_mirror_sync_request_free(NULL, bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	sync = &disk->d_sync;

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ: {
		struct g_consumer *cp;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);

			/*
			 * The read error will trigger a syncid bump, so there's
			 * no need to do that here.
			 *
			 * The read error handling for regular requests will
			 * retry the read from all active mirrors before passing
			 * the error back up, so there's no need to retry here.
			 */
			g_mirror_sync_request_free(disk, bp);
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	}
	case BIO_WRITE: {
		off_t offset;
		int i;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_mirror_sync_request_free(disk, bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			g_mirror_sync_request_free(disk, bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0)
				return;
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
		sync->ds_offset += bp->bio_length;

		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;

		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Requeue delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp != NULL && bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (g_mirror_sync_period > 0 &&
		    time_uptime - sync->ds_update_ts > g_mirror_sync_period) {
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
			sync->ds_update_ts = time_uptime;
		}
		return;
	}
	default:
		panic("Invalid I/O request %p", bp);
	}
}
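
/*
 * Lifecycle sketch of one synchronization slot (illustrative):
 *
 *	BIO_READ [offset, offset + length) via the sync consumer attached
 *	    to the mirror provider
 *	-> on success: bp->bio_cmd = BIO_WRITE, resent to the consumer of
 *	    the disk being synchronized
 *	-> on success: g_mirror_sync_reinit() rewinds the same bio back to
 *	    BIO_READ at sync->ds_offset; the cycle repeats until ds_offset
 *	    reaches sc_mediasize.
 *
 * g_mirror_syncreqs such bios are kept in flight per synchronizing disk.
 */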

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}
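
/*
 * Worked example (illustrative): with LOAD_SCALE 256 the load is an
 * exponential moving average, load' = (index * 256 + load * 7) / 8.  A
 * disk with two outstanding requests (index == 2) and load 256 moves to
 * (512 + 1792) / 8 = 288.  A read whose offset equals d_last_offset gets
 * a 2 * LOAD_SCALE bonus, so sequential readers stick to one disk even
 * under moderate load imbalance.
 */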

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	TAILQ_INIT(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
				TAILQ_REMOVE(&queue, cbp, bio_queue);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}
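
/*
 * Worked example (illustrative): a 1 MiB read on a mirror with three
 * active disks and 512-byte sectors gives slice = 1048576 / 3 = 349525,
 * rounded up to the next sector boundary: 349696.  The clones then cover
 * 349696 + 349696 + 349184 bytes, each range going to a different active
 * component.
 */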

static void
g_mirror_register_request(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct bio *cbp;
	struct g_consumer *cp;
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SA_XLOCKED);

	/*
	 * To avoid ordering issues, if a write is deferred because of a
	 * collision with a sync request, all I/O is deferred until that
	 * write is initiated.
	 */
	if (bp->bio_from->geom != sc->sc_sync.ds_geom &&
	    !TAILQ_EMPTY(&sc->sc_regular_delayed)) {
		g_mirror_regular_delay(sc, bp);
		return;
	}

	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= disk->d_sync.ds_offset)
					continue;
				break;
			default:
				continue;
			}
			if (bp->bio_cmd == BIO_DELETE &&
			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		if (TAILQ_EMPTY(&queue)) {
			KASSERT(bp->bio_cmd == BIO_DELETE,
			    ("No consumers for regular request %p", bp));
			g_io_deliver(bp, EOPNOTSUPP);
			return;
		}
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue);
		return;
	case BIO_FLUSH:
		TAILQ_INIT(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
					TAILQ_REMOVE(&queue, cbp, bio_queue);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
			cbp->bio_done = g_mirror_done;
			cbp->bio_caller1 = disk;
			cbp->bio_to = disk->d_consumer->provider;
		}
		KASSERT(!TAILQ_EMPTY(&queue),
		    ("No consumers for regular request %p", bp));
		while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			TAILQ_REMOVE(&queue, cbp, bio_queue);
			disk = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp = disk->d_consumer;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
			    cp->acr, cp->acw, cp->ace));
			cp->index++;
			g_io_request(cbp, cp);
		}
		return;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
	}
}
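
/*
 * The read balance algorithm dispatched above is a per-device setting,
 * adjustable through the control interface (sketch, see gmirror(8)):
 *
 *	# gmirror configure -b load data
 *	# gmirror configure -b round-robin data
 */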

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
	}
	return (1);
}

static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_first(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, true);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, false);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}

		/*
		 * Check if we can mark the array as CLEAN and, if not,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);

		/*
		 * Handle I/O requests.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		bp = TAILQ_FIRST(&sc->sc_queue);
		if (bp != NULL)
			TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
		else {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
				if (!TAILQ_EMPTY(&sc->sc_queue)) {
					mtx_unlock(&sc->sc_queue_mtx);
					continue;
				}
			}
			if (g_mirror_event_first(sc) != NULL) {
				mtx_unlock(&sc->sc_queue_mtx);
				continue;
			}
			sx_xunlock(&sc->sc_lock);
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			/*
			 * Handle completion of the first half (the read) of a
			 * block synchronization operation.
			 */
			g_mirror_sync_request(sc, bp);
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				/*
				 * Handle completion of a regular I/O request.
				 */
				g_mirror_regular_request(sc, bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				/*
				 * Handle completion of the second half (the
				 * write) of a block synchronization operation.
				 */
				g_mirror_sync_request(sc, bp);
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			/*
			 * Initiate an I/O request.
			 */
			g_mirror_register_request(sc, bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_reinit(const struct g_mirror_disk *disk, struct bio *bp,
    off_t offset)
{
	void *data;
	int idx;

	data = bp->bio_data;
	idx = (int)(uintptr_t)bp->bio_caller1;
	g_reset_bio(bp);

	bp->bio_cmd = BIO_READ;
	bp->bio_data = data;
	bp->bio_done = g_mirror_sync_done;
	bp->bio_from = disk->d_sync.ds_consumer;
	bp->bio_to = disk->d_softc->sc_provider;
	bp->bio_caller1 = (void *)(uintptr_t)idx;
	bp->bio_offset = offset;
	bp->bio_length = MIN(MAXPHYS,
	    disk->d_softc->sc_mediasize - bp->bio_offset);
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk_sync *sync;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sync = &disk->d_sync;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(sync->ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	sync->ds_consumer = cp;
	sync->ds_consumer->private = disk;
	sync->ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	sync->ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		sync->ds_bios[i] = bp;

		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_caller1 = (void *)(uintptr_t)i;
		g_mirror_sync_reinit(disk, bp, sync->ds_offset);
		sync->ds_offset += bp->bio_length;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	sync->ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off the first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = sync->ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);
	}
}

/*
 * Stop the synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	g_mirror_regular_release(sc);
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

static void
g_mirror_launch_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp, *dp;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
	pp->flags |= G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;

	/* Splitting of unmapped BIO's could work but isn't implemented now */
	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_consumer && disk->d_consumer->provider) {
			dp = disk->d_consumer->provider;
			if (dp->stripesize > pp->stripesize) {
				pp->stripesize = dp->stripesize;
				pp->stripeoffset = dp->stripeoffset;
			}
			/* A provider underneath us doesn't support unmapped */
			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
				G_MIRROR_DEBUG(0, "Cancelling unmapped "
				    "because of %s.", dp->name);
				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
			}
		}
	}
	pp->private = sc;
	sc->sc_refcnt++;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_start(disk);
	}
}

static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) {
		TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
		/*
		 * Abort any pending I/O that wasn't generated by us.
		 * Synchronization requests and requests destined for individual
		 * mirror components can be destroyed immediately.
		 */
		if (bp->bio_to == sc->sc_provider &&
		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
			g_io_deliver(bp, ENXIO);
		} else {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
		}
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_wither_provider(sc->sc_provider, ENXIO);
	sc->sc_provider = NULL;
	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
	g_topology_unlock();
}

static void
g_mirror_go(void *arg)
{
	struct g_mirror_softc *sc;

	sc = arg;
	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	g_mirror_event_send(sc, 0,
	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
}
2284 g_mirror_determine_state(struct g_mirror_disk *disk)
2286 struct g_mirror_softc *sc;
2290 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2291 if ((disk->d_flags &
2292 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 &&
2293 (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 ||
2294 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) {
2295 /* Disk does not need synchronization. */
2296 state = G_MIRROR_DISK_STATE_ACTIVE;
2299 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2301 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2303 * We can start synchronization from
2304 * the stored offset.
2306 state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2308 state = G_MIRROR_DISK_STATE_STALE;
2311 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2313 * Reset all synchronization data for this disk,
2314 * because if it even was synchronized, it was
2315 * synchronized to disks with different syncid.
2317 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2318 disk->d_sync.ds_offset = 0;
2319 disk->d_sync.ds_offset_done = 0;
2320 disk->d_sync.ds_syncid = sc->sc_syncid;
2321 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2322 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2323 state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2325 state = G_MIRROR_DISK_STATE_STALE;
2327 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2329 * Not good, NOT GOOD!
2330 * It means that mirror was started on stale disks
2331 * and more fresh disk just arrive.
2332 * If there were writes, mirror is broken, sorry.
2333 * I think the best choice here is don't touch
2334 * this disk and inform the user loudly.
2336 G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2337 "disk (%s) arrives!! It will not be connected to the "
2338 "running device.", sc->sc_name,
2339 g_mirror_get_diskname(disk));
2340 g_mirror_destroy_disk(disk);
2341 state = G_MIRROR_DISK_STATE_NONE;
2342 /* Return immediately, because disk was destroyed. */
2345 G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2346 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
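/*
 * Editorial recap of the decision above (disk syncid vs. device
 * syncid):
 *
 *	ds_syncid == sc_syncid: the disk is current; it becomes ACTIVE
 *	    when it is not marked SYNCHRONIZING and is clean (or no
 *	    disks are active yet); otherwise it is SYNCHRONIZING, or
 *	    STALE when autosynchronization is disabled and FORCE_SYNC
 *	    is not set.
 *	ds_syncid < sc_syncid: the disk is outdated; its sync progress
 *	    is reset and it becomes SYNCHRONIZING (or STALE, as above).
 *	ds_syncid > sc_syncid: the disk is fresher than the running
 *	    device; it is destroyed and NONE is returned.
 */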
2351 * Update device state.
2354 g_mirror_update_device(struct g_mirror_softc *sc, bool force)
2356 struct g_mirror_disk *disk;
2359 sx_assert(&sc->sc_lock, SX_XLOCKED);
2361 switch (sc->sc_state) {
2362 case G_MIRROR_DEVICE_STATE_STARTING:
2364 struct g_mirror_disk *pdisk, *tdisk;
2365 u_int dirty, ndisks, genid, syncid;
2366 bool broken;
2368 KASSERT(sc->sc_provider == NULL,
2369 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2371 * Are we ready? We are, if all disks are connected or
2372 * if we have any disks and 'force' is true.
2374 ndisks = g_mirror_ndisks(sc, -1);
2375 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2376 ;
2377 } else if (ndisks == 0) {
2379 * Disks went down in the starting phase, so destroy
2380 * the device.
2382 callout_drain(&sc->sc_callout);
2383 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2384 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2386 root_mount_rel(sc->sc_rootmount);
2387 sc->sc_rootmount = NULL;
2394 * Activate all disks with the biggest syncid.
2398 * If 'force' is true, we have been called due to
2399 * timeout, so don't bother canceling timeout.
2402 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2403 if ((disk->d_flags &
2404 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2409 /* No valid disks found, destroy device. */
2410 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2411 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2412 __LINE__, sc->sc_rootmount);
2413 root_mount_rel(sc->sc_rootmount);
2414 sc->sc_rootmount = NULL;
2418 /* Cancel timeout. */
2419 callout_drain(&sc->sc_callout);
2423 * Find the biggest genid.
2426 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2427 if (disk->d_genid > genid)
2428 genid = disk->d_genid;
2430 sc->sc_genid = genid;
2432 * Remove all disks without the biggest genid.
2435 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
2436 if (disk->d_genid < genid) {
2438 "Component %s (device %s) broken, skipping.",
2439 g_mirror_get_diskname(disk), sc->sc_name);
2440 g_mirror_destroy_disk(disk);
2442 * Bump the syncid in case we discover a healthy
2443 * replacement disk after starting the mirror.
2450 * Find the biggest syncid.
2453 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2454 if (disk->d_sync.ds_syncid > syncid)
2455 syncid = disk->d_sync.ds_syncid;
2459 * Here we need to look for dirty disks: if all disks
2460 * with the biggest syncid are dirty, we choose the one
2461 * with the biggest priority and rebuild the rest.
2464 * Find the number of dirty disks with the biggest syncid.
2465 * Find the number of disks with the biggest syncid.
2466 * While here, find a disk with the biggest priority.
2470 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2471 if (disk->d_sync.ds_syncid != syncid)
2473 if ((disk->d_flags &
2474 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2478 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
2480 if (pdisk == NULL ||
2481 pdisk->d_priority < disk->d_priority) {
2487 /* No dirty disks at all, great. */
2488 } else if (dirty == ndisks) {
2490 * Force synchronization for all dirty disks except one
2491 * with the biggest priority.
2493 KASSERT(pdisk != NULL, ("pdisk == NULL"));
2494 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
2495 "master disk for synchronization.",
2496 g_mirror_get_diskname(pdisk), sc->sc_name);
2497 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2498 if (disk->d_sync.ds_syncid != syncid)
2500 if ((disk->d_flags &
2501 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2504 KASSERT((disk->d_flags &
2505 G_MIRROR_DISK_FLAG_DIRTY) != 0,
2506 ("Disk %s isn't marked as dirty.",
2507 g_mirror_get_diskname(disk)));
2508 /* Skip the disk with the biggest priority. */
2511 disk->d_sync.ds_syncid = 0;
2513 } else if (dirty < ndisks) {
2515 * Force synchronization for all dirty disks.
2516 * We have some non-dirty disks.
2518 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2519 if (disk->d_sync.ds_syncid != syncid)
2521 if ((disk->d_flags &
2522 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
2525 if ((disk->d_flags &
2526 G_MIRROR_DISK_FLAG_DIRTY) == 0) {
2529 disk->d_sync.ds_syncid = 0;
2535 sc->sc_syncid = syncid;
2536 if (force || broken) {
2537 /* Remember to bump syncid on first write. */
2538 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2540 state = G_MIRROR_DEVICE_STATE_RUNNING;
2541 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2542 sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2543 g_mirror_device_state2str(state));
2544 sc->sc_state = state;
2545 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2546 state = g_mirror_determine_state(disk);
2547 g_mirror_event_send(disk, state,
2548 G_MIRROR_EVENT_DONTWAIT);
2549 if (state == G_MIRROR_DISK_STATE_STALE)
2550 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2554 case G_MIRROR_DEVICE_STATE_RUNNING:
2555 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2556 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2558 * No usable disks, so destroy the device.
2560 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2562 } else if (g_mirror_ndisks(sc,
2563 G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2564 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2566 * We have active disks, so launch the provider if it
2567 * doesn't exist yet.
2569 if (sc->sc_provider == NULL)
2570 g_mirror_launch_provider(sc);
2571 if (sc->sc_rootmount != NULL) {
2572 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2573 __LINE__, sc->sc_rootmount);
2574 root_mount_rel(sc->sc_rootmount);
2575 sc->sc_rootmount = NULL;
2579 * Genid should be bumped immediately, so do it here.
2581 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2582 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2583 g_mirror_bump_genid(sc);
2585 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) {
2586 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW;
2587 g_mirror_bump_syncid(sc);
2591 KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2592 sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
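/*
 * A minimal usage sketch (editorial): state transitions are funnelled
 * through the event queue instead of being applied directly, so a
 * caller that wants a disk reconsidered posts an event and lets the
 * worker thread call back into g_mirror_update_device() and
 * g_mirror_update_disk():
 *
 *	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
 *	    G_MIRROR_EVENT_DONTWAIT);
 *
 * With G_MIRROR_EVENT_WAIT the call instead blocks until the worker
 * has processed the event and returns its error status, as
 * g_mirror_add_disk() below relies on.
 */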
2598 * Update disk state and device state if needed.
2600 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \
2601 "Disk %s state changed from %s to %s (device %s).", \
2602 g_mirror_get_diskname(disk), \
2603 g_mirror_disk_state2str(disk->d_state), \
2604 g_mirror_disk_state2str(state), sc->sc_name)
2606 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2608 struct g_mirror_softc *sc;
2611 sx_assert(&sc->sc_lock, SX_XLOCKED);
2614 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2615 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2616 g_mirror_disk_state2str(state));
2618 case G_MIRROR_DISK_STATE_NEW:
2620 * Possible scenarios:
2621 * 1. A new disk arrives.
2623 /* Previous state should be NONE. */
2624 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
2625 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2626 g_mirror_disk_state2str(disk->d_state)));
2627 DISK_STATE_CHANGED();
2629 disk->d_state = state;
2631 if (LIST_EMPTY(&sc->sc_disks))
2632 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
2634 struct g_mirror_disk *dp;
2636 LIST_FOREACH(dp, &sc->sc_disks, d_next) {
2637 if (disk->d_priority >= dp->d_priority) {
2638 LIST_INSERT_BEFORE(dp, disk, d_next);
2639 dp = NULL;
2640 break;
2641 }
2642 if (LIST_NEXT(dp, d_next) == NULL)
2643 break;
2644 }
2645 if (dp != NULL)
2646 LIST_INSERT_AFTER(dp, disk, d_next);
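/*
 * Editorial example of the ordering maintained above: disks arriving
 * with priorities 0, 2 and 1 end up in the list as 2 -> 1 -> 0, so
 * the head always holds a disk with the highest d_priority (which,
 * e.g., the "prefer" balance algorithm uses when picking the
 * component to read from).
 */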
2648 g_topology_unlock();
2649 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
2650 sc->sc_name, g_mirror_get_diskname(disk));
2651 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
2653 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2654 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2655 g_mirror_device_state2str(sc->sc_state),
2656 g_mirror_get_diskname(disk),
2657 g_mirror_disk_state2str(disk->d_state)));
2658 state = g_mirror_determine_state(disk);
2659 if (state != G_MIRROR_DISK_STATE_NONE)
2662 case G_MIRROR_DISK_STATE_ACTIVE:
2664 * Possible scenarios:
2665 * 1. A new disk does not need synchronization.
2666 * 2. Synchronization process finished successfully.
2668 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2669 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2670 g_mirror_device_state2str(sc->sc_state),
2671 g_mirror_get_diskname(disk),
2672 g_mirror_disk_state2str(disk->d_state)));
2673 /* Previous state should be NEW or SYNCHRONIZING. */
2674 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
2675 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2676 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2677 g_mirror_disk_state2str(disk->d_state)));
2678 DISK_STATE_CHANGED();
2680 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2681 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2682 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
2683 g_mirror_sync_stop(disk, 0);
2685 disk->d_state = state;
2686 disk->d_sync.ds_offset = 0;
2687 disk->d_sync.ds_offset_done = 0;
2688 g_mirror_update_idle(sc, disk);
2689 g_mirror_update_metadata(disk);
2690 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
2691 sc->sc_name, g_mirror_get_diskname(disk));
2693 case G_MIRROR_DISK_STATE_STALE:
2695 * Possible scenarios:
2696 * 1. A stale disk was connected.
2698 /* Previous state should be NEW. */
2699 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2700 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2701 g_mirror_disk_state2str(disk->d_state)));
2702 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2703 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2704 g_mirror_device_state2str(sc->sc_state),
2705 g_mirror_get_diskname(disk),
2706 g_mirror_disk_state2str(disk->d_state)));
2708 * STALE state is only possible if the device is marked
2709 * NOAUTOSYNC.
2711 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2712 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2713 g_mirror_device_state2str(sc->sc_state),
2714 g_mirror_get_diskname(disk),
2715 g_mirror_disk_state2str(disk->d_state)));
2716 DISK_STATE_CHANGED();
2718 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2719 disk->d_state = state;
2720 g_mirror_update_metadata(disk);
2721 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2722 sc->sc_name, g_mirror_get_diskname(disk));
2724 case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2726 * Possible scenarios:
2727 * 1. A disk which needs synchronization was connected.
2729 /* Previous state should be NEW. */
2730 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2731 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2732 g_mirror_disk_state2str(disk->d_state)));
2733 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2734 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2735 g_mirror_device_state2str(sc->sc_state),
2736 g_mirror_get_diskname(disk),
2737 g_mirror_disk_state2str(disk->d_state)));
2738 DISK_STATE_CHANGED();
2740 if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2741 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2742 disk->d_state = state;
2743 if (sc->sc_provider != NULL) {
2744 g_mirror_sync_start(disk);
2745 g_mirror_update_metadata(disk);
2748 case G_MIRROR_DISK_STATE_DISCONNECTED:
2750 * Possible scenarios:
2751 * 1. Device wasn't running yet, but a disk disappeared.
2752 * 2. Disk was active and disappeared.
2753 * 3. Disk disappeared during the synchronization process.
2755 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2757 * Previous state should be ACTIVE, STALE or
2760 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2761 disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2762 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2763 ("Wrong disk state (%s, %s).",
2764 g_mirror_get_diskname(disk),
2765 g_mirror_disk_state2str(disk->d_state)));
2766 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2767 /* Previous state should be NEW. */
2768 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2769 ("Wrong disk state (%s, %s).",
2770 g_mirror_get_diskname(disk),
2771 g_mirror_disk_state2str(disk->d_state)));
2773 * Reset bumping syncid if disk disappeared in STARTING
2774 * state.
2776 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2777 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2780 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2782 g_mirror_device_state2str(sc->sc_state),
2783 g_mirror_get_diskname(disk),
2784 g_mirror_disk_state2str(disk->d_state)));
2787 DISK_STATE_CHANGED();
2788 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2789 sc->sc_name, g_mirror_get_diskname(disk));
2791 g_mirror_destroy_disk(disk);
2793 case G_MIRROR_DISK_STATE_DESTROY:
2797 error = g_mirror_clear_metadata(disk);
2798 if (error != 0) {
2799 G_MIRROR_DEBUG(0,
2800 "Device %s: failed to clear metadata on %s: %d.",
2801 sc->sc_name, g_mirror_get_diskname(disk), error);
2802 break;
2803 }
2804 DISK_STATE_CHANGED();
2805 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2806 sc->sc_name, g_mirror_get_diskname(disk));
2808 g_mirror_destroy_disk(disk);
2810 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2811 g_mirror_update_metadata(disk);
2816 KASSERT(1 == 0, ("Unknown state (%u).", state));
2821 #undef DISK_STATE_CHANGED
2824 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2826 struct g_provider *pp;
2830 g_topology_assert();
2832 error = g_access(cp, 1, 0, 0);
2836 g_topology_unlock();
2837 /* Metadata is stored in the last sector. */
2838 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2841 g_access(cp, -1, 0, 0);
2843 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2844 cp->provider->name, error);
2848 /* Decode metadata. */
2849 error = mirror_metadata_decode(buf, md);
2850 g_free(buf);
2851 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
2852 return (EINVAL);
2853 if (md->md_version > G_MIRROR_VERSION) {
2855 "Kernel module is too old to handle metadata from %s.",
2856 cp->provider->name);
2857 return (EINVAL);
2858 }
2859 if (error != 0) {
2860 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2861 cp->provider->name);
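/*
 * Illustrative sketch (editorial, not driver code): because the
 * metadata block occupies the provider's last sector, a hypothetical
 * userland reader could locate it like this, assuming "fd" is an open
 * disk device and mediasize/sectorsize were obtained with the
 * DIOCGMEDIASIZE and DIOCGSECTORSIZE ioctls from <sys/disk.h>:
 *
 *	off_t md_offset = mediasize - sectorsize;
 *	char *buf = malloc(sectorsize);
 *	if (buf != NULL &&
 *	    pread(fd, buf, sectorsize, md_offset) == (ssize_t)sectorsize)
 *		... decode buf ...
 */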
2869 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
2870 struct g_mirror_metadata *md)
2873 if (g_mirror_id2disk(sc, md->md_did) != NULL) {
2874 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
2875 pp->name, md->md_did);
2878 if (md->md_all != sc->sc_ndisks) {
2880 "Invalid '%s' field on disk %s (device %s), skipping.",
2881 "md_all", pp->name, sc->sc_name);
2884 if (md->md_slice != sc->sc_slice) {
2886 "Invalid '%s' field on disk %s (device %s), skipping.",
2887 "md_slice", pp->name, sc->sc_name);
2890 if (md->md_balance != sc->sc_balance) {
2892 "Invalid '%s' field on disk %s (device %s), skipping.",
2893 "md_balance", pp->name, sc->sc_name);
2897 if (md->md_mediasize != sc->sc_mediasize) {
2899 "Invalid '%s' field on disk %s (device %s), skipping.",
2900 "md_mediasize", pp->name, sc->sc_name);
2904 if (sc->sc_mediasize > pp->mediasize) {
2906 "Invalid size of disk %s (device %s), skipping.", pp->name,
2910 if (md->md_sectorsize != sc->sc_sectorsize) {
2912 "Invalid '%s' field on disk %s (device %s), skipping.",
2913 "md_sectorsize", pp->name, sc->sc_name);
2916 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2918 "Invalid sector size of disk %s (device %s), skipping.",
2919 pp->name, sc->sc_name);
2922 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
2924 "Invalid device flags on disk %s (device %s), skipping.",
2925 pp->name, sc->sc_name);
2928 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
2930 "Invalid disk flags on disk %s (device %s), skipping.",
2931 pp->name, sc->sc_name);
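/*
 * Recap of the validation above (editorial): a component is accepted
 * only when its disk id is not already in use and its metadata
 * matches the existing device exactly -- md_all, md_slice,
 * md_balance, md_mediasize and md_sectorsize must equal the device's
 * values, the provider must be large enough, its sector size must
 * divide sc_sectorsize, and no unknown device or disk flags may be
 * set.
 */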
2938 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
2939 struct g_mirror_metadata *md)
2941 struct g_mirror_disk *disk;
2944 g_topology_assert_not();
2945 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);
2947 error = g_mirror_check_metadata(sc, pp, md);
2950 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
2951 md->md_genid < sc->sc_genid) {
2952 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
2953 pp->name, sc->sc_name);
2956 disk = g_mirror_init_disk(sc, pp, md, &error);
2959 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
2960 G_MIRROR_EVENT_WAIT);
2963 if (md->md_version < G_MIRROR_VERSION) {
2964 G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
2965 pp->name, md->md_version, G_MIRROR_VERSION);
2966 g_mirror_update_metadata(disk);
2972 g_mirror_destroy_delayed(void *arg, int flag)
2974 struct g_mirror_softc *sc;
2977 if (flag == EV_CANCEL) {
2978 G_MIRROR_DEBUG(1, "Destroying canceled.");
2982 g_topology_unlock();
2983 sx_xlock(&sc->sc_lock);
2984 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
2985 ("DESTROY flag set on %s.", sc->sc_name));
2986 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0,
2987 ("CLOSEWAIT flag not set on %s.", sc->sc_name));
2988 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
2989 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
2991 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).",
2992 sc->sc_name, error);
2993 sx_xunlock(&sc->sc_lock);
2999 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
3001 struct g_mirror_softc *sc;
3004 g_topology_assert();
3005 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3009 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3011 g_topology_unlock();
3012 sx_xlock(&sc->sc_lock);
3013 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
3014 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 ||
3015 LIST_EMPTY(&sc->sc_disks)) {
3016 if (acr > 0 || acw > 0 || ace > 0)
3017 error = ENXIO;
3018 goto end;
3019 }
3020 sc->sc_provider_open += acr + acw + ace;
3021 if (pp->acw + acw == 0)
3022 g_mirror_idle(sc, 0);
3023 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 &&
3024 sc->sc_provider_open == 0)
3025 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL);
3027 sx_xunlock(&sc->sc_lock);
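/*
 * Editorial sketch of the accounting above: sc_provider_open is the
 * sum of all read, write and exclusive references to the provider.
 * An open(O_RDWR) of /dev/mirror/<name> arrives here roughly as
 * (acr = 1, acw = 1, ace = 0) and raises the count by two; the
 * matching close passes negative deltas and lowers it again. Once
 * the count reaches zero on a device flagged CLOSEWAIT, the queued
 * g_mirror_destroy_delayed() event finally destroys it.
 */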
3033 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
3036 struct g_mirror_softc *sc;
3040 g_topology_assert();
3041 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
3044 /* At least one disk is required. */
3050 gp = g_new_geomf(mp, "%s", md->md_name);
3051 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
3052 gp->start = g_mirror_start;
3053 gp->orphan = g_mirror_orphan;
3054 gp->access = g_mirror_access;
3055 gp->dumpconf = g_mirror_dumpconf;
3058 sc->sc_id = md->md_mid;
3059 sc->sc_slice = md->md_slice;
3060 sc->sc_balance = md->md_balance;
3061 sc->sc_mediasize = md->md_mediasize;
3062 sc->sc_sectorsize = md->md_sectorsize;
3063 sc->sc_ndisks = md->md_all;
3064 sc->sc_flags = md->md_mflags;
3067 sc->sc_last_write = time_uptime;
3070 sx_init(&sc->sc_lock, "gmirror:lock");
3071 TAILQ_INIT(&sc->sc_queue);
3072 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
3073 TAILQ_INIT(&sc->sc_regular_delayed);
3074 TAILQ_INIT(&sc->sc_inflight);
3075 TAILQ_INIT(&sc->sc_sync_delayed);
3076 LIST_INIT(&sc->sc_disks);
3077 TAILQ_INIT(&sc->sc_events);
3078 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
3079 callout_init(&sc->sc_callout, 1);
3080 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF);
3081 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
3084 sc->sc_provider = NULL;
3085 sc->sc_provider_open = 0;
3087 * Synchronization geom.
3089 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3091 gp->orphan = g_mirror_orphan;
3092 sc->sc_sync.ds_geom = gp;
3093 sc->sc_sync.ds_ndisks = 0;
3094 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
3095 "g_mirror %s", md->md_name);
3097 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
3099 g_destroy_geom(sc->sc_sync.ds_geom);
3100 g_destroy_geom(sc->sc_geom);
3101 g_mirror_free_device(sc);
3105 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
3106 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3108 sc->sc_rootmount = root_mount_hold("GMIRROR");
3109 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3113 timeout = g_mirror_timeout * hz;
3114 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
3115 return (sc->sc_geom);
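/*
 * Unit note on the timeout above (editorial): g_mirror_timeout is
 * expressed in seconds while callout_reset() takes ticks, hence the
 * multiplication by hz. Assuming the default timeout of 4 seconds
 * and a typical hz of 1000:
 *
 *	timeout = 4 * 1000;	(4000 ticks)
 *	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
 *
 * i.e. g_mirror_go() forces the device to start roughly 4 seconds
 * after creation if not all components have appeared by then.
 */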
3119 g_mirror_destroy(struct g_mirror_softc *sc, int how)
3121 struct g_mirror_disk *disk;
3123 g_topology_assert_not();
3124 sx_assert(&sc->sc_lock, SX_XLOCKED);
3126 if (sc->sc_provider_open != 0) {
3128 case G_MIRROR_DESTROY_SOFT:
3130 "Device %s is still open (%d).", sc->sc_name,
3131 sc->sc_provider_open);
3133 case G_MIRROR_DESTROY_DELAYED:
3135 "Device %s will be destroyed on last close.",
3137 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
3138 if (disk->d_state ==
3139 G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3140 g_mirror_sync_stop(disk, 1);
3143 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT;
3145 case G_MIRROR_DESTROY_HARD:
3146 G_MIRROR_DEBUG(1, "Device %s is still open, so it "
3147 "can't be definitely removed.", sc->sc_name);
3151 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3152 sx_xunlock(&sc->sc_lock);
3155 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
3156 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN;
3157 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3158 sx_xunlock(&sc->sc_lock);
3159 mtx_lock(&sc->sc_queue_mtx);
3160 wakeup(sc);
3161 mtx_unlock(&sc->sc_queue_mtx);
3162 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3163 while (sc->sc_worker != NULL)
3164 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
3165 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3166 sx_xlock(&sc->sc_lock);
3167 g_mirror_destroy_device(sc);
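/*
 * Editorial recap of the destroy modes handled above: SOFT refuses
 * while the provider is still open, DELAYED stops any running
 * synchronization and defers destruction to the last close via the
 * CLOSEWAIT flag, and HARD proceeds even though an open device
 * "can't be definitely removed". Once past that check, the worker
 * thread is woken and waited for before g_mirror_destroy_device()
 * runs.
 */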
3172 g_mirror_taste_orphan(struct g_consumer *cp)
3175 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3176 cp->provider->name));
3179 static struct g_geom *
3180 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3182 struct g_mirror_metadata md;
3183 struct g_mirror_softc *sc;
3184 struct g_consumer *cp;
3188 g_topology_assert();
3189 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3190 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
3192 gp = g_new_geomf(mp, "mirror:taste");
3194 * This orphan function should never be called.
3196 gp->orphan = g_mirror_taste_orphan;
3197 cp = g_new_consumer(gp);
3199 error = g_mirror_read_metadata(cp, &md);
3201 g_destroy_consumer(cp);
3207 if (md.md_provider[0] != '\0' &&
3208 !g_compare_names(md.md_provider, pp->name))
3210 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3212 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3214 "Device %s: provider %s marked as inactive, skipping.",
3215 md.md_name, pp->name);
3218 if (g_mirror_debug >= 2)
3219 mirror_metadata_dump(&md);
3222 * Let's check if the device already exists.
3225 LIST_FOREACH(gp, &mp->geom, geom) {
3229 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
3231 if (sc->sc_sync.ds_geom == gp)
3233 if (strcmp(md.md_name, sc->sc_name) != 0)
3235 if (md.md_mid != sc->sc_id) {
3236 G_MIRROR_DEBUG(0, "Device %s already configured.",
3243 gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC);
3245 G_MIRROR_DEBUG(0, "Cannot create device %s.",
3251 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3252 g_topology_unlock();
3253 sx_xlock(&sc->sc_lock);
3254 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
3255 error = g_mirror_add_disk(sc, pp, &md);
3257 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3258 pp->name, gp->name, error);
3259 if (LIST_EMPTY(&sc->sc_disks)) {
3261 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3267 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
3268 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
3269 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
3273 sx_xunlock(&sc->sc_lock);
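/*
 * Editorial summary of the taste path above: the on-disk metadata is
 * read and validated through a throwaway geom and consumer; then we
 * look for an existing AUTOMATIC device with a matching name and mid,
 * create one if none exists, and add the provider as a disk. If
 * adding fails and leaves the device without disks, or if tasting
 * marked the device for destruction, it is torn down again with
 * G_MIRROR_DESTROY_HARD.
 */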
3279 g_mirror_resize(struct g_consumer *cp)
3281 struct g_mirror_disk *disk;
3283 g_topology_assert();
3284 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);
3289 g_topology_unlock();
3290 g_mirror_update_metadata(disk);
3295 g_mirror_destroy_geom(struct gctl_req *req __unused,
3296 struct g_class *mp __unused, struct g_geom *gp)
3298 struct g_mirror_softc *sc;
3301 g_topology_unlock();
3303 sx_xlock(&sc->sc_lock);
3305 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
3307 sx_xunlock(&sc->sc_lock);
3313 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3314 struct g_consumer *cp, struct g_provider *pp)
3316 struct g_mirror_softc *sc;
3318 g_topology_assert();
3323 /* Skip synchronization geom. */
3324 if (gp == sc->sc_sync.ds_geom)
3328 } else if (cp != NULL) {
3329 struct g_mirror_disk *disk;
3334 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
3335 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
3336 sbuf_printf(sb, "%s<Synchronized>", indent);
3337 if (disk->d_sync.ds_offset == 0)
3338 sbuf_printf(sb, "0%%");
3340 sbuf_printf(sb, "%u%%",
2341 (u_int)((disk->d_sync.ds_offset * 100) /
2342 sc->sc_mediasize));
3343 sbuf_printf(sb, "</Synchronized>\n");
3344 if (disk->d_sync.ds_offset > 0)
3345 sbuf_printf(sb, "%s<BytesSynced>%jd"
3346 "</BytesSynced>\n", indent,
3347 (intmax_t)disk->d_sync.ds_offset);
3349 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3350 disk->d_sync.ds_syncid);
3351 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
3353 sbuf_printf(sb, "%s<Flags>", indent);
3354 if (disk->d_flags == 0)
3355 sbuf_printf(sb, "NONE");
3359 #define ADD_FLAG(flag, name) do { \
3360 if ((disk->d_flags & (flag)) != 0) { \
3362 sbuf_printf(sb, ", "); \
3365 sbuf_printf(sb, name); \
3368 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
3369 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
3370 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
3371 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
3373 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3374 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
3377 sbuf_printf(sb, "</Flags>\n");
3378 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
3380 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3381 g_mirror_disk_state2str(disk->d_state));
3383 sbuf_printf(sb, "%s<Type>", indent);
3384 switch (sc->sc_type) {
3385 case G_MIRROR_TYPE_AUTOMATIC:
3386 sbuf_printf(sb, "AUTOMATIC");
3388 case G_MIRROR_TYPE_MANUAL:
3389 sbuf_printf(sb, "MANUAL");
3392 sbuf_printf(sb, "UNKNOWN");
3395 sbuf_printf(sb, "</Type>\n");
3396 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3397 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3398 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3399 sbuf_printf(sb, "%s<Flags>", indent);
3400 if (sc->sc_flags == 0)
3401 sbuf_printf(sb, "NONE");
3405 #define ADD_FLAG(flag, name) do { \
3406 if ((sc->sc_flags & (flag)) != 0) { \
3408 sbuf_printf(sb, ", "); \
3411 sbuf_printf(sb, name); \
3414 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3415 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3418 sbuf_printf(sb, "</Flags>\n");
3419 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
3420 (u_int)sc->sc_slice);
3421 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
3422 balance_name(sc->sc_balance));
3423 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3425 sbuf_printf(sb, "%s<State>", indent);
3426 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
3427 sbuf_printf(sb, "%s", "STARTING");
3428 else if (sc->sc_ndisks ==
3429 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
3430 sbuf_printf(sb, "%s", "COMPLETE");
3432 sbuf_printf(sb, "%s", "DEGRADED");
3433 sbuf_printf(sb, "</State>\n");
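/*
 * Illustrative sketch of the device-level XML emitted above for a
 * healthy two-component automatic mirror (values are made-up
 * examples):
 *
 *	<Type>AUTOMATIC</Type>
 *	<ID>1234567890</ID>
 *	<SyncID>1</SyncID>
 *	<GenID>0</GenID>
 *	<Flags>NONE</Flags>
 *	<Slice>4096</Slice>
 *	<Balance>load</Balance>
 *	<Components>2</Components>
 *	<State>COMPLETE</State>
 */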
3438 g_mirror_shutdown_post_sync(void *arg, int howto)
3441 struct g_geom *gp, *gp2;
3442 struct g_mirror_softc *sc;
3445 if (panicstr != NULL)
3446 return;
3450 g_mirror_shutdown = 1;
3451 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3452 if ((sc = gp->softc) == NULL)
3454 /* Skip synchronization geom. */
3455 if (gp == sc->sc_sync.ds_geom)
3457 g_topology_unlock();
3458 sx_xlock(&sc->sc_lock);
3459 g_mirror_idle(sc, -1);
3461 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
3463 sx_xunlock(&sc->sc_lock);
3466 g_topology_unlock();
3470 g_mirror_init(struct g_class *mp)
3473 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3474 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3475 if (g_mirror_post_sync == NULL)
3476 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
3480 g_mirror_fini(struct g_class *mp)
3483 if (g_mirror_post_sync != NULL)
3484 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
3487 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
3488 MODULE_VERSION(geom_mirror, 0);