2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
36 #include <sys/mutex.h>
39 #include <sys/sysctl.h>
40 #include <sys/malloc.h>
41 #include <sys/eventhandler.h>
43 #include <geom/geom.h>
45 #include <sys/kthread.h>
46 #include <sys/sched.h>
47 #include <geom/raid3/g_raid3.h>
49 FEATURE(geom_raid3, "GEOM RAID-3 functionality");
51 static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");
53 SYSCTL_DECL(_kern_geom);
54 SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0, "GEOM_RAID3 stuff");
55 u_int g_raid3_debug = 0;
56 TUNABLE_INT("kern.geom.raid3.debug", &g_raid3_debug);
57 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RW, &g_raid3_debug, 0,
59 static u_int g_raid3_timeout = 4;
60 TUNABLE_INT("kern.geom.raid3.timeout", &g_raid3_timeout);
61 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RW, &g_raid3_timeout,
62 0, "Time to wait on all raid3 components");
63 static u_int g_raid3_idletime = 5;
64 TUNABLE_INT("kern.geom.raid3.idletime", &g_raid3_idletime);
65 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RW,
66 &g_raid3_idletime, 0, "Mark components as clean when idling");
67 static u_int g_raid3_disconnect_on_failure = 1;
68 TUNABLE_INT("kern.geom.raid3.disconnect_on_failure",
69 &g_raid3_disconnect_on_failure);
70 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
71 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
72 static u_int g_raid3_syncreqs = 2;
73 TUNABLE_INT("kern.geom.raid3.sync_requests", &g_raid3_syncreqs);
74 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
75 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
76 static u_int g_raid3_use_malloc = 0;
77 TUNABLE_INT("kern.geom.raid3.use_malloc", &g_raid3_use_malloc);
78 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
79 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");
81 static u_int g_raid3_n64k = 50;
82 TUNABLE_INT("kern.geom.raid3.n64k", &g_raid3_n64k);
83 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RD, &g_raid3_n64k, 0,
84 "Maximum number of 64kB allocations");
85 static u_int g_raid3_n16k = 200;
86 TUNABLE_INT("kern.geom.raid3.n16k", &g_raid3_n16k);
87 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RD, &g_raid3_n16k, 0,
88 "Maximum number of 16kB allocations");
89 static u_int g_raid3_n4k = 1200;
90 TUNABLE_INT("kern.geom.raid3.n4k", &g_raid3_n4k);
91 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RD, &g_raid3_n4k, 0,
92 "Maximum number of 4kB allocations");
94 SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
95 "GEOM_RAID3 statistics");
96 static u_int g_raid3_parity_mismatch = 0;
97 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
98 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
100 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \
101 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
102 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
103 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
106 static eventhandler_tag g_raid3_post_sync = NULL;
107 static int g_raid3_shutdown = 0;
109 static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
111 static g_taste_t g_raid3_taste;
112 static void g_raid3_init(struct g_class *mp);
113 static void g_raid3_fini(struct g_class *mp);
115 struct g_class g_raid3_class = {
116 .name = G_RAID3_CLASS_NAME,
117 .version = G_VERSION,
118 .ctlreq = g_raid3_config,
119 .taste = g_raid3_taste,
120 .destroy_geom = g_raid3_destroy_geom,
121 .init = g_raid3_init,
126 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
127 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
128 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
129 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
130 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
131 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
132 static int g_raid3_register_request(struct bio *pbp);
133 static void g_raid3_sync_release(struct g_raid3_softc *sc);
137 g_raid3_disk_state2str(int state)
141 case G_RAID3_DISK_STATE_NODISK:
143 case G_RAID3_DISK_STATE_NONE:
145 case G_RAID3_DISK_STATE_NEW:
147 case G_RAID3_DISK_STATE_ACTIVE:
149 case G_RAID3_DISK_STATE_STALE:
151 case G_RAID3_DISK_STATE_SYNCHRONIZING:
152 return ("SYNCHRONIZING");
153 case G_RAID3_DISK_STATE_DISCONNECTED:
154 return ("DISCONNECTED");
161 g_raid3_device_state2str(int state)
165 case G_RAID3_DEVICE_STATE_STARTING:
167 case G_RAID3_DEVICE_STATE_DEGRADED:
169 case G_RAID3_DEVICE_STATE_COMPLETE:
177 g_raid3_get_diskname(struct g_raid3_disk *disk)
180 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
181 return ("[unknown]");
182 return (disk->d_name);
186 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
189 enum g_raid3_zones zone;
191 if (g_raid3_use_malloc ||
192 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
193 ptr = malloc(size, M_RAID3, flags);
195 ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
196 &sc->sc_zones[zone], flags);
197 sc->sc_zones[zone].sz_requested++;
199 sc->sc_zones[zone].sz_failed++;
205 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
207 enum g_raid3_zones zone;
209 if (g_raid3_use_malloc ||
210 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
213 uma_zfree_arg(sc->sc_zones[zone].sz_zone,
214 ptr, &sc->sc_zones[zone]);
219 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
221 struct g_raid3_zone *sz = arg;
223 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
230 g_raid3_uma_dtor(void *mem, int size, void *arg)
232 struct g_raid3_zone *sz = arg;
237 #define g_raid3_xor(src, dst, size) \
238 _g_raid3_xor((uint64_t *)(src), \
239 (uint64_t *)(dst), (size_t)size)
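/*
 * XOR 'size' bytes from 'src' into 'dst'.  The loop advances in 128-byte
 * steps (sixteen 64-bit words per iteration), which is why the KASSERT
 * below requires 'size' to be a multiple of 128.
 */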
241 _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
244 KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
245 for (; size > 0; size -= 128) {
266 g_raid3_is_zero(struct bio *bp)
268 static const uint64_t zeros[] = {
269 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
274 size = bp->bio_length;
275 addr = (u_char *)bp->bio_data;
276 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
277 if (bcmp(addr, zeros, sizeof(zeros)) != 0)
284 * --- Event handling functions ---
285 * Events in geom_raid3 are used to manage disk and device state changes
286 * from a single thread, which simplifies locking.
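 * The flow, roughly: a producer calls g_raid3_event_send(), which queues
 * a g_raid3_event on sc_events and wakes up the worker; g_raid3_worker()
 * then dequeues it with g_raid3_event_get() and applies it through
 * g_raid3_update_disk() and g_raid3_update_device().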
289 g_raid3_event_free(struct g_raid3_event *ep)
296 g_raid3_event_send(void *arg, int state, int flags)
298 struct g_raid3_softc *sc;
299 struct g_raid3_disk *disk;
300 struct g_raid3_event *ep;
303 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
304 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
305 if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
316 mtx_lock(&sc->sc_events_mtx);
317 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
318 mtx_unlock(&sc->sc_events_mtx);
319 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
320 mtx_lock(&sc->sc_queue_mtx);
322 wakeup(&sc->sc_queue);
323 mtx_unlock(&sc->sc_queue_mtx);
324 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
326 sx_assert(&sc->sc_lock, SX_XLOCKED);
327 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
328 sx_xunlock(&sc->sc_lock);
329 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
330 mtx_lock(&sc->sc_events_mtx);
331 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
335 g_raid3_event_free(ep);
336 sx_xlock(&sc->sc_lock);
340 static struct g_raid3_event *
341 g_raid3_event_get(struct g_raid3_softc *sc)
343 struct g_raid3_event *ep;
345 mtx_lock(&sc->sc_events_mtx);
346 ep = TAILQ_FIRST(&sc->sc_events);
347 mtx_unlock(&sc->sc_events_mtx);
352 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
355 mtx_lock(&sc->sc_events_mtx);
356 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
357 mtx_unlock(&sc->sc_events_mtx);
361 g_raid3_event_cancel(struct g_raid3_disk *disk)
363 struct g_raid3_softc *sc;
364 struct g_raid3_event *ep, *tmpep;
367 sx_assert(&sc->sc_lock, SX_XLOCKED);
369 mtx_lock(&sc->sc_events_mtx);
370 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
371 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
373 if (ep->e_disk != disk)
375 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
376 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
377 g_raid3_event_free(ep);
379 ep->e_error = ECANCELED;
383 mtx_unlock(&sc->sc_events_mtx);
387 * Return the number of disks in the given state.
388 * If state is equal to -1, count all connected disks.
391 g_raid3_ndisks(struct g_raid3_softc *sc, int state)
393 struct g_raid3_disk *disk;
396 sx_assert(&sc->sc_lock, SX_LOCKED);
398 for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
399 disk = &sc->sc_disks[n];
400 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
402 if (state == -1 || disk->d_state == state)
409 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
414 mtx_lock(&sc->sc_queue_mtx);
415 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
416 if (bp->bio_from == cp)
419 mtx_unlock(&sc->sc_queue_mtx);
424 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
429 "I/O requests for %s exist, can't destroy it now.",
433 if (g_raid3_nrequests(sc, cp) > 0) {
435 "I/O requests for %s in queue, can't destroy it now.",
443 g_raid3_destroy_consumer(void *arg, int flags __unused)
445 struct g_consumer *cp;
450 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
452 g_destroy_consumer(cp);
456 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
458 struct g_provider *pp;
464 if (g_raid3_is_busy(sc, cp))
466 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
470 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
473 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
474 -cp->acw, -cp->ace, 0);
475 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
476 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
479 * After the retaste event has been sent (inside g_access()), we can
480 * send an event to detach and destroy the consumer.
481 * A class which has a consumer attached to the given provider
482 * will not receive a retaste event for that provider.
483 * This is how I ignore retaste events when I close
484 * consumers opened for write: I detach and destroy the consumer
485 * after the retaste event is sent.
487 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
490 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
492 g_destroy_consumer(cp);
496 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
498 struct g_consumer *cp;
501 g_topology_assert_not();
502 KASSERT(disk->d_consumer == NULL,
503 ("Disk already connected (device %s).", disk->d_softc->sc_name));
506 cp = g_new_consumer(disk->d_softc->sc_geom);
507 error = g_attach(cp, pp);
509 g_destroy_consumer(cp);
513 error = g_access(cp, 1, 1, 1);
517 g_destroy_consumer(cp);
518 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
522 disk->d_consumer = cp;
523 disk->d_consumer->private = disk;
524 disk->d_consumer->index = 0;
525 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
530 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
537 if (cp->provider != NULL)
538 g_raid3_kill_consumer(sc, cp);
540 g_destroy_consumer(cp);
544 * Initialize a disk: allocate memory, create a consumer, attach it
545 * to the provider and open access (r1w1e1) to it.
547 static struct g_raid3_disk *
548 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
549 struct g_raid3_metadata *md, int *errorp)
551 struct g_raid3_disk *disk;
554 disk = &sc->sc_disks[md->md_no];
555 error = g_raid3_connect_disk(disk, pp);
561 disk->d_state = G_RAID3_DISK_STATE_NONE;
562 disk->d_flags = md->md_dflags;
563 if (md->md_provider[0] != '\0')
564 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
565 disk->d_sync.ds_consumer = NULL;
566 disk->d_sync.ds_offset = md->md_sync_offset;
567 disk->d_sync.ds_offset_done = md->md_sync_offset;
568 disk->d_genid = md->md_genid;
569 disk->d_sync.ds_syncid = md->md_syncid;
576 g_raid3_destroy_disk(struct g_raid3_disk *disk)
578 struct g_raid3_softc *sc;
580 g_topology_assert_not();
582 sx_assert(&sc->sc_lock, SX_XLOCKED);
584 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
586 g_raid3_event_cancel(disk);
587 switch (disk->d_state) {
588 case G_RAID3_DISK_STATE_SYNCHRONIZING:
589 if (sc->sc_syncdisk != NULL)
590 g_raid3_sync_stop(sc, 1);
592 case G_RAID3_DISK_STATE_NEW:
593 case G_RAID3_DISK_STATE_STALE:
594 case G_RAID3_DISK_STATE_ACTIVE:
596 g_raid3_disconnect_consumer(sc, disk->d_consumer);
598 disk->d_consumer = NULL;
601 KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
602 g_raid3_get_diskname(disk),
603 g_raid3_disk_state2str(disk->d_state)));
605 disk->d_state = G_RAID3_DISK_STATE_NODISK;
609 g_raid3_destroy_device(struct g_raid3_softc *sc)
611 struct g_raid3_event *ep;
612 struct g_raid3_disk *disk;
614 struct g_consumer *cp;
617 g_topology_assert_not();
618 sx_assert(&sc->sc_lock, SX_XLOCKED);
621 if (sc->sc_provider != NULL)
622 g_raid3_destroy_provider(sc);
623 for (n = 0; n < sc->sc_ndisks; n++) {
624 disk = &sc->sc_disks[n];
625 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
626 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
627 g_raid3_update_metadata(disk);
628 g_raid3_destroy_disk(disk);
631 while ((ep = g_raid3_event_get(sc)) != NULL) {
632 g_raid3_event_remove(sc, ep);
633 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
634 g_raid3_event_free(ep);
636 ep->e_error = ECANCELED;
637 ep->e_flags |= G_RAID3_EVENT_DONE;
638 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
639 mtx_lock(&sc->sc_events_mtx);
641 mtx_unlock(&sc->sc_events_mtx);
644 callout_drain(&sc->sc_callout);
645 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
648 g_raid3_disconnect_consumer(sc, cp);
649 g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
650 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
651 g_wither_geom(gp, ENXIO);
653 if (!g_raid3_use_malloc) {
654 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
655 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
656 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
658 mtx_destroy(&sc->sc_queue_mtx);
659 mtx_destroy(&sc->sc_events_mtx);
660 sx_xunlock(&sc->sc_lock);
661 sx_destroy(&sc->sc_lock);
665 g_raid3_orphan(struct g_consumer *cp)
667 struct g_raid3_disk *disk;
674 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
675 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
676 G_RAID3_EVENT_DONTWAIT);
680 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
682 struct g_raid3_softc *sc;
683 struct g_consumer *cp;
684 off_t offset, length;
688 g_topology_assert_not();
690 sx_assert(&sc->sc_lock, SX_LOCKED);
692 cp = disk->d_consumer;
693 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
694 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
695 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
696 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
698 length = cp->provider->sectorsize;
699 offset = cp->provider->mediasize - length;
700 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
702 raid3_metadata_encode(md, sector);
703 error = g_write_data(cp, offset, sector, length);
704 free(sector, M_RAID3);
706 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
707 G_RAID3_DEBUG(0, "Cannot write metadata on %s "
708 "(device=%s, error=%d).",
709 g_raid3_get_diskname(disk), sc->sc_name, error);
710 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
712 G_RAID3_DEBUG(1, "Cannot write metadata on %s "
713 "(device=%s, error=%d).",
714 g_raid3_get_diskname(disk), sc->sc_name, error);
716 if (g_raid3_disconnect_on_failure &&
717 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
718 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
719 g_raid3_event_send(disk,
720 G_RAID3_DISK_STATE_DISCONNECTED,
721 G_RAID3_EVENT_DONTWAIT);
728 g_raid3_clear_metadata(struct g_raid3_disk *disk)
732 g_topology_assert_not();
733 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
735 error = g_raid3_write_metadata(disk, NULL);
737 G_RAID3_DEBUG(2, "Metadata on %s cleared.",
738 g_raid3_get_diskname(disk));
741 "Cannot clear metadata on disk %s (error=%d).",
742 g_raid3_get_diskname(disk), error);
748 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
750 struct g_raid3_softc *sc;
751 struct g_provider *pp;
754 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
755 md->md_version = G_RAID3_VERSION;
756 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
757 md->md_id = sc->sc_id;
758 md->md_all = sc->sc_ndisks;
759 md->md_genid = sc->sc_genid;
760 md->md_mediasize = sc->sc_mediasize;
761 md->md_sectorsize = sc->sc_sectorsize;
762 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
763 md->md_no = disk->d_no;
764 md->md_syncid = disk->d_sync.ds_syncid;
765 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
766 if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
767 md->md_sync_offset = 0;
770 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
772 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
773 pp = disk->d_consumer->provider;
776 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
777 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
779 bzero(md->md_provider, sizeof(md->md_provider));
781 md->md_provsize = pp->mediasize;
787 g_raid3_update_metadata(struct g_raid3_disk *disk)
789 struct g_raid3_softc *sc;
790 struct g_raid3_metadata md;
793 g_topology_assert_not();
795 sx_assert(&sc->sc_lock, SX_LOCKED);
797 g_raid3_fill_metadata(disk, &md);
798 error = g_raid3_write_metadata(disk, &md);
800 G_RAID3_DEBUG(2, "Metadata on %s updated.",
801 g_raid3_get_diskname(disk));
804 "Cannot update metadata on disk %s (error=%d).",
805 g_raid3_get_diskname(disk), error);
810 g_raid3_bump_syncid(struct g_raid3_softc *sc)
812 struct g_raid3_disk *disk;
815 g_topology_assert_not();
816 sx_assert(&sc->sc_lock, SX_XLOCKED);
817 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
818 ("%s called with no active disks (device=%s).", __func__,
822 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
824 for (n = 0; n < sc->sc_ndisks; n++) {
825 disk = &sc->sc_disks[n];
826 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
827 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
828 disk->d_sync.ds_syncid = sc->sc_syncid;
829 g_raid3_update_metadata(disk);
835 g_raid3_bump_genid(struct g_raid3_softc *sc)
837 struct g_raid3_disk *disk;
840 g_topology_assert_not();
841 sx_assert(&sc->sc_lock, SX_XLOCKED);
842 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
843 ("%s called with no active disks (device=%s).", __func__,
847 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
849 for (n = 0; n < sc->sc_ndisks; n++) {
850 disk = &sc->sc_disks[n];
851 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
852 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
853 disk->d_genid = sc->sc_genid;
854 g_raid3_update_metadata(disk);
860 g_raid3_idle(struct g_raid3_softc *sc, int acw)
862 struct g_raid3_disk *disk;
866 g_topology_assert_not();
867 sx_assert(&sc->sc_lock, SX_XLOCKED);
869 if (sc->sc_provider == NULL)
871 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
875 if (sc->sc_writes > 0)
877 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
878 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
879 if (!g_raid3_shutdown && timeout > 0)
883 for (i = 0; i < sc->sc_ndisks; i++) {
884 disk = &sc->sc_disks[i];
885 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
887 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
888 g_raid3_get_diskname(disk), sc->sc_name);
889 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
890 g_raid3_update_metadata(disk);
896 g_raid3_unidle(struct g_raid3_softc *sc)
898 struct g_raid3_disk *disk;
901 g_topology_assert_not();
902 sx_assert(&sc->sc_lock, SX_XLOCKED);
904 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
907 sc->sc_last_write = time_uptime;
908 for (i = 0; i < sc->sc_ndisks; i++) {
909 disk = &sc->sc_disks[i];
910 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
912 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
913 g_raid3_get_diskname(disk), sc->sc_name);
914 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
915 g_raid3_update_metadata(disk);
920 * Treat the bio_driver1 field in the parent bio as the list head and the
921 * bio_caller1 field in each child bio as the pointer to the next element.
923 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1
925 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1
927 #define G_RAID3_FOREACH_BIO(pbp, bp) \
928 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \
929 (bp) = G_RAID3_NEXT_BIO(bp))
931 #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \
932 for ((bp) = G_RAID3_HEAD_BIO(pbp); \
933 (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \
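/*
 * A typical use (sketch), mirroring g_raid3_scatter() below; the _SAFE
 * variant must be used when children may be destroyed inside the loop:
 *
 *	struct bio *cbp, *tmpbp;
 *
 *	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
 *		if (cbp->bio_error != 0)
 *			g_raid3_destroy_bio(sc, cbp);
 *	}
 */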
937 g_raid3_init_bio(struct bio *pbp)
940 G_RAID3_HEAD_BIO(pbp) = NULL;
944 g_raid3_remove_bio(struct bio *cbp)
946 struct bio *pbp, *bp;
948 pbp = cbp->bio_parent;
949 if (G_RAID3_HEAD_BIO(pbp) == cbp)
950 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
952 G_RAID3_FOREACH_BIO(pbp, bp) {
953 if (G_RAID3_NEXT_BIO(bp) == cbp) {
954 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
959 G_RAID3_NEXT_BIO(cbp) = NULL;
963 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
965 struct bio *pbp, *bp;
967 g_raid3_remove_bio(sbp);
968 pbp = dbp->bio_parent;
969 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
970 if (G_RAID3_HEAD_BIO(pbp) == dbp)
971 G_RAID3_HEAD_BIO(pbp) = sbp;
973 G_RAID3_FOREACH_BIO(pbp, bp) {
974 if (G_RAID3_NEXT_BIO(bp) == dbp) {
975 G_RAID3_NEXT_BIO(bp) = sbp;
980 G_RAID3_NEXT_BIO(dbp) = NULL;
984 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
986 struct bio *bp, *pbp;
989 pbp = cbp->bio_parent;
991 KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
992 size = pbp->bio_length / (sc->sc_ndisks - 1);
993 g_raid3_free(sc, cbp->bio_data, size);
994 if (G_RAID3_HEAD_BIO(pbp) == cbp) {
995 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
996 G_RAID3_NEXT_BIO(cbp) = NULL;
999 G_RAID3_FOREACH_BIO(pbp, bp) {
1000 if (G_RAID3_NEXT_BIO(bp) == cbp)
1004 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
1005 ("NULL bp->bio_driver1"));
1006 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
1007 G_RAID3_NEXT_BIO(cbp) = NULL;
1014 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
1016 struct bio *bp, *cbp;
1020 cbp = g_clone_bio(pbp);
1023 size = pbp->bio_length / (sc->sc_ndisks - 1);
1024 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
1028 cbp->bio_data = g_raid3_alloc(sc, size, memflag);
1029 if (cbp->bio_data == NULL) {
1030 pbp->bio_children--;
1034 G_RAID3_NEXT_BIO(cbp) = NULL;
1035 if (G_RAID3_HEAD_BIO(pbp) == NULL)
1036 G_RAID3_HEAD_BIO(pbp) = cbp;
1038 G_RAID3_FOREACH_BIO(pbp, bp) {
1039 if (G_RAID3_NEXT_BIO(bp) == NULL) {
1040 G_RAID3_NEXT_BIO(bp) = cbp;
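/*
 * Scatter a parent request across the components: each child bio carries
 * bio_length / (sc_ndisks - 1) bytes, every provider sector is split into
 * "atom"-sized chunks of sectorsize / (sc_ndisks - 1) bytes (one chunk per
 * data component), and the parity component receives the XOR of the data
 * chunks unless the request was marked NOPARITY.
 */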
1049 g_raid3_scatter(struct bio *pbp)
1051 struct g_raid3_softc *sc;
1052 struct g_raid3_disk *disk;
1053 struct bio *bp, *cbp, *tmpbp;
1054 off_t atom, cadd, padd, left;
1057 sc = pbp->bio_to->geom->softc;
1059 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1061 * Find the bio for which we should calculate the parity data.
1063 G_RAID3_FOREACH_BIO(pbp, cbp) {
1064 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1069 KASSERT(bp != NULL, ("NULL parity bio."));
1071 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1073 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1074 G_RAID3_FOREACH_BIO(pbp, cbp) {
1077 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
1082 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1087 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1091 bcopy(cbp->bio_data, bp->bio_data,
1095 g_raid3_xor(cbp->bio_data, bp->bio_data,
1098 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
1099 g_raid3_destroy_bio(sc, cbp);
1102 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1103 struct g_consumer *cp;
1105 disk = cbp->bio_caller2;
1106 cp = disk->d_consumer;
1107 cbp->bio_to = cp->provider;
1108 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1109 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1110 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1111 cp->acr, cp->acw, cp->ace));
1114 g_io_request(cbp, cp);
1119 g_raid3_gather(struct bio *pbp)
1121 struct g_raid3_softc *sc;
1122 struct g_raid3_disk *disk;
1123 struct bio *xbp, *fbp, *cbp;
1124 off_t atom, cadd, padd, left;
1126 sc = pbp->bio_to->geom->softc;
1128 * Find the bio for which we have to calculate the data.
1129 * While walking the list, check whether all requests
1130 * succeeded; if not, fail the whole request.
1131 * In COMPLETE mode we allow one request to fail, so if we
1132 * find one, we send it to the parity consumer.
1133 * If more than one request failed, we fail the whole request.
1136 G_RAID3_FOREACH_BIO(pbp, cbp) {
1137 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1138 KASSERT(xbp == NULL, ("More than one parity bio."));
1141 if (cbp->bio_error == 0)
1144 * Found failed request.
1147 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
1149 * We are already in degraded mode, so we can't
1150 * accept any failures.
1152 if (pbp->bio_error == 0)
1153 pbp->bio_error = cbp->bio_error;
1159 * Next failed request, that's too many.
1161 if (pbp->bio_error == 0)
1162 pbp->bio_error = fbp->bio_error;
1164 disk = cbp->bio_caller2;
1167 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1168 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1169 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
1172 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
1175 if (g_raid3_disconnect_on_failure &&
1176 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1177 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1178 g_raid3_event_send(disk,
1179 G_RAID3_DISK_STATE_DISCONNECTED,
1180 G_RAID3_EVENT_DONTWAIT);
1183 if (pbp->bio_error != 0)
1185 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1186 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
1188 g_raid3_replace_bio(xbp, fbp);
1189 g_raid3_destroy_bio(sc, fbp);
1190 } else if (fbp != NULL) {
1191 struct g_consumer *cp;
1194 * One request failed, so send the same request to
1195 * the parity consumer.
1197 disk = pbp->bio_driver2;
1198 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1199 pbp->bio_error = fbp->bio_error;
1202 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1204 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
1205 if (disk->d_no == sc->sc_ndisks - 1)
1206 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1208 fbp->bio_completed = 0;
1209 fbp->bio_children = 0;
1211 cp = disk->d_consumer;
1212 fbp->bio_caller2 = disk;
1213 fbp->bio_to = cp->provider;
1214 G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
1215 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1216 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1217 cp->acr, cp->acw, cp->ace));
1219 g_io_request(fbp, cp);
1226 G_RAID3_FOREACH_BIO(pbp, cbp) {
1227 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
1229 g_raid3_xor(cbp->bio_data, xbp->bio_data,
1232 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
1233 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1234 if (!g_raid3_is_zero(xbp)) {
1235 g_raid3_parity_mismatch++;
1236 pbp->bio_error = EIO;
1239 g_raid3_destroy_bio(sc, xbp);
1242 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1244 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1245 G_RAID3_FOREACH_BIO(pbp, cbp) {
1246 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
1247 pbp->bio_completed += atom;
1253 if (pbp->bio_error == 0)
1254 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1256 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
1257 G_RAID3_LOGREQ(1, pbp, "Verification error.");
1259 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1261 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
1262 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1263 g_raid3_destroy_bio(sc, cbp);
1264 g_io_deliver(pbp, pbp->bio_error);
1268 g_raid3_done(struct bio *bp)
1270 struct g_raid3_softc *sc;
1272 sc = bp->bio_from->geom->softc;
1273 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
1274 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
1275 mtx_lock(&sc->sc_queue_mtx);
1276 bioq_insert_head(&sc->sc_queue, bp);
1277 mtx_unlock(&sc->sc_queue_mtx);
1279 wakeup(&sc->sc_queue);
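/*
 * The bio_done callbacks (g_raid3_done() above and g_raid3_sync_done()
 * below) do not complete requests themselves; they only tag the child bio
 * and requeue it on sc_queue, so the actual completion handling runs in
 * the worker thread via g_raid3_regular_request()/g_raid3_sync_request().
 */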
1283 g_raid3_regular_request(struct bio *cbp)
1285 struct g_raid3_softc *sc;
1286 struct g_raid3_disk *disk;
1289 g_topology_assert_not();
1291 pbp = cbp->bio_parent;
1292 sc = pbp->bio_to->geom->softc;
1293 cbp->bio_from->index--;
1294 if (cbp->bio_cmd == BIO_WRITE)
1296 disk = cbp->bio_from->private;
1299 g_raid3_kill_consumer(sc, cbp->bio_from);
1300 g_topology_unlock();
1303 G_RAID3_LOGREQ(3, cbp, "Request finished.");
1305 KASSERT(pbp->bio_inbed <= pbp->bio_children,
1306 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
1307 pbp->bio_children));
1308 if (pbp->bio_inbed != pbp->bio_children)
1310 switch (pbp->bio_cmd) {
1312 g_raid3_gather(pbp);
1319 pbp->bio_completed = pbp->bio_length;
1320 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
1321 if (cbp->bio_error == 0) {
1322 g_raid3_destroy_bio(sc, cbp);
1327 error = cbp->bio_error;
1328 else if (pbp->bio_error == 0) {
1330 * Next failed request, that's too many.
1332 pbp->bio_error = error;
1335 disk = cbp->bio_caller2;
1337 g_raid3_destroy_bio(sc, cbp);
1341 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1342 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1343 G_RAID3_LOGREQ(0, cbp,
1344 "Request failed (error=%d).",
1347 G_RAID3_LOGREQ(1, cbp,
1348 "Request failed (error=%d).",
1351 if (g_raid3_disconnect_on_failure &&
1352 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1353 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1354 g_raid3_event_send(disk,
1355 G_RAID3_DISK_STATE_DISCONNECTED,
1356 G_RAID3_EVENT_DONTWAIT);
1358 g_raid3_destroy_bio(sc, cbp);
1360 if (pbp->bio_error == 0)
1361 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1363 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1364 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
1365 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
1366 bioq_remove(&sc->sc_inflight, pbp);
1367 /* Release delayed sync requests if possible. */
1368 g_raid3_sync_release(sc);
1369 g_io_deliver(pbp, pbp->bio_error);
1376 g_raid3_sync_done(struct bio *bp)
1378 struct g_raid3_softc *sc;
1380 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
1381 sc = bp->bio_from->geom->softc;
1382 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
1383 mtx_lock(&sc->sc_queue_mtx);
1384 bioq_insert_head(&sc->sc_queue, bp);
1385 mtx_unlock(&sc->sc_queue_mtx);
1387 wakeup(&sc->sc_queue);
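/*
 * BIO_FLUSH handling: clone the request once per ACTIVE component and pass
 * the clones straight to the component consumers; completion goes through
 * g_std_done().
 */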
1391 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
1393 struct bio_queue_head queue;
1394 struct g_raid3_disk *disk;
1395 struct g_consumer *cp;
1400 for (i = 0; i < sc->sc_ndisks; i++) {
1401 disk = &sc->sc_disks[i];
1402 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
1404 cbp = g_clone_bio(bp);
1406 for (cbp = bioq_first(&queue); cbp != NULL;
1407 cbp = bioq_first(&queue)) {
1408 bioq_remove(&queue, cbp);
1411 if (bp->bio_error == 0)
1412 bp->bio_error = ENOMEM;
1413 g_io_deliver(bp, bp->bio_error);
1416 bioq_insert_tail(&queue, cbp);
1417 cbp->bio_done = g_std_done;
1418 cbp->bio_caller1 = disk;
1419 cbp->bio_to = disk->d_consumer->provider;
1421 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1422 bioq_remove(&queue, cbp);
1423 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1424 disk = cbp->bio_caller1;
1425 cbp->bio_caller1 = NULL;
1426 cp = disk->d_consumer;
1427 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1428 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1429 cp->acr, cp->acw, cp->ace));
1430 g_io_request(cbp, disk->d_consumer);
1435 g_raid3_start(struct bio *bp)
1437 struct g_raid3_softc *sc;
1439 sc = bp->bio_to->geom->softc;
1441 * If sc == NULL or there are no valid disks, provider's error
1442 * should be set and g_raid3_start() should not be called at all.
1444 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
1445 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
1446 ("Provider's error should be set (error=%d)(device=%s).",
1447 bp->bio_to->error, bp->bio_to->name));
1448 G_RAID3_LOGREQ(3, bp, "Request received.");
1450 switch (bp->bio_cmd) {
1456 g_raid3_flush(sc, bp);
1460 g_io_deliver(bp, EOPNOTSUPP);
1463 mtx_lock(&sc->sc_queue_mtx);
1464 bioq_insert_tail(&sc->sc_queue, bp);
1465 mtx_unlock(&sc->sc_queue_mtx);
1466 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
1471 * Return TRUE if the given request is colliding with an in-progress
1472 * synchronization request.
1475 g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
1477 struct g_raid3_disk *disk;
1479 off_t rstart, rend, sstart, send;
1482 disk = sc->sc_syncdisk;
1485 rstart = bp->bio_offset;
1486 rend = bp->bio_offset + bp->bio_length;
1487 for (i = 0; i < g_raid3_syncreqs; i++) {
1488 sbp = disk->d_sync.ds_bios[i];
1491 sstart = sbp->bio_offset;
1492 send = sbp->bio_length;
1493 if (sbp->bio_cmd == BIO_WRITE) {
1494 sstart *= sc->sc_ndisks - 1;
1495 send *= sc->sc_ndisks - 1;
1498 if (rend > sstart && rstart < send)
1505 * Return TRUE if the given sync request is colliding with an in-progress regular
1509 g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
1511 off_t rstart, rend, sstart, send;
1514 if (sc->sc_syncdisk == NULL)
1516 sstart = sbp->bio_offset;
1517 send = sstart + sbp->bio_length;
1518 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
1519 rstart = bp->bio_offset;
1520 rend = bp->bio_offset + bp->bio_length;
1521 if (rend > sstart && rstart < send)
1528 * Puts request onto delayed queue.
1531 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
1534 G_RAID3_LOGREQ(2, bp, "Delaying request.");
1535 bioq_insert_head(&sc->sc_regular_delayed, bp);
1539 * Puts synchronization request onto delayed queue.
1542 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
1545 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
1546 bioq_insert_tail(&sc->sc_sync_delayed, bp);
1550 * Releases delayed regular requests which no longer collide with sync
1554 g_raid3_regular_release(struct g_raid3_softc *sc)
1556 struct bio *bp, *bp2;
1558 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
1559 if (g_raid3_sync_collision(sc, bp))
1561 bioq_remove(&sc->sc_regular_delayed, bp);
1562 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
1563 mtx_lock(&sc->sc_queue_mtx);
1564 bioq_insert_head(&sc->sc_queue, bp);
1567 * wakeup() is not needed, because this function is called from
1568 * the worker thread.
1570 wakeup(&sc->sc_queue);
1572 mtx_unlock(&sc->sc_queue_mtx);
1577 * Releases delayed sync requests which no longer collide with regular
1581 g_raid3_sync_release(struct g_raid3_softc *sc)
1583 struct bio *bp, *bp2;
1585 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
1586 if (g_raid3_regular_collision(sc, bp))
1588 bioq_remove(&sc->sc_sync_delayed, bp);
1589 G_RAID3_LOGREQ(2, bp,
1590 "Releasing delayed synchronization request.");
1591 g_io_request(bp, bp->bio_from);
1596 * Handle synchronization requests.
1597 * Every synchronization request is a two-step process: first a READ request
1598 * is sent to the active provider, then a WRITE request (carrying the data
1599 * just read) is sent to the provider being synchronized.  When the WRITE
1600 * completes, the next synchronization request is issued.
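 * Note that the READ is addressed to the raid3 provider itself, so before
 * the WRITE is issued to the component, bio_offset and bio_length are
 * divided by (sc_ndisks - 1) to convert from device to component units.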
1603 g_raid3_sync_request(struct bio *bp)
1605 struct g_raid3_softc *sc;
1606 struct g_raid3_disk *disk;
1608 bp->bio_from->index--;
1609 sc = bp->bio_from->geom->softc;
1610 disk = bp->bio_from->private;
1612 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
1614 g_raid3_kill_consumer(sc, bp->bio_from);
1615 g_topology_unlock();
1616 free(bp->bio_data, M_RAID3);
1618 sx_xlock(&sc->sc_lock);
1623 * Synchronization request.
1625 switch (bp->bio_cmd) {
1628 struct g_consumer *cp;
1633 if (bp->bio_error != 0) {
1634 G_RAID3_LOGREQ(0, bp,
1635 "Synchronization request failed (error=%d).",
1640 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1641 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1642 dst = src = bp->bio_data;
1643 if (disk->d_no == sc->sc_ndisks - 1) {
1646 /* Parity component. */
1647 for (left = bp->bio_length; left > 0;
1648 left -= sc->sc_sectorsize) {
1649 bcopy(src, dst, atom);
1651 for (n = 1; n < sc->sc_ndisks - 1; n++) {
1652 g_raid3_xor(src, dst, atom);
1658 /* Regular component. */
1659 src += atom * disk->d_no;
1660 for (left = bp->bio_length; left > 0;
1661 left -= sc->sc_sectorsize) {
1662 bcopy(src, dst, atom);
1663 src += sc->sc_sectorsize;
1667 bp->bio_driver1 = bp->bio_driver2 = NULL;
1669 bp->bio_offset /= sc->sc_ndisks - 1;
1670 bp->bio_length /= sc->sc_ndisks - 1;
1671 bp->bio_cmd = BIO_WRITE;
1673 bp->bio_children = bp->bio_inbed = 0;
1674 cp = disk->d_consumer;
1675 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1676 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1677 cp->acr, cp->acw, cp->ace));
1679 g_io_request(bp, cp);
1684 struct g_raid3_disk_sync *sync;
1685 off_t boffset, moffset;
1689 if (bp->bio_error != 0) {
1690 G_RAID3_LOGREQ(0, bp,
1691 "Synchronization request failed (error=%d).",
1694 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1695 g_raid3_event_send(disk,
1696 G_RAID3_DISK_STATE_DISCONNECTED,
1697 G_RAID3_EVENT_DONTWAIT);
1700 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1701 sync = &disk->d_sync;
1702 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
1703 sync->ds_consumer == NULL ||
1704 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1705 /* Don't send more synchronization requests. */
1706 sync->ds_inflight--;
1707 if (sync->ds_bios != NULL) {
1708 i = (int)(uintptr_t)bp->bio_caller1;
1709 sync->ds_bios[i] = NULL;
1711 free(bp->bio_data, M_RAID3);
1713 if (sync->ds_inflight > 0)
1715 if (sync->ds_consumer == NULL ||
1716 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1720 * Disk up-to-date, activate it.
1722 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
1723 G_RAID3_EVENT_DONTWAIT);
1727 /* Send next synchronization request. */
1728 data = bp->bio_data;
1729 bzero(bp, sizeof(*bp));
1730 bp->bio_cmd = BIO_READ;
1731 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
1732 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1733 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
1734 bp->bio_done = g_raid3_sync_done;
1735 bp->bio_data = data;
1736 bp->bio_from = sync->ds_consumer;
1737 bp->bio_to = sc->sc_provider;
1738 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
1739 sync->ds_consumer->index++;
1741 * Delay the request if it is colliding with a regular request.
1743 if (g_raid3_regular_collision(sc, bp))
1744 g_raid3_sync_delay(sc, bp);
1746 g_io_request(bp, sync->ds_consumer);
1748 /* Release delayed requests if possible. */
1749 g_raid3_regular_release(sc);
1751 /* Find the smallest offset. */
1752 moffset = sc->sc_mediasize;
1753 for (i = 0; i < g_raid3_syncreqs; i++) {
1754 bp = sync->ds_bios[i];
1755 boffset = bp->bio_offset;
1756 if (bp->bio_cmd == BIO_WRITE)
1757 boffset *= sc->sc_ndisks - 1;
1758 if (boffset < moffset)
1761 if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
1762 /* Update offset_done roughly every 100 MAXPHYS-sized blocks. */
1763 sync->ds_offset_done = moffset;
1764 g_raid3_update_metadata(disk);
1769 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1770 bp->bio_cmd, sc->sc_name));
1776 g_raid3_register_request(struct bio *pbp)
1778 struct g_raid3_softc *sc;
1779 struct g_raid3_disk *disk;
1780 struct g_consumer *cp;
1781 struct bio *cbp, *tmpbp;
1782 off_t offset, length;
1784 int round_robin, verify;
1787 sc = pbp->bio_to->geom->softc;
1788 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
1789 sc->sc_syncdisk == NULL) {
1790 g_io_deliver(pbp, EIO);
1793 g_raid3_init_bio(pbp);
1794 length = pbp->bio_length / (sc->sc_ndisks - 1);
1795 offset = pbp->bio_offset / (sc->sc_ndisks - 1);
1796 round_robin = verify = 0;
1797 switch (pbp->bio_cmd) {
1799 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
1800 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1801 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
1803 ndisks = sc->sc_ndisks;
1806 ndisks = sc->sc_ndisks - 1;
1808 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
1809 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1814 KASSERT(!round_robin || !verify,
1815 ("ROUND-ROBIN and VERIFY are mutually exclusive."));
1816 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
1821 * Delay the request if it is colliding with a synchronization
1824 if (g_raid3_sync_collision(sc, pbp)) {
1825 g_raid3_regular_delay(sc, pbp);
1832 sc->sc_last_write = time_uptime;
1834 ndisks = sc->sc_ndisks;
1837 for (n = 0; n < ndisks; n++) {
1838 disk = &sc->sc_disks[n];
1839 cbp = g_raid3_clone_bio(sc, pbp);
1841 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1842 g_raid3_destroy_bio(sc, cbp);
1844 * To prevent deadlock, we must run back up
1845 * with the ENOMEM for failed requests of any
1846 * of our consumers. Our own sync requests
1847 * can stick around, as they are finite.
1849 if ((pbp->bio_cflags &
1850 G_RAID3_BIO_CFLAG_REGULAR) != 0) {
1851 g_io_deliver(pbp, ENOMEM);
1856 cbp->bio_offset = offset;
1857 cbp->bio_length = length;
1858 cbp->bio_done = g_raid3_done;
1859 switch (pbp->bio_cmd) {
1861 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1863 * Replace invalid component with the parity
1866 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1867 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1868 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1869 } else if (round_robin &&
1870 disk->d_no == sc->sc_round_robin) {
1872 * In round-robin mode skip one data component
1873 * and use parity component when reading.
1875 pbp->bio_driver2 = disk;
1876 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1877 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1878 sc->sc_round_robin++;
1880 } else if (verify && disk->d_no == sc->sc_ndisks - 1) {
1881 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1886 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
1887 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
1888 if (n == ndisks - 1) {
1890 * Active parity component, mark it as such.
1893 G_RAID3_BIO_CFLAG_PARITY;
1896 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1897 if (n == ndisks - 1) {
1899 * Parity component is not connected,
1900 * so destroy its request.
1903 G_RAID3_BIO_PFLAG_NOPARITY;
1904 g_raid3_destroy_bio(sc, cbp);
1908 G_RAID3_BIO_CFLAG_NODISK;
1915 cbp->bio_caller2 = disk;
1917 switch (pbp->bio_cmd) {
1921 * If we are in round-robin mode and 'round_robin' is
1922 * still 1, it means that we skipped the parity component
1923 * for this read and must reset the sc_round_robin field.
1925 sc->sc_round_robin = 0;
1927 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1928 disk = cbp->bio_caller2;
1929 cp = disk->d_consumer;
1930 cbp->bio_to = cp->provider;
1931 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1932 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1933 ("Consumer %s not opened (r%dw%de%d).",
1934 cp->provider->name, cp->acr, cp->acw, cp->ace));
1936 g_io_request(cbp, cp);
1942 * Put the request onto the inflight queue, so we can check whether
1943 * new synchronization requests collide with it.
1945 bioq_insert_tail(&sc->sc_inflight, pbp);
1948 * Bump syncid on first write.
1950 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1951 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1952 g_raid3_bump_syncid(sc);
1954 g_raid3_scatter(pbp);
1961 g_raid3_can_destroy(struct g_raid3_softc *sc)
1964 struct g_consumer *cp;
1966 g_topology_assert();
1968 if (gp->softc == NULL)
1970 LIST_FOREACH(cp, &gp->consumer, consumer) {
1971 if (g_raid3_is_busy(sc, cp))
1974 gp = sc->sc_sync.ds_geom;
1975 LIST_FOREACH(cp, &gp->consumer, consumer) {
1976 if (g_raid3_is_busy(sc, cp))
1979 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1985 g_raid3_try_destroy(struct g_raid3_softc *sc)
1988 g_topology_assert_not();
1989 sx_assert(&sc->sc_lock, SX_XLOCKED);
1991 if (sc->sc_rootmount != NULL) {
1992 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1994 root_mount_rel(sc->sc_rootmount);
1995 sc->sc_rootmount = NULL;
1999 if (!g_raid3_can_destroy(sc)) {
2000 g_topology_unlock();
2003 sc->sc_geom->softc = NULL;
2004 sc->sc_sync.ds_geom->softc = NULL;
2005 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
2006 g_topology_unlock();
2007 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2009 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
2010 sx_xunlock(&sc->sc_lock);
2011 wakeup(&sc->sc_worker);
2012 sc->sc_worker = NULL;
2014 g_topology_unlock();
2015 g_raid3_destroy_device(sc);
2016 free(sc->sc_disks, M_RAID3);
2026 g_raid3_worker(void *arg)
2028 struct g_raid3_softc *sc;
2029 struct g_raid3_event *ep;
2034 thread_lock(curthread);
2035 sched_prio(curthread, PRIBIO);
2036 thread_unlock(curthread);
2038 sx_xlock(&sc->sc_lock);
2040 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2042 * First, take a look at events.
2043 * It is important to handle events before any I/O requests.
2045 ep = g_raid3_event_get(sc);
2047 g_raid3_event_remove(sc, ep);
2048 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2049 /* Update only device status. */
2051 "Running event for device %s.",
2054 g_raid3_update_device(sc, 1);
2056 /* Update disk status. */
2057 G_RAID3_DEBUG(3, "Running event for disk %s.",
2058 g_raid3_get_diskname(ep->e_disk));
2059 ep->e_error = g_raid3_update_disk(ep->e_disk,
2061 if (ep->e_error == 0)
2062 g_raid3_update_device(sc, 0);
2064 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2065 KASSERT(ep->e_error == 0,
2066 ("Error cannot be handled."));
2067 g_raid3_event_free(ep);
2069 ep->e_flags |= G_RAID3_EVENT_DONE;
2070 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2072 mtx_lock(&sc->sc_events_mtx);
2074 mtx_unlock(&sc->sc_events_mtx);
2077 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2078 if (g_raid3_try_destroy(sc)) {
2079 curthread->td_pflags &= ~TDP_GEOM;
2080 G_RAID3_DEBUG(1, "Thread exiting.");
2084 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
2088 * Check whether we can mark the array as CLEAN and, if we can't,
2089 * how many seconds we should wait before trying again.
2091 timeout = g_raid3_idle(sc, -1);
2095 /* Get first request from the queue. */
2096 mtx_lock(&sc->sc_queue_mtx);
2097 bp = bioq_first(&sc->sc_queue);
2100 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2101 mtx_unlock(&sc->sc_queue_mtx);
2102 if (g_raid3_try_destroy(sc)) {
2103 curthread->td_pflags &= ~TDP_GEOM;
2104 G_RAID3_DEBUG(1, "Thread exiting.");
2107 mtx_lock(&sc->sc_queue_mtx);
2109 sx_xunlock(&sc->sc_lock);
2111 * XXX: We can miss an event here, because an event
2112 * can be added without the sx device lock and without
2113 * the queue mutex.  Maybe I should just stop using a
2114 * dedicated mutex for event synchronization and
2115 * stick with the queue lock?
2116 * Such an event will hang here until the next I/O request
2117 * or the next event arrives.
2119 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2121 sx_xlock(&sc->sc_lock);
2122 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2126 bioq_remove(&sc->sc_queue, bp);
2127 mtx_unlock(&sc->sc_queue_mtx);
2129 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2130 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2131 g_raid3_sync_request(bp); /* READ */
2132 } else if (bp->bio_to != sc->sc_provider) {
2133 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2134 g_raid3_regular_request(bp);
2135 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2136 g_raid3_sync_request(bp); /* WRITE */
2139 ("Invalid request cflags=0x%hhx to=%s.",
2140 bp->bio_cflags, bp->bio_to->name));
2142 } else if (g_raid3_register_request(bp) != 0) {
2143 mtx_lock(&sc->sc_queue_mtx);
2144 bioq_insert_head(&sc->sc_queue, bp);
2146 * We are short on memory; let's see if there are any finished
2147 * requests we can free.
2149 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2150 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2154 * No finished regular requests, so at least keep
2155 * synchronization running.
2157 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2158 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2161 sx_xunlock(&sc->sc_lock);
2162 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2163 "r3:lowmem", hz / 10);
2164 sx_xlock(&sc->sc_lock);
2166 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2171 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2174 sx_assert(&sc->sc_lock, SX_LOCKED);
2175 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2177 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2178 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2179 g_raid3_get_diskname(disk), sc->sc_name);
2180 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2181 } else if (sc->sc_idle &&
2182 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2183 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2184 g_raid3_get_diskname(disk), sc->sc_name);
2185 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2190 g_raid3_sync_start(struct g_raid3_softc *sc)
2192 struct g_raid3_disk *disk;
2193 struct g_consumer *cp;
2198 g_topology_assert_not();
2199 sx_assert(&sc->sc_lock, SX_XLOCKED);
2201 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2202 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2204 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2205 sc->sc_name, sc->sc_state));
2207 for (n = 0; n < sc->sc_ndisks; n++) {
2208 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2210 disk = &sc->sc_disks[n];
2216 sx_xunlock(&sc->sc_lock);
2218 cp = g_new_consumer(sc->sc_sync.ds_geom);
2219 error = g_attach(cp, sc->sc_provider);
2221 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2222 error = g_access(cp, 1, 0, 0);
2223 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2224 g_topology_unlock();
2225 sx_xlock(&sc->sc_lock);
2227 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2228 g_raid3_get_diskname(disk));
2229 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2230 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2231 KASSERT(disk->d_sync.ds_consumer == NULL,
2232 ("Sync consumer already exists (device=%s, disk=%s).",
2233 sc->sc_name, g_raid3_get_diskname(disk)));
2235 disk->d_sync.ds_consumer = cp;
2236 disk->d_sync.ds_consumer->private = disk;
2237 disk->d_sync.ds_consumer->index = 0;
2238 sc->sc_syncdisk = disk;
2241 * Allocate memory for synchronization bios and initialize them.
2243 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2245 for (n = 0; n < g_raid3_syncreqs; n++) {
2247 disk->d_sync.ds_bios[n] = bp;
2248 bp->bio_parent = NULL;
2249 bp->bio_cmd = BIO_READ;
2250 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2252 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2253 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2254 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2255 bp->bio_done = g_raid3_sync_done;
2256 bp->bio_from = disk->d_sync.ds_consumer;
2257 bp->bio_to = sc->sc_provider;
2258 bp->bio_caller1 = (void *)(uintptr_t)n;
2261 /* Set the number of in-flight synchronization requests. */
2262 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2265 * Fire off first synchronization requests.
2267 for (n = 0; n < g_raid3_syncreqs; n++) {
2268 bp = disk->d_sync.ds_bios[n];
2269 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2270 disk->d_sync.ds_consumer->index++;
2272 * Delay the request if it is colliding with a regular request.
2274 if (g_raid3_regular_collision(sc, bp))
2275 g_raid3_sync_delay(sc, bp);
2277 g_io_request(bp, disk->d_sync.ds_consumer);
2282 * Stop synchronization process.
2283 * type: 0 - synchronization finished
2284 * 1 - synchronization stopped
2287 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2289 struct g_raid3_disk *disk;
2290 struct g_consumer *cp;
2292 g_topology_assert_not();
2293 sx_assert(&sc->sc_lock, SX_LOCKED);
2295 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2296 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2298 disk = sc->sc_syncdisk;
2299 sc->sc_syncdisk = NULL;
2300 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2301 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2302 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2303 g_raid3_disk_state2str(disk->d_state)));
2304 if (disk->d_sync.ds_consumer == NULL)
2308 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2309 sc->sc_name, g_raid3_get_diskname(disk));
2310 } else /* if (type == 1) */ {
2311 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2312 sc->sc_name, g_raid3_get_diskname(disk));
2314 free(disk->d_sync.ds_bios, M_RAID3);
2315 disk->d_sync.ds_bios = NULL;
2316 cp = disk->d_sync.ds_consumer;
2317 disk->d_sync.ds_consumer = NULL;
2318 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2319 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2321 g_raid3_kill_consumer(sc, cp);
2322 g_topology_unlock();
2323 sx_xlock(&sc->sc_lock);
2327 g_raid3_launch_provider(struct g_raid3_softc *sc)
2329 struct g_provider *pp;
2330 struct g_raid3_disk *disk;
2333 sx_assert(&sc->sc_lock, SX_LOCKED);
2336 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2337 pp->mediasize = sc->sc_mediasize;
2338 pp->sectorsize = sc->sc_sectorsize;
2340 pp->stripeoffset = 0;
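/*
 * Inherit the largest stripesize/stripeoffset reported by the components
 * and scale it by the number of data disks, since one full stripe of the
 * raid3 provider spans (sc_ndisks - 1) component stripes.
 */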
2341 for (n = 0; n < sc->sc_ndisks; n++) {
2342 disk = &sc->sc_disks[n];
2343 if (disk->d_consumer && disk->d_consumer->provider &&
2344 disk->d_consumer->provider->stripesize > pp->stripesize) {
2345 pp->stripesize = disk->d_consumer->provider->stripesize;
2346 pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2349 pp->stripesize *= sc->sc_ndisks - 1;
2350 pp->stripeoffset *= sc->sc_ndisks - 1;
2351 sc->sc_provider = pp;
2352 g_error_provider(pp, 0);
2353 g_topology_unlock();
2354 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2355 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2357 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2358 g_raid3_sync_start(sc);
2362 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2366 g_topology_assert_not();
2367 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2371 g_error_provider(sc->sc_provider, ENXIO);
2372 mtx_lock(&sc->sc_queue_mtx);
2373 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2374 bioq_remove(&sc->sc_queue, bp);
2375 g_io_deliver(bp, ENXIO);
2377 mtx_unlock(&sc->sc_queue_mtx);
2378 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2379 sc->sc_provider->name);
2380 sc->sc_provider->flags |= G_PF_WITHER;
2381 g_orphan_provider(sc->sc_provider, ENXIO);
2382 g_topology_unlock();
2383 sc->sc_provider = NULL;
2384 if (sc->sc_syncdisk != NULL)
2385 g_raid3_sync_stop(sc, 1);
2389 g_raid3_go(void *arg)
2391 struct g_raid3_softc *sc;
2394 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2395 g_raid3_event_send(sc, 0,
2396 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
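/*
 * Pick the initial state for a newly arrived disk by comparing its syncid
 * with the device syncid: an equal syncid yields ACTIVE (or SYNCHRONIZING
 * or STALE if the disk still needs synchronization), a smaller syncid means
 * the disk is out of date and becomes SYNCHRONIZING or STALE, and a larger
 * syncid means the device was started on stale disks, so the disk is not
 * connected at all.
 */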
2400 g_raid3_determine_state(struct g_raid3_disk *disk)
2402 struct g_raid3_softc *sc;
2406 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2407 if ((disk->d_flags &
2408 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2409 /* Disk does not need synchronization. */
2410 state = G_RAID3_DISK_STATE_ACTIVE;
2413 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2415 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2417 * We can start synchronization from
2418 * the stored offset.
2420 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2422 state = G_RAID3_DISK_STATE_STALE;
2425 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2427 * Reset all synchronization data for this disk,
2428	 * because even if it was synchronized, it was
2429	 * synchronized against disks with a different syncid.
2431 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2432 disk->d_sync.ds_offset = 0;
2433 disk->d_sync.ds_offset_done = 0;
2434 disk->d_sync.ds_syncid = sc->sc_syncid;
2435 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2436 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2437 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2439 state = G_RAID3_DISK_STATE_STALE;
2441 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2443 * Not good, NOT GOOD!
2444	 * The device was started on stale disks and a fresher
2445	 * disk has just arrived.  If any writes happened in the
2446	 * meantime, the device is inconsistent.  The safest
2447	 * choice is to leave this disk untouched and to inform
2448	 * the user loudly.
2450 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2451 "disk (%s) arrives!! It will not be connected to the "
2452 "running device.", sc->sc_name,
2453 g_raid3_get_diskname(disk));
2454 g_raid3_destroy_disk(disk);
2455 state = G_RAID3_DISK_STATE_NONE;
2456 /* Return immediately, because disk was destroyed. */
2459 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2460 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2465 * Update device state.
2468 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2470 struct g_raid3_disk *disk;
2473 sx_assert(&sc->sc_lock, SX_XLOCKED);
2475 switch (sc->sc_state) {
2476 case G_RAID3_DEVICE_STATE_STARTING:
2478 u_int n, ndirty, ndisks, genid, syncid;
2480 KASSERT(sc->sc_provider == NULL,
2481 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2483 * Are we ready? We are, if all disks are connected or
2484 * one disk is missing and 'force' is true.
2486 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2488 callout_drain(&sc->sc_callout);
2492 * Timeout expired, so destroy device.
2494 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2495 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2496 __LINE__, sc->sc_rootmount);
2497 root_mount_rel(sc->sc_rootmount);
2498 sc->sc_rootmount = NULL;
2504 * Find the biggest genid.
2507 for (n = 0; n < sc->sc_ndisks; n++) {
2508 disk = &sc->sc_disks[n];
2509 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2511 if (disk->d_genid > genid)
2512 genid = disk->d_genid;
2514 sc->sc_genid = genid;
2516 * Remove all disks without the biggest genid.
2518 for (n = 0; n < sc->sc_ndisks; n++) {
2519 disk = &sc->sc_disks[n];
2520 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2522 if (disk->d_genid < genid) {
2524 "Component %s (device %s) broken, skipping.",
2525 g_raid3_get_diskname(disk), sc->sc_name);
2526 g_raid3_destroy_disk(disk);
2531 * There must be at least 'sc->sc_ndisks - 1' components
2532 * with the same syncid and without SYNCHRONIZING flag.
2536 * Find the biggest syncid, number of valid components and
2537 * number of dirty components.
2539 ndirty = ndisks = syncid = 0;
2540 for (n = 0; n < sc->sc_ndisks; n++) {
2541 disk = &sc->sc_disks[n];
2542 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2544 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2546 if (disk->d_sync.ds_syncid > syncid) {
2547 syncid = disk->d_sync.ds_syncid;
2549 } else if (disk->d_sync.ds_syncid < syncid) {
2552 if ((disk->d_flags &
2553 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2559 * Do we have enough valid components?
2561 if (ndisks + 1 < sc->sc_ndisks) {
2563 "Device %s is broken, too few valid components.",
2565 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2569 * If there is one DIRTY component and all disks are present,
2570 * mark it for synchronization. If there is more than one DIRTY
2571 * component, mark parity component for synchronization.
2573 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2574 for (n = 0; n < sc->sc_ndisks; n++) {
2575 disk = &sc->sc_disks[n];
2576 if ((disk->d_flags &
2577 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2581 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2583 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2584 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2585 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2588 sc->sc_syncid = syncid;
2590 /* Remember to bump syncid on first write. */
2591 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2593 if (ndisks == sc->sc_ndisks)
2594 state = G_RAID3_DEVICE_STATE_COMPLETE;
2595 else /* if (ndisks == sc->sc_ndisks - 1) */
2596 state = G_RAID3_DEVICE_STATE_DEGRADED;
2597 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2598 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2599 g_raid3_device_state2str(state));
2600 sc->sc_state = state;
2601 for (n = 0; n < sc->sc_ndisks; n++) {
2602 disk = &sc->sc_disks[n];
2603 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2605 state = g_raid3_determine_state(disk);
2606 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2607 if (state == G_RAID3_DISK_STATE_STALE)
2608 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2612 case G_RAID3_DEVICE_STATE_DEGRADED:
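/*
 * In DEGRADED state four things are handled below: a pending genid
 * bump, waiting while any component is still in NEW state, dropping
 * below sc_ndisks - 1 ACTIVE components (which destroys the device),
 * and regaining all components (which promotes the device back to
 * COMPLETE).
 */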
2614	 * The genid needs to be bumped immediately, so do it here.
2616 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2617 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2618 g_raid3_bump_genid(sc);
2621 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2623 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2624 sc->sc_ndisks - 1) {
2625 if (sc->sc_provider != NULL)
2626 g_raid3_destroy_provider(sc);
2627 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2630 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2632 state = G_RAID3_DEVICE_STATE_COMPLETE;
2634 "Device %s state changed from %s to %s.",
2635 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2636 g_raid3_device_state2str(state));
2637 sc->sc_state = state;
2639 if (sc->sc_provider == NULL)
2640 g_raid3_launch_provider(sc);
2641 if (sc->sc_rootmount != NULL) {
2642 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2644 root_mount_rel(sc->sc_rootmount);
2645 sc->sc_rootmount = NULL;
2648 case G_RAID3_DEVICE_STATE_COMPLETE:
2650	 * The genid needs to be bumped immediately, so do it here.
2652 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2653 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2654 g_raid3_bump_genid(sc);
2657 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2659 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2661 ("Too few ACTIVE components in COMPLETE state (device %s).",
2663 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2664 sc->sc_ndisks - 1) {
2665 state = G_RAID3_DEVICE_STATE_DEGRADED;
2667 "Device %s state changed from %s to %s.",
2668 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2669 g_raid3_device_state2str(state));
2670 sc->sc_state = state;
2672 if (sc->sc_provider == NULL)
2673 g_raid3_launch_provider(sc);
2674 if (sc->sc_rootmount != NULL) {
2675 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2677 root_mount_rel(sc->sc_rootmount);
2678 sc->sc_rootmount = NULL;
2682 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2683 g_raid3_device_state2str(sc->sc_state)));
2689 * Update disk state and device state if needed.
2691 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2692 "Disk %s state changed from %s to %s (device %s).", \
2693 g_raid3_get_diskname(disk), \
2694 g_raid3_disk_state2str(disk->d_state), \
2695 g_raid3_disk_state2str(state), sc->sc_name)
2697 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2699 struct g_raid3_softc *sc;
2702 sx_assert(&sc->sc_lock, SX_XLOCKED);
2705 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2706 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2707 g_raid3_disk_state2str(state));
2709 case G_RAID3_DISK_STATE_NEW:
2711 * Possible scenarios:
2712	 * 1. A new disk arrived.
2714 /* Previous state should be NONE. */
2715 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2716 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2717 g_raid3_disk_state2str(disk->d_state)));
2718 DISK_STATE_CHANGED();
2720 disk->d_state = state;
2721 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2722 sc->sc_name, g_raid3_get_diskname(disk));
2723 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2725 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2726 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2727 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2728 g_raid3_device_state2str(sc->sc_state),
2729 g_raid3_get_diskname(disk),
2730 g_raid3_disk_state2str(disk->d_state)));
2731 state = g_raid3_determine_state(disk);
2732 if (state != G_RAID3_DISK_STATE_NONE)
2735 case G_RAID3_DISK_STATE_ACTIVE:
2737 * Possible scenarios:
2738 * 1. New disk does not need synchronization.
2739 * 2. Synchronization process finished successfully.
2741 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2742 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2743 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2744 g_raid3_device_state2str(sc->sc_state),
2745 g_raid3_get_diskname(disk),
2746 g_raid3_disk_state2str(disk->d_state)));
2747 /* Previous state should be NEW or SYNCHRONIZING. */
2748 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2749 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2750 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2751 g_raid3_disk_state2str(disk->d_state)));
2752 DISK_STATE_CHANGED();
2754 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2755 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2756 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2757 g_raid3_sync_stop(sc, 0);
2759 disk->d_state = state;
2760 disk->d_sync.ds_offset = 0;
2761 disk->d_sync.ds_offset_done = 0;
2762 g_raid3_update_idle(sc, disk);
2763 g_raid3_update_metadata(disk);
2764 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2765 sc->sc_name, g_raid3_get_diskname(disk));
2767 case G_RAID3_DISK_STATE_STALE:
2769 * Possible scenarios:
2770 * 1. Stale disk was connected.
2772 /* Previous state should be NEW. */
2773 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2774 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2775 g_raid3_disk_state2str(disk->d_state)));
2776 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2777 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2778 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2779 g_raid3_device_state2str(sc->sc_state),
2780 g_raid3_get_diskname(disk),
2781 g_raid3_disk_state2str(disk->d_state)));
2783	 * STALE state is only possible if the device is marked NOAUTOSYNC.
2786 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2787 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2788 g_raid3_device_state2str(sc->sc_state),
2789 g_raid3_get_diskname(disk),
2790 g_raid3_disk_state2str(disk->d_state)));
2791 DISK_STATE_CHANGED();
2793 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2794 disk->d_state = state;
2795 g_raid3_update_metadata(disk);
2796 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2797 sc->sc_name, g_raid3_get_diskname(disk));
2799 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2801 * Possible scenarios:
2802 * 1. Disk which needs synchronization was connected.
2804 /* Previous state should be NEW. */
2805 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2806 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2807 g_raid3_disk_state2str(disk->d_state)));
2808 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2809 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2810 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2811 g_raid3_device_state2str(sc->sc_state),
2812 g_raid3_get_diskname(disk),
2813 g_raid3_disk_state2str(disk->d_state)));
2814 DISK_STATE_CHANGED();
2816 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2817 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2818 disk->d_state = state;
2819 if (sc->sc_provider != NULL) {
2820 g_raid3_sync_start(sc);
2821 g_raid3_update_metadata(disk);
2824 case G_RAID3_DISK_STATE_DISCONNECTED:
2826 * Possible scenarios:
2827	 * 1. The device wasn't running yet, but a disk disappeared.
2828	 * 2. The disk was active and disappeared.
2829	 * 3. The disk disappeared during the synchronization process.
2831 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2832 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2834	 * Previous state should be ACTIVE, STALE or SYNCHRONIZING.
2837 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2838 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2839 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2840 ("Wrong disk state (%s, %s).",
2841 g_raid3_get_diskname(disk),
2842 g_raid3_disk_state2str(disk->d_state)));
2843 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2844 /* Previous state should be NEW. */
2845 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2846 ("Wrong disk state (%s, %s).",
2847 g_raid3_get_diskname(disk),
2848 g_raid3_disk_state2str(disk->d_state)));
2850	 * Reset the pending syncid bump if the disk disappeared in the STARTING state.
2853 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2854 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2857 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2859 g_raid3_device_state2str(sc->sc_state),
2860 g_raid3_get_diskname(disk),
2861 g_raid3_disk_state2str(disk->d_state)));
2864 DISK_STATE_CHANGED();
2865 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2866 sc->sc_name, g_raid3_get_diskname(disk));
2868 g_raid3_destroy_disk(disk);
2871 KASSERT(1 == 0, ("Unknown state (%u).", state));
2876 #undef DISK_STATE_CHANGED
2879 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2881 struct g_provider *pp;
2885 g_topology_assert();
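/*
 * Open the consumer read-only just long enough to fetch the metadata
 * sector; the actual read is done with the topology lock dropped and
 * the access is released right afterwards.
 */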
2887 error = g_access(cp, 1, 0, 0);
2891 g_topology_unlock();
2892 /* Metadata are stored in the last sector. */
2893 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2896 g_access(cp, -1, 0, 0);
2898 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2899 cp->provider->name, error);
2903 /* Decode metadata. */
2904 error = raid3_metadata_decode(buf, md);
2906 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2908 if (md->md_version > G_RAID3_VERSION) {
2910 "Kernel module is too old to handle metadata from %s.",
2911 cp->provider->name);
2915 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2916 cp->provider->name);
2919 if (md->md_sectorsize > MAXPHYS) {
2920 G_RAID3_DEBUG(0, "The blocksize is too big.");
2928 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2929 struct g_raid3_metadata *md)
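/*
 * Sanity-check the on-disk metadata against the already configured
 * device: the component number and total count, the media and sector
 * sizes (which must be consistent with striping the data across
 * sc_ndisks - 1 components and with the size of the underlying
 * provider), and the device and disk flags all have to match the
 * existing configuration.
 */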
2932 if (md->md_no >= sc->sc_ndisks) {
2933 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2934 pp->name, md->md_no);
2937 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2938 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2939 pp->name, md->md_no);
2942 if (md->md_all != sc->sc_ndisks) {
2944 "Invalid '%s' field on disk %s (device %s), skipping.",
2945 "md_all", pp->name, sc->sc_name);
2948 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2949 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2950 "0) on disk %s (device %s), skipping.", pp->name,
2954 if (md->md_mediasize != sc->sc_mediasize) {
2956 "Invalid '%s' field on disk %s (device %s), skipping.",
2957 "md_mediasize", pp->name, sc->sc_name);
2960 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2962 "Invalid '%s' field on disk %s (device %s), skipping.",
2963 "md_mediasize", pp->name, sc->sc_name);
2966 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2968 "Invalid size of disk %s (device %s), skipping.", pp->name,
2972 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2974 "Invalid '%s' field on disk %s (device %s), skipping.",
2975 "md_sectorsize", pp->name, sc->sc_name);
2978 if (md->md_sectorsize != sc->sc_sectorsize) {
2980 "Invalid '%s' field on disk %s (device %s), skipping.",
2981 "md_sectorsize", pp->name, sc->sc_name);
2984 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2986 "Invalid sector size of disk %s (device %s), skipping.",
2987 pp->name, sc->sc_name);
2990 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2992 "Invalid device flags on disk %s (device %s), skipping.",
2993 pp->name, sc->sc_name);
2996 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2997 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2999	 * VERIFY and ROUND-ROBIN options are mutually exclusive.
3001 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
3002 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
3005 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
3007 "Invalid disk flags on disk %s (device %s), skipping.",
3008 pp->name, sc->sc_name);
3015 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3016 struct g_raid3_metadata *md)
3018 struct g_raid3_disk *disk;
3021 g_topology_assert_not();
3022 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3024 error = g_raid3_check_metadata(sc, pp, md);
3027 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3028 md->md_genid < sc->sc_genid) {
3029 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3030 pp->name, sc->sc_name);
3033 disk = g_raid3_init_disk(sc, pp, md, &error);
3036 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3037 G_RAID3_EVENT_WAIT);
3040 if (md->md_version < G_RAID3_VERSION) {
3041 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3042 pp->name, md->md_version, G_RAID3_VERSION);
3043 g_raid3_update_metadata(disk);
3049 g_raid3_destroy_delayed(void *arg, int flag)
3051 struct g_raid3_softc *sc;
3054 if (flag == EV_CANCEL) {
3055 G_RAID3_DEBUG(1, "Destroying canceled.");
3059 g_topology_unlock();
3060 sx_xlock(&sc->sc_lock);
3061 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3062 ("DESTROY flag set on %s.", sc->sc_name));
3063 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3064 ("DESTROYING flag not set on %s.", sc->sc_name));
3065 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3066 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3068 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3069 sx_xunlock(&sc->sc_lock);
3075 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3077 struct g_raid3_softc *sc;
3078 int dcr, dcw, dce, error = 0;
3080 g_topology_assert();
3081 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3084 sc = pp->geom->softc;
3085 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3087 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3089 dcr = pp->acr + acr;
3090 dcw = pp->acw + acw;
3091 dce = pp->ace + ace;
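/*
 * dcr/dcw/dce are the provider's access counts as they will be once
 * this request has been applied; dcw is handed to g_raid3_idle()
 * below and, for a device flagged DESTROYING, all three reaching
 * zero means the last close happened and the deferred destruction
 * event can be posted.
 */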
3093 g_topology_unlock();
3094 sx_xlock(&sc->sc_lock);
3095 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3096 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3097 if (acr > 0 || acw > 0 || ace > 0)
3102 g_raid3_idle(sc, dcw);
3103 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3104 if (acr > 0 || acw > 0 || ace > 0) {
3108 if (dcr == 0 && dcw == 0 && dce == 0) {
3109 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3114 sx_xunlock(&sc->sc_lock);
3119 static struct g_geom *
3120 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3122 struct g_raid3_softc *sc;
3127 g_topology_assert();
3128 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3130 /* One disk is minimum. */
3136 gp = g_new_geomf(mp, "%s", md->md_name);
3137 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3138 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3140 gp->start = g_raid3_start;
3141 gp->orphan = g_raid3_orphan;
3142 gp->access = g_raid3_access;
3143 gp->dumpconf = g_raid3_dumpconf;
3145 sc->sc_id = md->md_id;
3146 sc->sc_mediasize = md->md_mediasize;
3147 sc->sc_sectorsize = md->md_sectorsize;
3148 sc->sc_ndisks = md->md_all;
3149 sc->sc_round_robin = 0;
3150 sc->sc_flags = md->md_mflags;
3153 sc->sc_last_write = time_uptime;
3155 for (n = 0; n < sc->sc_ndisks; n++) {
3156 sc->sc_disks[n].d_softc = sc;
3157 sc->sc_disks[n].d_no = n;
3158 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3160 sx_init(&sc->sc_lock, "graid3:lock");
3161 bioq_init(&sc->sc_queue);
3162 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3163 bioq_init(&sc->sc_regular_delayed);
3164 bioq_init(&sc->sc_inflight);
3165 bioq_init(&sc->sc_sync_delayed);
3166 TAILQ_INIT(&sc->sc_events);
3167 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3168 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
3169 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3172 sc->sc_provider = NULL;
3174 * Synchronization geom.
3176 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3178 gp->orphan = g_raid3_orphan;
3179 sc->sc_sync.ds_geom = gp;
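/*
 * Unless g_raid3_use_malloc is set, per-device UMA zones are created
 * for 64k, 16k and 4k I/O buffers; sz_max caps each zone at the
 * corresponding g_raid3_n64k/n16k/n4k tunable and the
 * sz_inuse/sz_requested/sz_failed counters track allocation
 * statistics.
 */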
3181 if (!g_raid3_use_malloc) {
3182 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3183 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3185 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3186 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3187 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3188 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3189 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3190 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3192 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3193 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3194 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3195 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3196 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3197 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3199 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3200 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3201 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3202 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3205 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3206 "g_raid3 %s", md->md_name);
3208 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3210 if (!g_raid3_use_malloc) {
3211 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3212 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3213 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3215 g_destroy_geom(sc->sc_sync.ds_geom);
3216 mtx_destroy(&sc->sc_events_mtx);
3217 mtx_destroy(&sc->sc_queue_mtx);
3218 sx_destroy(&sc->sc_lock);
3219 g_destroy_geom(sc->sc_geom);
3220 free(sc->sc_disks, M_RAID3);
3225 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3226 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3228 sc->sc_rootmount = root_mount_hold("GRAID3");
3229 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
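/*
 * Arm the startup callout: if not all components show up within
 * g_raid3_timeout seconds, g_raid3_go() forces a device update so
 * the array starts (or is torn down) with whatever components are
 * present.
 */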
3234 timeout = atomic_load_acq_int(&g_raid3_timeout);
3235 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3236 return (sc->sc_geom);
3240 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3242 struct g_provider *pp;
3244 g_topology_assert_not();
3247 sx_assert(&sc->sc_lock, SX_XLOCKED);
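/*
 * If the provider is still open, the outcome depends on 'how':
 * G_RAID3_DESTROY_SOFT refuses, G_RAID3_DESTROY_DELAYED marks the
 * device DESTROYING so that it is destroyed on last close, and
 * G_RAID3_DESTROY_HARD tears the device down anyway.
 */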
3249 pp = sc->sc_provider;
3250 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3252 case G_RAID3_DESTROY_SOFT:
3254 "Device %s is still open (r%dw%de%d).", pp->name,
3255 pp->acr, pp->acw, pp->ace);
3257 case G_RAID3_DESTROY_DELAYED:
3259 "Device %s will be destroyed on last close.",
3261 if (sc->sc_syncdisk != NULL)
3262 g_raid3_sync_stop(sc, 1);
3263 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3265 case G_RAID3_DESTROY_HARD:
3266 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3267 "can't be definitely removed.", pp->name);
3273 if (sc->sc_geom->softc == NULL) {
3274 g_topology_unlock();
3277 sc->sc_geom->softc = NULL;
3278 sc->sc_sync.ds_geom->softc = NULL;
3279 g_topology_unlock();
3281 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3282 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3283 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3284 sx_xunlock(&sc->sc_lock);
3285 mtx_lock(&sc->sc_queue_mtx);
3287 wakeup(&sc->sc_queue);
3288 mtx_unlock(&sc->sc_queue_mtx);
3289 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3290 while (sc->sc_worker != NULL)
3291 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3292 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3293 sx_xlock(&sc->sc_lock);
3294 g_raid3_destroy_device(sc);
3295 free(sc->sc_disks, M_RAID3);
3301 g_raid3_taste_orphan(struct g_consumer *cp)
3304 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3305 cp->provider->name));
3308 static struct g_geom *
3309 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3311 struct g_raid3_metadata md;
3312 struct g_raid3_softc *sc;
3313 struct g_consumer *cp;
3317 g_topology_assert();
3318 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3319 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
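/*
 * Read and validate the RAID3 metadata through a throw-away geom and
 * consumer, then either attach the provider to an existing device
 * with a matching name and id, or create a new device and add the
 * disk to it.
 */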
3321 gp = g_new_geomf(mp, "raid3:taste");
3322 /* This orphan function should never be called. */
3323 gp->orphan = g_raid3_taste_orphan;
3324 cp = g_new_consumer(gp);
3326 error = g_raid3_read_metadata(cp, &md);
3328 g_destroy_consumer(cp);
3334 if (md.md_provider[0] != '\0' &&
3335 !g_compare_names(md.md_provider, pp->name))
3337 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3339 if (g_raid3_debug >= 2)
3340 raid3_metadata_dump(&md);
3343	 * Let's check if the device already exists.
3346 LIST_FOREACH(gp, &mp->geom, geom) {
3350 if (sc->sc_sync.ds_geom == gp)
3352 if (strcmp(md.md_name, sc->sc_name) != 0)
3354 if (md.md_id != sc->sc_id) {
3355 G_RAID3_DEBUG(0, "Device %s already configured.",
3362 gp = g_raid3_create(mp, &md);
3364 G_RAID3_DEBUG(0, "Cannot create device %s.",
3370 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3371 g_topology_unlock();
3372 sx_xlock(&sc->sc_lock);
3373 error = g_raid3_add_disk(sc, pp, &md);
3375 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3376 pp->name, gp->name, error);
3377 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3380 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3386 sx_xunlock(&sc->sc_lock);
3392 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3395 struct g_raid3_softc *sc;
3398 g_topology_unlock();
3400 sx_xlock(&sc->sc_lock);
3402 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3404 sx_xunlock(&sc->sc_lock);
3410 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3411 struct g_consumer *cp, struct g_provider *pp)
3413 struct g_raid3_softc *sc;
3415 g_topology_assert();
3420 /* Skip synchronization geom. */
3421 if (gp == sc->sc_sync.ds_geom)
3425 } else if (cp != NULL) {
3426 struct g_raid3_disk *disk;
3431 g_topology_unlock();
3432 sx_xlock(&sc->sc_lock);
3433 sbuf_printf(sb, "%s<Type>", indent);
3434 if (disk->d_no == sc->sc_ndisks - 1)
3435 sbuf_printf(sb, "PARITY");
3437 sbuf_printf(sb, "DATA");
3438 sbuf_printf(sb, "</Type>\n");
3439 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3441 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3442 sbuf_printf(sb, "%s<Synchronized>", indent);
3443 if (disk->d_sync.ds_offset == 0)
3444 sbuf_printf(sb, "0%%");
3446 sbuf_printf(sb, "%u%%",
3447 (u_int)((disk->d_sync.ds_offset * 100) /
3448 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
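/*
 * Each component stores sc_mediasize / (sc_ndisks - 1) bytes of
 * data, so the percentage above is ds_offset relative to that
 * per-component size.
 */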
3450 sbuf_printf(sb, "</Synchronized>\n");
3451 if (disk->d_sync.ds_offset > 0) {
3452 sbuf_printf(sb, "%s<BytesSynced>%jd"
3453 "</BytesSynced>\n", indent,
3454 (intmax_t)disk->d_sync.ds_offset);
3457 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3458 disk->d_sync.ds_syncid);
3459 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3460 sbuf_printf(sb, "%s<Flags>", indent);
3461 if (disk->d_flags == 0)
3462 sbuf_printf(sb, "NONE");
3466 #define ADD_FLAG(flag, name) do { \
3467 if ((disk->d_flags & (flag)) != 0) { \
3469 sbuf_printf(sb, ", "); \
3472 sbuf_printf(sb, name); \
3475 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3476 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3477 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3479 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3480 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3483 sbuf_printf(sb, "</Flags>\n");
3484 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3485 g_raid3_disk_state2str(disk->d_state));
3486 sx_xunlock(&sc->sc_lock);
3489 g_topology_unlock();
3490 sx_xlock(&sc->sc_lock);
3491 if (!g_raid3_use_malloc) {
3493 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3494 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3496 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3497 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3499 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3500 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3502 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3503 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3505 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3506 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3508 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3509 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3511 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3512 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3513 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3514 sbuf_printf(sb, "%s<Flags>", indent);
3515 if (sc->sc_flags == 0)
3516 sbuf_printf(sb, "NONE");
3520 #define ADD_FLAG(flag, name) do { \
3521 if ((sc->sc_flags & (flag)) != 0) { \
3523 sbuf_printf(sb, ", "); \
3526 sbuf_printf(sb, name); \
3529 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3530 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3531 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3533 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3536 sbuf_printf(sb, "</Flags>\n");
3537 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3539 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3540 g_raid3_device_state2str(sc->sc_state));
3541 sx_xunlock(&sc->sc_lock);
3547 g_raid3_shutdown_post_sync(void *arg, int howto)
3550 struct g_geom *gp, *gp2;
3551 struct g_raid3_softc *sc;
3557 g_raid3_shutdown = 1;
3558 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3559 if ((sc = gp->softc) == NULL)
3561 /* Skip synchronization geom. */
3562 if (gp == sc->sc_sync.ds_geom)
3564 g_topology_unlock();
3565 sx_xlock(&sc->sc_lock);
3566 g_raid3_idle(sc, -1);
3568 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3570 sx_xunlock(&sc->sc_lock);
3573 g_topology_unlock();
3578 g_raid3_init(struct g_class *mp)
3581 g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3582 g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3583 if (g_raid3_post_sync == NULL)
3584 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3588 g_raid3_fini(struct g_class *mp)
3591 if (g_raid3_post_sync != NULL)
3592 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
3595 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);