/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>
49 FEATURE(geom_raid3, "GEOM RAID-3 functionality");
51 static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");
SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0,
    "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0,
    "Debug level");
59 static u_int g_raid3_timeout = 4;
60 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout,
61 0, "Time to wait on all raid3 components");
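/*
 * The timeout above is consumed while a new array is being assembled: if not
 * every component shows up in time, g_raid3_go() (below) forces the device to
 * start with the components that did arrive; see the STARTING case in
 * g_raid3_update_device().
 */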
62 static u_int g_raid3_idletime = 5;
63 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN,
64 &g_raid3_idletime, 0, "Mark components as clean when idling");
65 static u_int g_raid3_disconnect_on_failure = 1;
66 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
67 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
68 static u_int g_raid3_syncreqs = 2;
69 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
70 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
71 static u_int g_raid3_use_malloc = 0;
72 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
73 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");
75 static u_int g_raid3_n64k = 50;
76 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0,
77 "Maximum number of 64kB allocations");
78 static u_int g_raid3_n16k = 200;
79 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0,
80 "Maximum number of 16kB allocations");
81 static u_int g_raid3_n4k = 1200;
82 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0,
83 "Maximum number of 4kB allocations");
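/*
 * The n64k/n16k/n4k knobs above are boot-time tunables (CTLFLAG_RDTUN) that
 * cap the per-device UMA zones used for data and parity buffers.  As a purely
 * illustrative example, they could be raised from /boot/loader.conf:
 *
 *	kern.geom.raid3.n64k="100"
 *	kern.geom.raid3.n16k="400"
 *	kern.geom.raid3.n4k="2400"
 */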
85 static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
86 "GEOM_RAID3 statistics");
87 static u_int g_raid3_parity_mismatch = 0;
88 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
89 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)
97 static eventhandler_tag g_raid3_post_sync = NULL;
98 static int g_raid3_shutdown = 0;
static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
102 static g_taste_t g_raid3_taste;
103 static void g_raid3_init(struct g_class *mp);
104 static void g_raid3_fini(struct g_class *mp);
struct g_class g_raid3_class = {
	.name = G_RAID3_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid3_config,
	.taste = g_raid3_taste,
	.destroy_geom = g_raid3_destroy_geom,
	.init = g_raid3_init,
	.fini = g_raid3_fini
};
117 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
118 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
119 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
120 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
121 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
122 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
123 static int g_raid3_register_request(struct bio *pbp);
124 static void g_raid3_sync_release(struct g_raid3_softc *sc);
static const char *
g_raid3_disk_state2str(int state)
{

	switch (state) {
	case G_RAID3_DISK_STATE_NODISK:
		return ("NODISK");
	case G_RAID3_DISK_STATE_NONE:
		return ("NONE");
	case G_RAID3_DISK_STATE_NEW:
		return ("NEW");
	case G_RAID3_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_RAID3_DISK_STATE_STALE:
		return ("STALE");
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_RAID3_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}
152 g_raid3_device_state2str(int state)
156 case G_RAID3_DEVICE_STATE_STARTING:
158 case G_RAID3_DEVICE_STATE_DEGRADED:
160 case G_RAID3_DEVICE_STATE_COMPLETE:
168 g_raid3_get_diskname(struct g_raid3_disk *disk)
171 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
172 return ("[unknown]");
173 return (disk->d_name);
177 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
180 enum g_raid3_zones zone;
182 if (g_raid3_use_malloc ||
183 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
184 ptr = malloc(size, M_RAID3, flags);
186 ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
187 &sc->sc_zones[zone], flags);
188 sc->sc_zones[zone].sz_requested++;
190 sc->sc_zones[zone].sz_failed++;
196 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
198 enum g_raid3_zones zone;
200 if (g_raid3_use_malloc ||
201 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
204 uma_zfree_arg(sc->sc_zones[zone].sz_zone,
205 ptr, &sc->sc_zones[zone]);
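/*
 * Buffer allocation strategy, as implemented above: g_raid3_zone() maps the
 * request size onto one of three per-device UMA zones (4 kB, 16 kB, 64 kB);
 * sizes that fit no zone, or all sizes when kern.geom.raid3.use_malloc is
 * set, fall back to malloc(9).  The UMA constructor below enforces the
 * per-zone sz_max limit so that a zone cannot grow without bound.
 */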
210 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
212 struct g_raid3_zone *sz = arg;
214 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
221 g_raid3_uma_dtor(void *mem, int size, void *arg)
223 struct g_raid3_zone *sz = arg;
#define	g_raid3_xor(src, dst, size)					\
	_g_raid3_xor((uint64_t *)(src),					\
	    (uint64_t *)(dst), (size_t)size)
static void
_g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
{
	u_int n;

	KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
	for (; size > 0; size -= 128) {
		/* The original hand-unrolls this into sixteen XOR statements. */
		for (n = 0; n < 16; n++)
			*dst++ ^= *src++;
	}
}
257 g_raid3_is_zero(struct bio *bp)
259 static const uint64_t zeros[] = {
260 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
265 size = bp->bio_length;
266 addr = (u_char *)bp->bio_data;
267 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
268 if (bcmp(addr, zeros, sizeof(zeros)) != 0)
/*
 * --- Events handling functions ---
 * Events in geom_raid3 are used to funnel disk and device state changes
 * through a single thread, which simplifies locking.
 */
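/*
 * A typical caller looks like the g_raid3_orphan() handler further down:
 *
 *	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
 *	    G_RAID3_EVENT_DONTWAIT);
 *
 * The event is queued under sc_events_mtx and the worker thread is woken up
 * through sc_queue.  Without G_RAID3_EVENT_DONTWAIT the caller drops sc_lock
 * and sleeps until the worker marks the event G_RAID3_EVENT_DONE and reports
 * e_error back.
 */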
280 g_raid3_event_free(struct g_raid3_event *ep)
287 g_raid3_event_send(void *arg, int state, int flags)
289 struct g_raid3_softc *sc;
290 struct g_raid3_disk *disk;
291 struct g_raid3_event *ep;
294 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
295 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
296 if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
307 mtx_lock(&sc->sc_events_mtx);
308 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
309 mtx_unlock(&sc->sc_events_mtx);
310 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
311 mtx_lock(&sc->sc_queue_mtx);
313 wakeup(&sc->sc_queue);
314 mtx_unlock(&sc->sc_queue_mtx);
315 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
317 sx_assert(&sc->sc_lock, SX_XLOCKED);
318 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
319 sx_xunlock(&sc->sc_lock);
320 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
321 mtx_lock(&sc->sc_events_mtx);
322 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
326 g_raid3_event_free(ep);
327 sx_xlock(&sc->sc_lock);
331 static struct g_raid3_event *
332 g_raid3_event_get(struct g_raid3_softc *sc)
334 struct g_raid3_event *ep;
336 mtx_lock(&sc->sc_events_mtx);
337 ep = TAILQ_FIRST(&sc->sc_events);
338 mtx_unlock(&sc->sc_events_mtx);
343 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
346 mtx_lock(&sc->sc_events_mtx);
347 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
348 mtx_unlock(&sc->sc_events_mtx);
352 g_raid3_event_cancel(struct g_raid3_disk *disk)
354 struct g_raid3_softc *sc;
355 struct g_raid3_event *ep, *tmpep;
358 sx_assert(&sc->sc_lock, SX_XLOCKED);
360 mtx_lock(&sc->sc_events_mtx);
361 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
362 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
364 if (ep->e_disk != disk)
366 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
367 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
368 g_raid3_event_free(ep);
370 ep->e_error = ECANCELED;
374 mtx_unlock(&sc->sc_events_mtx);
378 * Return the number of disks in the given state.
379 * If state is equal to -1, count all connected disks.
382 g_raid3_ndisks(struct g_raid3_softc *sc, int state)
384 struct g_raid3_disk *disk;
387 sx_assert(&sc->sc_lock, SX_LOCKED);
389 for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
390 disk = &sc->sc_disks[n];
391 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
393 if (state == -1 || disk->d_state == state)
400 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
405 mtx_lock(&sc->sc_queue_mtx);
406 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
407 if (bp->bio_from == cp)
410 mtx_unlock(&sc->sc_queue_mtx);
415 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
420 "I/O requests for %s exist, can't destroy it now.",
424 if (g_raid3_nrequests(sc, cp) > 0) {
426 "I/O requests for %s in queue, can't destroy it now.",
434 g_raid3_destroy_consumer(void *arg, int flags __unused)
436 struct g_consumer *cp;
441 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
443 g_destroy_consumer(cp);
447 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
449 struct g_provider *pp;
455 if (g_raid3_is_busy(sc, cp))
457 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
461 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
464 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
465 -cp->acw, -cp->ace, 0);
466 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
467 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	/*
	 * After the retaste event has been sent (from inside g_access()), we
	 * can post an event to detach and destroy the consumer.
	 * A class which already has a consumer attached to the given provider
	 * will not receive a retaste event for that provider.
	 * This is how retaste events are ignored for consumers opened for
	 * writing: the consumer is detached and destroyed only after the
	 * retaste event has been sent.
	 */
478 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
481 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
483 g_destroy_consumer(cp);
487 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
489 struct g_consumer *cp;
492 g_topology_assert_not();
493 KASSERT(disk->d_consumer == NULL,
494 ("Disk already connected (device %s).", disk->d_softc->sc_name));
497 cp = g_new_consumer(disk->d_softc->sc_geom);
498 error = g_attach(cp, pp);
500 g_destroy_consumer(cp);
504 error = g_access(cp, 1, 1, 1);
508 g_destroy_consumer(cp);
509 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
513 disk->d_consumer = cp;
514 disk->d_consumer->private = disk;
515 disk->d_consumer->index = 0;
516 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
521 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
528 if (cp->provider != NULL)
529 g_raid3_kill_consumer(sc, cp);
531 g_destroy_consumer(cp);
/*
 * Initialize a disk: allocate memory, create a consumer, attach it to the
 * provider and open access (r1w1e1) to it.
 */
538 static struct g_raid3_disk *
539 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
540 struct g_raid3_metadata *md, int *errorp)
542 struct g_raid3_disk *disk;
545 disk = &sc->sc_disks[md->md_no];
546 error = g_raid3_connect_disk(disk, pp);
552 disk->d_state = G_RAID3_DISK_STATE_NONE;
553 disk->d_flags = md->md_dflags;
554 if (md->md_provider[0] != '\0')
555 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
556 disk->d_sync.ds_consumer = NULL;
557 disk->d_sync.ds_offset = md->md_sync_offset;
558 disk->d_sync.ds_offset_done = md->md_sync_offset;
559 disk->d_genid = md->md_genid;
560 disk->d_sync.ds_syncid = md->md_syncid;
567 g_raid3_destroy_disk(struct g_raid3_disk *disk)
569 struct g_raid3_softc *sc;
571 g_topology_assert_not();
573 sx_assert(&sc->sc_lock, SX_XLOCKED);
575 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
577 g_raid3_event_cancel(disk);
578 switch (disk->d_state) {
579 case G_RAID3_DISK_STATE_SYNCHRONIZING:
580 if (sc->sc_syncdisk != NULL)
581 g_raid3_sync_stop(sc, 1);
583 case G_RAID3_DISK_STATE_NEW:
584 case G_RAID3_DISK_STATE_STALE:
585 case G_RAID3_DISK_STATE_ACTIVE:
587 g_raid3_disconnect_consumer(sc, disk->d_consumer);
589 disk->d_consumer = NULL;
592 KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
593 g_raid3_get_diskname(disk),
594 g_raid3_disk_state2str(disk->d_state)));
596 disk->d_state = G_RAID3_DISK_STATE_NODISK;
600 g_raid3_destroy_device(struct g_raid3_softc *sc)
602 struct g_raid3_event *ep;
603 struct g_raid3_disk *disk;
605 struct g_consumer *cp;
608 g_topology_assert_not();
609 sx_assert(&sc->sc_lock, SX_XLOCKED);
612 if (sc->sc_provider != NULL)
613 g_raid3_destroy_provider(sc);
614 for (n = 0; n < sc->sc_ndisks; n++) {
615 disk = &sc->sc_disks[n];
616 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
617 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
618 g_raid3_update_metadata(disk);
619 g_raid3_destroy_disk(disk);
622 while ((ep = g_raid3_event_get(sc)) != NULL) {
623 g_raid3_event_remove(sc, ep);
624 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
625 g_raid3_event_free(ep);
627 ep->e_error = ECANCELED;
628 ep->e_flags |= G_RAID3_EVENT_DONE;
629 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
630 mtx_lock(&sc->sc_events_mtx);
632 mtx_unlock(&sc->sc_events_mtx);
635 callout_drain(&sc->sc_callout);
636 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
639 g_raid3_disconnect_consumer(sc, cp);
640 g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
641 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
642 g_wither_geom(gp, ENXIO);
644 if (!g_raid3_use_malloc) {
645 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
646 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
647 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
649 mtx_destroy(&sc->sc_queue_mtx);
650 mtx_destroy(&sc->sc_events_mtx);
651 sx_xunlock(&sc->sc_lock);
652 sx_destroy(&sc->sc_lock);
656 g_raid3_orphan(struct g_consumer *cp)
658 struct g_raid3_disk *disk;
665 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
666 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
667 G_RAID3_EVENT_DONTWAIT);
671 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
673 struct g_raid3_softc *sc;
674 struct g_consumer *cp;
675 off_t offset, length;
679 g_topology_assert_not();
681 sx_assert(&sc->sc_lock, SX_LOCKED);
683 cp = disk->d_consumer;
684 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
685 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
686 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
687 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
689 length = cp->provider->sectorsize;
690 offset = cp->provider->mediasize - length;
691 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
693 raid3_metadata_encode(md, sector);
694 error = g_write_data(cp, offset, sector, length);
695 free(sector, M_RAID3);
697 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
698 G_RAID3_DEBUG(0, "Cannot write metadata on %s "
699 "(device=%s, error=%d).",
700 g_raid3_get_diskname(disk), sc->sc_name, error);
701 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
703 G_RAID3_DEBUG(1, "Cannot write metadata on %s "
704 "(device=%s, error=%d).",
705 g_raid3_get_diskname(disk), sc->sc_name, error);
707 if (g_raid3_disconnect_on_failure &&
708 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
709 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
710 g_raid3_event_send(disk,
711 G_RAID3_DISK_STATE_DISCONNECTED,
712 G_RAID3_EVENT_DONTWAIT);
719 g_raid3_clear_metadata(struct g_raid3_disk *disk)
723 g_topology_assert_not();
724 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
726 error = g_raid3_write_metadata(disk, NULL);
728 G_RAID3_DEBUG(2, "Metadata on %s cleared.",
729 g_raid3_get_diskname(disk));
732 "Cannot clear metadata on disk %s (error=%d).",
733 g_raid3_get_diskname(disk), error);
739 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
741 struct g_raid3_softc *sc;
742 struct g_provider *pp;
745 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
746 md->md_version = G_RAID3_VERSION;
747 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
748 md->md_id = sc->sc_id;
749 md->md_all = sc->sc_ndisks;
750 md->md_genid = sc->sc_genid;
751 md->md_mediasize = sc->sc_mediasize;
752 md->md_sectorsize = sc->sc_sectorsize;
753 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
754 md->md_no = disk->d_no;
755 md->md_syncid = disk->d_sync.ds_syncid;
756 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
757 if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
758 md->md_sync_offset = 0;
761 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
763 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
764 pp = disk->d_consumer->provider;
767 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
768 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
770 bzero(md->md_provider, sizeof(md->md_provider));
772 md->md_provsize = pp->mediasize;
778 g_raid3_update_metadata(struct g_raid3_disk *disk)
780 struct g_raid3_softc *sc;
781 struct g_raid3_metadata md;
784 g_topology_assert_not();
786 sx_assert(&sc->sc_lock, SX_LOCKED);
788 g_raid3_fill_metadata(disk, &md);
789 error = g_raid3_write_metadata(disk, &md);
791 G_RAID3_DEBUG(2, "Metadata on %s updated.",
792 g_raid3_get_diskname(disk));
795 "Cannot update metadata on disk %s (error=%d).",
796 g_raid3_get_diskname(disk), error);
801 g_raid3_bump_syncid(struct g_raid3_softc *sc)
803 struct g_raid3_disk *disk;
806 g_topology_assert_not();
807 sx_assert(&sc->sc_lock, SX_XLOCKED);
808 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
809 ("%s called with no active disks (device=%s).", __func__,
813 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
815 for (n = 0; n < sc->sc_ndisks; n++) {
816 disk = &sc->sc_disks[n];
817 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
818 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
819 disk->d_sync.ds_syncid = sc->sc_syncid;
820 g_raid3_update_metadata(disk);
826 g_raid3_bump_genid(struct g_raid3_softc *sc)
828 struct g_raid3_disk *disk;
831 g_topology_assert_not();
832 sx_assert(&sc->sc_lock, SX_XLOCKED);
833 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
834 ("%s called with no active disks (device=%s).", __func__,
838 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
840 for (n = 0; n < sc->sc_ndisks; n++) {
841 disk = &sc->sc_disks[n];
842 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
843 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
844 disk->d_genid = sc->sc_genid;
845 g_raid3_update_metadata(disk);
851 g_raid3_idle(struct g_raid3_softc *sc, int acw)
853 struct g_raid3_disk *disk;
857 g_topology_assert_not();
858 sx_assert(&sc->sc_lock, SX_XLOCKED);
860 if (sc->sc_provider == NULL)
862 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
866 if (sc->sc_writes > 0)
868 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
869 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
870 if (!g_raid3_shutdown && timeout > 0)
874 for (i = 0; i < sc->sc_ndisks; i++) {
875 disk = &sc->sc_disks[i];
876 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
878 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
879 g_raid3_get_diskname(disk), sc->sc_name);
880 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
881 g_raid3_update_metadata(disk);
887 g_raid3_unidle(struct g_raid3_softc *sc)
889 struct g_raid3_disk *disk;
892 g_topology_assert_not();
893 sx_assert(&sc->sc_lock, SX_XLOCKED);
895 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
898 sc->sc_last_write = time_uptime;
899 for (i = 0; i < sc->sc_ndisks; i++) {
900 disk = &sc->sc_disks[i];
901 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
903 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
904 g_raid3_get_diskname(disk), sc->sc_name);
905 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
906 g_raid3_update_metadata(disk);
/*
 * Treat the bio_driver1 field in the parent bio as the list head and the
 * bio_caller1 field in each child bio as the pointer to the next element.
 */
914 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1
916 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1
918 #define G_RAID3_FOREACH_BIO(pbp, bp) \
919 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \
920 (bp) = G_RAID3_NEXT_BIO(bp))
#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)	\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);		\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))
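/*
 * The result is a simple singly-linked list rooted in the parent bio:
 *
 *	pbp->bio_driver1 --> cbp0 --bio_caller1--> cbp1 --> ... --> NULL
 *
 * so G_RAID3_FOREACH_BIO() visits every child cloned for pbp.
 */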
928 g_raid3_init_bio(struct bio *pbp)
931 G_RAID3_HEAD_BIO(pbp) = NULL;
935 g_raid3_remove_bio(struct bio *cbp)
937 struct bio *pbp, *bp;
939 pbp = cbp->bio_parent;
940 if (G_RAID3_HEAD_BIO(pbp) == cbp)
941 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
943 G_RAID3_FOREACH_BIO(pbp, bp) {
944 if (G_RAID3_NEXT_BIO(bp) == cbp) {
945 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
950 G_RAID3_NEXT_BIO(cbp) = NULL;
954 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
956 struct bio *pbp, *bp;
958 g_raid3_remove_bio(sbp);
959 pbp = dbp->bio_parent;
960 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
961 if (G_RAID3_HEAD_BIO(pbp) == dbp)
962 G_RAID3_HEAD_BIO(pbp) = sbp;
964 G_RAID3_FOREACH_BIO(pbp, bp) {
965 if (G_RAID3_NEXT_BIO(bp) == dbp) {
966 G_RAID3_NEXT_BIO(bp) = sbp;
971 G_RAID3_NEXT_BIO(dbp) = NULL;
975 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
977 struct bio *bp, *pbp;
980 pbp = cbp->bio_parent;
982 KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
983 size = pbp->bio_length / (sc->sc_ndisks - 1);
984 g_raid3_free(sc, cbp->bio_data, size);
985 if (G_RAID3_HEAD_BIO(pbp) == cbp) {
986 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
987 G_RAID3_NEXT_BIO(cbp) = NULL;
990 G_RAID3_FOREACH_BIO(pbp, bp) {
991 if (G_RAID3_NEXT_BIO(bp) == cbp)
995 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
996 ("NULL bp->bio_driver1"));
997 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
998 G_RAID3_NEXT_BIO(cbp) = NULL;
1005 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
1007 struct bio *bp, *cbp;
1011 cbp = g_clone_bio(pbp);
1014 size = pbp->bio_length / (sc->sc_ndisks - 1);
1015 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
1019 cbp->bio_data = g_raid3_alloc(sc, size, memflag);
1020 if (cbp->bio_data == NULL) {
1021 pbp->bio_children--;
1025 G_RAID3_NEXT_BIO(cbp) = NULL;
1026 if (G_RAID3_HEAD_BIO(pbp) == NULL)
1027 G_RAID3_HEAD_BIO(pbp) = cbp;
1029 G_RAID3_FOREACH_BIO(pbp, bp) {
1030 if (G_RAID3_NEXT_BIO(bp) == NULL) {
1031 G_RAID3_NEXT_BIO(bp) = cbp;
1040 g_raid3_scatter(struct bio *pbp)
1042 struct g_raid3_softc *sc;
1043 struct g_raid3_disk *disk;
1044 struct bio *bp, *cbp, *tmpbp;
1045 off_t atom, cadd, padd, left;
1048 sc = pbp->bio_to->geom->softc;
1050 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Find the bio for which we should calculate the (parity)
		 * data.
		 */
1054 G_RAID3_FOREACH_BIO(pbp, cbp) {
1055 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1060 KASSERT(bp != NULL, ("NULL parity bio."));
1062 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
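	/*
	 * 'atom' is the slice of each provider sector that goes to a single
	 * data component: sc_sectorsize / (sc_ndisks - 1).  For example, on
	 * a 3-component array (2 data + 1 parity) with a 1024-byte provider
	 * sector, each data component receives 512 bytes per sector.
	 */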
1064 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1065 G_RAID3_FOREACH_BIO(pbp, cbp) {
1068 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
1073 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1078 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1082 bcopy(cbp->bio_data, bp->bio_data,
1086 g_raid3_xor(cbp->bio_data, bp->bio_data,
1089 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
1090 g_raid3_destroy_bio(sc, cbp);
1093 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1094 struct g_consumer *cp;
1096 disk = cbp->bio_caller2;
1097 cp = disk->d_consumer;
1098 cbp->bio_to = cp->provider;
1099 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1100 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1101 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1102 cp->acr, cp->acw, cp->ace));
1105 g_io_request(cbp, cp);
1110 g_raid3_gather(struct bio *pbp)
1112 struct g_raid3_softc *sc;
1113 struct g_raid3_disk *disk;
1114 struct bio *xbp, *fbp, *cbp;
1115 off_t atom, cadd, padd, left;
1117 sc = pbp->bio_to->geom->softc;
	/*
	 * Find the bio for which we have to calculate data.
	 * While walking the list, check whether all requests succeeded and,
	 * if not, fail the whole parent request.
	 * In COMPLETE mode one request is allowed to fail; if we find one,
	 * the same request is re-sent to the parity consumer.
	 * If more requests have failed, the whole request is failed.
	 */
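	/*
	 * The recovery below relies on the RAID-3 invariant that the parity
	 * component is the XOR of all data components: a single missing data
	 * block can be rebuilt by XOR-ing the parity block with the surviving
	 * data blocks (see the g_raid3_xor() calls further down).
	 */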
1127 G_RAID3_FOREACH_BIO(pbp, cbp) {
1128 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1129 KASSERT(xbp == NULL, ("More than one parity bio."));
1132 if (cbp->bio_error == 0)
1135 * Found failed request.
1138 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
1140 * We are already in degraded mode, so we can't
1141 * accept any failures.
1143 if (pbp->bio_error == 0)
1144 pbp->bio_error = cbp->bio_error;
1150 * Next failed request, that's too many.
1152 if (pbp->bio_error == 0)
1153 pbp->bio_error = fbp->bio_error;
1155 disk = cbp->bio_caller2;
1158 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1159 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1160 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
1163 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
1166 if (g_raid3_disconnect_on_failure &&
1167 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1168 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1169 g_raid3_event_send(disk,
1170 G_RAID3_DISK_STATE_DISCONNECTED,
1171 G_RAID3_EVENT_DONTWAIT);
1174 if (pbp->bio_error != 0)
1176 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1177 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
1179 g_raid3_replace_bio(xbp, fbp);
1180 g_raid3_destroy_bio(sc, fbp);
1181 } else if (fbp != NULL) {
1182 struct g_consumer *cp;
1185 * One request failed, so send the same request to
1186 * the parity consumer.
1188 disk = pbp->bio_driver2;
1189 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1190 pbp->bio_error = fbp->bio_error;
1193 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1195 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
1196 if (disk->d_no == sc->sc_ndisks - 1)
1197 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1199 fbp->bio_completed = 0;
1200 fbp->bio_children = 0;
1202 cp = disk->d_consumer;
1203 fbp->bio_caller2 = disk;
1204 fbp->bio_to = cp->provider;
1205 G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
1206 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1207 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1208 cp->acr, cp->acw, cp->ace));
1210 g_io_request(fbp, cp);
1217 G_RAID3_FOREACH_BIO(pbp, cbp) {
1218 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
1220 g_raid3_xor(cbp->bio_data, xbp->bio_data,
1223 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
1224 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1225 if (!g_raid3_is_zero(xbp)) {
1226 g_raid3_parity_mismatch++;
1227 pbp->bio_error = EIO;
1230 g_raid3_destroy_bio(sc, xbp);
1233 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1235 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1236 G_RAID3_FOREACH_BIO(pbp, cbp) {
1237 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
1238 pbp->bio_completed += atom;
1244 if (pbp->bio_error == 0)
1245 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1247 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
1248 G_RAID3_LOGREQ(1, pbp, "Verification error.");
1250 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1252 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
1253 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1254 g_raid3_destroy_bio(sc, cbp);
1255 g_io_deliver(pbp, pbp->bio_error);
1259 g_raid3_done(struct bio *bp)
1261 struct g_raid3_softc *sc;
1263 sc = bp->bio_from->geom->softc;
1264 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
1265 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
1266 mtx_lock(&sc->sc_queue_mtx);
1267 bioq_insert_head(&sc->sc_queue, bp);
1268 mtx_unlock(&sc->sc_queue_mtx);
1270 wakeup(&sc->sc_queue);
1274 g_raid3_regular_request(struct bio *cbp)
1276 struct g_raid3_softc *sc;
1277 struct g_raid3_disk *disk;
1280 g_topology_assert_not();
1282 pbp = cbp->bio_parent;
1283 sc = pbp->bio_to->geom->softc;
1284 cbp->bio_from->index--;
1285 if (cbp->bio_cmd == BIO_WRITE)
1287 disk = cbp->bio_from->private;
1290 g_raid3_kill_consumer(sc, cbp->bio_from);
1291 g_topology_unlock();
1294 G_RAID3_LOGREQ(3, cbp, "Request finished.");
1296 KASSERT(pbp->bio_inbed <= pbp->bio_children,
1297 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
1298 pbp->bio_children));
1299 if (pbp->bio_inbed != pbp->bio_children)
1301 switch (pbp->bio_cmd) {
1303 g_raid3_gather(pbp);
1310 pbp->bio_completed = pbp->bio_length;
1311 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
1312 if (cbp->bio_error == 0) {
1313 g_raid3_destroy_bio(sc, cbp);
1318 error = cbp->bio_error;
1319 else if (pbp->bio_error == 0) {
1321 * Next failed request, that's too many.
1323 pbp->bio_error = error;
1326 disk = cbp->bio_caller2;
1328 g_raid3_destroy_bio(sc, cbp);
1332 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1333 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1334 G_RAID3_LOGREQ(0, cbp,
1335 "Request failed (error=%d).",
1338 G_RAID3_LOGREQ(1, cbp,
1339 "Request failed (error=%d).",
1342 if (g_raid3_disconnect_on_failure &&
1343 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1344 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1345 g_raid3_event_send(disk,
1346 G_RAID3_DISK_STATE_DISCONNECTED,
1347 G_RAID3_EVENT_DONTWAIT);
1349 g_raid3_destroy_bio(sc, cbp);
1351 if (pbp->bio_error == 0)
1352 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1354 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1355 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
1356 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
1357 bioq_remove(&sc->sc_inflight, pbp);
1358 /* Release delayed sync requests if possible. */
1359 g_raid3_sync_release(sc);
1360 g_io_deliver(pbp, pbp->bio_error);
1367 g_raid3_sync_done(struct bio *bp)
1369 struct g_raid3_softc *sc;
1371 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
1372 sc = bp->bio_from->geom->softc;
1373 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
1374 mtx_lock(&sc->sc_queue_mtx);
1375 bioq_insert_head(&sc->sc_queue, bp);
1376 mtx_unlock(&sc->sc_queue_mtx);
1378 wakeup(&sc->sc_queue);
1382 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
1384 struct bio_queue_head queue;
1385 struct g_raid3_disk *disk;
1386 struct g_consumer *cp;
1391 for (i = 0; i < sc->sc_ndisks; i++) {
1392 disk = &sc->sc_disks[i];
1393 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
1395 cbp = g_clone_bio(bp);
1397 for (cbp = bioq_first(&queue); cbp != NULL;
1398 cbp = bioq_first(&queue)) {
1399 bioq_remove(&queue, cbp);
1402 if (bp->bio_error == 0)
1403 bp->bio_error = ENOMEM;
1404 g_io_deliver(bp, bp->bio_error);
1407 bioq_insert_tail(&queue, cbp);
1408 cbp->bio_done = g_std_done;
1409 cbp->bio_caller1 = disk;
1410 cbp->bio_to = disk->d_consumer->provider;
1412 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1413 bioq_remove(&queue, cbp);
1414 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1415 disk = cbp->bio_caller1;
1416 cbp->bio_caller1 = NULL;
1417 cp = disk->d_consumer;
1418 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1419 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1420 cp->acr, cp->acw, cp->ace));
1421 g_io_request(cbp, disk->d_consumer);
1426 g_raid3_start(struct bio *bp)
1428 struct g_raid3_softc *sc;
1430 sc = bp->bio_to->geom->softc;
1432 * If sc == NULL or there are no valid disks, provider's error
1433 * should be set and g_raid3_start() should not be called at all.
1435 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
1436 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
1437 ("Provider's error should be set (error=%d)(device=%s).",
1438 bp->bio_to->error, bp->bio_to->name));
1439 G_RAID3_LOGREQ(3, bp, "Request received.");
1441 switch (bp->bio_cmd) {
1447 g_raid3_flush(sc, bp);
1451 g_io_deliver(bp, EOPNOTSUPP);
1454 mtx_lock(&sc->sc_queue_mtx);
1455 bioq_insert_tail(&sc->sc_queue, bp);
1456 mtx_unlock(&sc->sc_queue_mtx);
1457 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
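	/*
	 * Note on the comparison below: synchronization bios carry offsets
	 * in component space, so for WRITE requests both sstart and send are
	 * scaled by (sc_ndisks - 1) to convert them back into provider
	 * (array) space before being compared with the regular request.
	 */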
1468 struct g_raid3_disk *disk;
1470 off_t rstart, rend, sstart, send;
1473 disk = sc->sc_syncdisk;
1476 rstart = bp->bio_offset;
1477 rend = bp->bio_offset + bp->bio_length;
1478 for (i = 0; i < g_raid3_syncreqs; i++) {
1479 sbp = disk->d_sync.ds_bios[i];
1482 sstart = sbp->bio_offset;
1483 send = sbp->bio_length;
1484 if (sbp->bio_cmd == BIO_WRITE) {
1485 sstart *= sc->sc_ndisks - 1;
1486 send *= sc->sc_ndisks - 1;
1489 if (rend > sstart && rstart < send)
/*
 * Return TRUE if the given sync request collides with an in-progress regular
 * request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
1502 off_t rstart, rend, sstart, send;
1505 if (sc->sc_syncdisk == NULL)
1507 sstart = sbp->bio_offset;
1508 send = sstart + sbp->bio_length;
1509 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
1510 rstart = bp->bio_offset;
1511 rend = bp->bio_offset + bp->bio_length;
1512 if (rend > sstart && rstart < send)
1519 * Puts request onto delayed queue.
1522 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
1525 G_RAID3_LOGREQ(2, bp, "Delaying request.");
1526 bioq_insert_head(&sc->sc_regular_delayed, bp);
1530 * Puts synchronization request onto delayed queue.
1533 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
1536 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
1537 bioq_insert_tail(&sc->sc_sync_delayed, bp);
/*
 * Release delayed regular requests that no longer collide with
 * synchronization requests.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
1547 struct bio *bp, *bp2;
1549 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
1550 if (g_raid3_sync_collision(sc, bp))
1552 bioq_remove(&sc->sc_regular_delayed, bp);
1553 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
1554 mtx_lock(&sc->sc_queue_mtx);
1555 bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called
		 * from the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
/*
 * Release delayed synchronization requests that no longer collide with
 * regular requests.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
1574 struct bio *bp, *bp2;
1576 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
1577 if (g_raid3_regular_collision(sc, bp))
1579 bioq_remove(&sc->sc_sync_delayed, bp);
1580 G_RAID3_LOGREQ(2, bp,
1581 "Releasing delayed synchronization request.");
1582 g_io_request(bp, bp->bio_from);
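/*
 * A worked example of the offset arithmetic used below (the numbers are
 * illustrative only): synchronization READs are issued against the raid3/
 * provider, so their offset and length are in array space; before the data
 * is written to the single component being rebuilt, both are divided by
 * (sc_ndisks - 1).  With 5 components (4 data + 1 parity), a 64 kB READ
 * therefore becomes a 16 kB WRITE to the rebuilt component.
 */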
/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first a READ request
 * is sent to the active provider, and then a WRITE request (carrying the
 * data just read) is sent to the provider being synchronized.  When the
 * WRITE is finished, a new synchronization request is sent.
 */
static void
g_raid3_sync_request(struct bio *bp)
{
1596 struct g_raid3_softc *sc;
1597 struct g_raid3_disk *disk;
1599 bp->bio_from->index--;
1600 sc = bp->bio_from->geom->softc;
1601 disk = bp->bio_from->private;
1603 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
1605 g_raid3_kill_consumer(sc, bp->bio_from);
1606 g_topology_unlock();
1607 free(bp->bio_data, M_RAID3);
1609 sx_xlock(&sc->sc_lock);
1614 * Synchronization request.
1616 switch (bp->bio_cmd) {
1619 struct g_consumer *cp;
1624 if (bp->bio_error != 0) {
1625 G_RAID3_LOGREQ(0, bp,
1626 "Synchronization request failed (error=%d).",
1631 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1632 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1633 dst = src = bp->bio_data;
1634 if (disk->d_no == sc->sc_ndisks - 1) {
1637 /* Parity component. */
1638 for (left = bp->bio_length; left > 0;
1639 left -= sc->sc_sectorsize) {
1640 bcopy(src, dst, atom);
1642 for (n = 1; n < sc->sc_ndisks - 1; n++) {
1643 g_raid3_xor(src, dst, atom);
1649 /* Regular component. */
1650 src += atom * disk->d_no;
1651 for (left = bp->bio_length; left > 0;
1652 left -= sc->sc_sectorsize) {
1653 bcopy(src, dst, atom);
1654 src += sc->sc_sectorsize;
1658 bp->bio_driver1 = bp->bio_driver2 = NULL;
1660 bp->bio_offset /= sc->sc_ndisks - 1;
1661 bp->bio_length /= sc->sc_ndisks - 1;
1662 bp->bio_cmd = BIO_WRITE;
1664 bp->bio_children = bp->bio_inbed = 0;
1665 cp = disk->d_consumer;
1666 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1667 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1668 cp->acr, cp->acw, cp->ace));
1670 g_io_request(bp, cp);
1675 struct g_raid3_disk_sync *sync;
1676 off_t boffset, moffset;
1680 if (bp->bio_error != 0) {
1681 G_RAID3_LOGREQ(0, bp,
1682 "Synchronization request failed (error=%d).",
1685 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1686 g_raid3_event_send(disk,
1687 G_RAID3_DISK_STATE_DISCONNECTED,
1688 G_RAID3_EVENT_DONTWAIT);
1691 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1692 sync = &disk->d_sync;
1693 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
1694 sync->ds_consumer == NULL ||
1695 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1696 /* Don't send more synchronization requests. */
1697 sync->ds_inflight--;
1698 if (sync->ds_bios != NULL) {
1699 i = (int)(uintptr_t)bp->bio_caller1;
1700 sync->ds_bios[i] = NULL;
1702 free(bp->bio_data, M_RAID3);
1704 if (sync->ds_inflight > 0)
1706 if (sync->ds_consumer == NULL ||
1707 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1711 * Disk up-to-date, activate it.
1713 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
1714 G_RAID3_EVENT_DONTWAIT);
1718 /* Send next synchronization request. */
1719 data = bp->bio_data;
1721 bp->bio_cmd = BIO_READ;
1722 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
1723 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1724 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
1725 bp->bio_done = g_raid3_sync_done;
1726 bp->bio_data = data;
1727 bp->bio_from = sync->ds_consumer;
1728 bp->bio_to = sc->sc_provider;
1729 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
1730 sync->ds_consumer->index++;
1732 * Delay the request if it is colliding with a regular request.
1734 if (g_raid3_regular_collision(sc, bp))
1735 g_raid3_sync_delay(sc, bp);
1737 g_io_request(bp, sync->ds_consumer);
1739 /* Release delayed requests if possible. */
1740 g_raid3_regular_release(sc);
1742 /* Find the smallest offset. */
1743 moffset = sc->sc_mediasize;
1744 for (i = 0; i < g_raid3_syncreqs; i++) {
1745 bp = sync->ds_bios[i];
1746 boffset = bp->bio_offset;
1747 if (bp->bio_cmd == BIO_WRITE)
1748 boffset *= sc->sc_ndisks - 1;
1749 if (boffset < moffset)
1752 if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
1753 /* Update offset_done on every 100 blocks. */
1754 sync->ds_offset_done = moffset;
1755 g_raid3_update_metadata(disk);
1760 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1761 bp->bio_cmd, sc->sc_name));
1767 g_raid3_register_request(struct bio *pbp)
1769 struct g_raid3_softc *sc;
1770 struct g_raid3_disk *disk;
1771 struct g_consumer *cp;
1772 struct bio *cbp, *tmpbp;
1773 off_t offset, length;
1775 int round_robin, verify;
1778 sc = pbp->bio_to->geom->softc;
1779 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
1780 sc->sc_syncdisk == NULL) {
1781 g_io_deliver(pbp, EIO);
1784 g_raid3_init_bio(pbp);
1785 length = pbp->bio_length / (sc->sc_ndisks - 1);
1786 offset = pbp->bio_offset / (sc->sc_ndisks - 1);
1787 round_robin = verify = 0;
1788 switch (pbp->bio_cmd) {
1790 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
1791 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1792 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
1794 ndisks = sc->sc_ndisks;
1797 ndisks = sc->sc_ndisks - 1;
1799 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
1800 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1805 KASSERT(!round_robin || !verify,
1806 ("ROUND-ROBIN and VERIFY are mutually exclusive."));
1807 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
1812 * Delay the request if it is colliding with a synchronization
1815 if (g_raid3_sync_collision(sc, pbp)) {
1816 g_raid3_regular_delay(sc, pbp);
1823 sc->sc_last_write = time_uptime;
1825 ndisks = sc->sc_ndisks;
1828 for (n = 0; n < ndisks; n++) {
1829 disk = &sc->sc_disks[n];
1830 cbp = g_raid3_clone_bio(sc, pbp);
1832 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1833 g_raid3_destroy_bio(sc, cbp);
1835 * To prevent deadlock, we must run back up
1836 * with the ENOMEM for failed requests of any
1837 * of our consumers. Our own sync requests
1838 * can stick around, as they are finite.
1840 if ((pbp->bio_cflags &
1841 G_RAID3_BIO_CFLAG_REGULAR) != 0) {
1842 g_io_deliver(pbp, ENOMEM);
1847 cbp->bio_offset = offset;
1848 cbp->bio_length = length;
1849 cbp->bio_done = g_raid3_done;
1850 switch (pbp->bio_cmd) {
1852 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1854 * Replace invalid component with the parity
1857 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1858 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1859 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1860 } else if (round_robin &&
1861 disk->d_no == sc->sc_round_robin) {
1863 * In round-robin mode skip one data component
1864 * and use parity component when reading.
1866 pbp->bio_driver2 = disk;
1867 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1868 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1869 sc->sc_round_robin++;
1871 } else if (verify && disk->d_no == sc->sc_ndisks - 1) {
1872 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1877 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
1878 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
1879 if (n == ndisks - 1) {
1881 * Active parity component, mark it as such.
1884 G_RAID3_BIO_CFLAG_PARITY;
1887 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1888 if (n == ndisks - 1) {
1890 * Parity component is not connected,
1891 * so destroy its request.
1894 G_RAID3_BIO_PFLAG_NOPARITY;
1895 g_raid3_destroy_bio(sc, cbp);
1899 G_RAID3_BIO_CFLAG_NODISK;
1906 cbp->bio_caller2 = disk;
1908 switch (pbp->bio_cmd) {
		/*
		 * If we are in round-robin mode and 'round_robin' is still 1,
		 * it means that we skipped the parity component for this read
		 * and must reset the sc_round_robin field.
		 */
1916 sc->sc_round_robin = 0;
1918 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1919 disk = cbp->bio_caller2;
1920 cp = disk->d_consumer;
1921 cbp->bio_to = cp->provider;
1922 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1923 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1924 ("Consumer %s not opened (r%dw%de%d).",
1925 cp->provider->name, cp->acr, cp->acw, cp->ace));
1927 g_io_request(cbp, cp);
	/*
	 * Put the request onto the inflight queue, so that new
	 * synchronization requests can be checked for collisions with it.
	 */
1936 bioq_insert_tail(&sc->sc_inflight, pbp);
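	/*
	 * The sc_inflight queue is what g_raid3_regular_collision() scans, so
	 * a synchronization request overlapping this range will be parked on
	 * sc_sync_delayed and released by g_raid3_sync_release() once the
	 * request completes in g_raid3_regular_request().
	 */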
1939 * Bump syncid on first write.
1941 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1942 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1943 g_raid3_bump_syncid(sc);
1945 g_raid3_scatter(pbp);
1952 g_raid3_can_destroy(struct g_raid3_softc *sc)
1955 struct g_consumer *cp;
1957 g_topology_assert();
1959 if (gp->softc == NULL)
1961 LIST_FOREACH(cp, &gp->consumer, consumer) {
1962 if (g_raid3_is_busy(sc, cp))
1965 gp = sc->sc_sync.ds_geom;
1966 LIST_FOREACH(cp, &gp->consumer, consumer) {
1967 if (g_raid3_is_busy(sc, cp))
1970 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1976 g_raid3_try_destroy(struct g_raid3_softc *sc)
1979 g_topology_assert_not();
1980 sx_assert(&sc->sc_lock, SX_XLOCKED);
1982 if (sc->sc_rootmount != NULL) {
1983 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1985 root_mount_rel(sc->sc_rootmount);
1986 sc->sc_rootmount = NULL;
1990 if (!g_raid3_can_destroy(sc)) {
1991 g_topology_unlock();
1994 sc->sc_geom->softc = NULL;
1995 sc->sc_sync.ds_geom->softc = NULL;
1996 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
1997 g_topology_unlock();
1998 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2000 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
2001 sx_xunlock(&sc->sc_lock);
2002 wakeup(&sc->sc_worker);
2003 sc->sc_worker = NULL;
2005 g_topology_unlock();
2006 g_raid3_destroy_device(sc);
2007 free(sc->sc_disks, M_RAID3);
2017 g_raid3_worker(void *arg)
2019 struct g_raid3_softc *sc;
2020 struct g_raid3_event *ep;
2025 thread_lock(curthread);
2026 sched_prio(curthread, PRIBIO);
2027 thread_unlock(curthread);
2029 sx_xlock(&sc->sc_lock);
2031 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2033 * First take a look at events.
2034 * This is important to handle events before any I/O requests.
2036 ep = g_raid3_event_get(sc);
2038 g_raid3_event_remove(sc, ep);
2039 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2040 /* Update only device status. */
2042 "Running event for device %s.",
2045 g_raid3_update_device(sc, 1);
2047 /* Update disk status. */
2048 G_RAID3_DEBUG(3, "Running event for disk %s.",
2049 g_raid3_get_diskname(ep->e_disk));
2050 ep->e_error = g_raid3_update_disk(ep->e_disk,
2052 if (ep->e_error == 0)
2053 g_raid3_update_device(sc, 0);
2055 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2056 KASSERT(ep->e_error == 0,
2057 ("Error cannot be handled."));
2058 g_raid3_event_free(ep);
2060 ep->e_flags |= G_RAID3_EVENT_DONE;
2061 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2063 mtx_lock(&sc->sc_events_mtx);
2065 mtx_unlock(&sc->sc_events_mtx);
2068 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2069 if (g_raid3_try_destroy(sc)) {
2070 curthread->td_pflags &= ~TDP_GEOM;
2071 G_RAID3_DEBUG(1, "Thread exiting.");
2075 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
		/*
		 * Check whether we can mark the array as CLEAN and, if we
		 * cannot, how many seconds we should wait.
		 */
2082 timeout = g_raid3_idle(sc, -1);
2086 /* Get first request from the queue. */
2087 mtx_lock(&sc->sc_queue_mtx);
2088 bp = bioq_first(&sc->sc_queue);
2091 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2092 mtx_unlock(&sc->sc_queue_mtx);
2093 if (g_raid3_try_destroy(sc)) {
2094 curthread->td_pflags &= ~TDP_GEOM;
2095 G_RAID3_DEBUG(1, "Thread exiting.");
2098 mtx_lock(&sc->sc_queue_mtx);
2100 sx_xunlock(&sc->sc_lock);
2102 * XXX: We can miss an event here, because an event
2103 * can be added without sx-device-lock and without
2104 * mtx-queue-lock. Maybe I should just stop using
2105 * dedicated mutex for events synchronization and
2106 * stick with the queue lock?
2107 * The event will hang here until next I/O request
2108 * or next event is received.
2110 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2112 sx_xlock(&sc->sc_lock);
2113 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2117 bioq_remove(&sc->sc_queue, bp);
2118 mtx_unlock(&sc->sc_queue_mtx);
2120 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2121 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2122 g_raid3_sync_request(bp); /* READ */
2123 } else if (bp->bio_to != sc->sc_provider) {
2124 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2125 g_raid3_regular_request(bp);
2126 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2127 g_raid3_sync_request(bp); /* WRITE */
2130 ("Invalid request cflags=0x%hx to=%s.",
2131 bp->bio_cflags, bp->bio_to->name));
2133 } else if (g_raid3_register_request(bp) != 0) {
2134 mtx_lock(&sc->sc_queue_mtx);
2135 bioq_insert_head(&sc->sc_queue, bp);
			/*
			 * We are short on memory: let's see whether there are
			 * any finished requests we can free.
			 */
2140 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2141 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2145 * No finished regular request, so at least keep
2146 * synchronization running.
2148 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2149 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2152 sx_xunlock(&sc->sc_lock);
2153 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2154 "r3:lowmem", hz / 10);
2155 sx_xlock(&sc->sc_lock);
2157 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2162 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2165 sx_assert(&sc->sc_lock, SX_LOCKED);
2166 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2168 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2169 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2170 g_raid3_get_diskname(disk), sc->sc_name);
2171 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2172 } else if (sc->sc_idle &&
2173 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2174 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2175 g_raid3_get_diskname(disk), sc->sc_name);
2176 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2181 g_raid3_sync_start(struct g_raid3_softc *sc)
2183 struct g_raid3_disk *disk;
2184 struct g_consumer *cp;
2189 g_topology_assert_not();
2190 sx_assert(&sc->sc_lock, SX_XLOCKED);
2192 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2193 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2195 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2196 sc->sc_name, sc->sc_state));
2198 for (n = 0; n < sc->sc_ndisks; n++) {
2199 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2201 disk = &sc->sc_disks[n];
2207 sx_xunlock(&sc->sc_lock);
2209 cp = g_new_consumer(sc->sc_sync.ds_geom);
2210 error = g_attach(cp, sc->sc_provider);
2212 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2213 error = g_access(cp, 1, 0, 0);
2214 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2215 g_topology_unlock();
2216 sx_xlock(&sc->sc_lock);
2218 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2219 g_raid3_get_diskname(disk));
2220 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2221 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2222 KASSERT(disk->d_sync.ds_consumer == NULL,
2223 ("Sync consumer already exists (device=%s, disk=%s).",
2224 sc->sc_name, g_raid3_get_diskname(disk)));
2226 disk->d_sync.ds_consumer = cp;
2227 disk->d_sync.ds_consumer->private = disk;
2228 disk->d_sync.ds_consumer->index = 0;
2229 sc->sc_syncdisk = disk;
2232 * Allocate memory for synchronization bios and initialize them.
2234 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2236 for (n = 0; n < g_raid3_syncreqs; n++) {
2238 disk->d_sync.ds_bios[n] = bp;
2239 bp->bio_parent = NULL;
2240 bp->bio_cmd = BIO_READ;
2241 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2243 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2244 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2245 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2246 bp->bio_done = g_raid3_sync_done;
2247 bp->bio_from = disk->d_sync.ds_consumer;
2248 bp->bio_to = sc->sc_provider;
2249 bp->bio_caller1 = (void *)(uintptr_t)n;
2252 /* Set the number of in-flight synchronization requests. */
2253 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2256 * Fire off first synchronization requests.
2258 for (n = 0; n < g_raid3_syncreqs; n++) {
2259 bp = disk->d_sync.ds_bios[n];
2260 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2261 disk->d_sync.ds_consumer->index++;
2263 * Delay the request if it is colliding with a regular request.
2265 if (g_raid3_regular_collision(sc, bp))
2266 g_raid3_sync_delay(sc, bp);
2268 g_io_request(bp, disk->d_sync.ds_consumer);
2273 * Stop synchronization process.
2274 * type: 0 - synchronization finished
2275 * 1 - synchronization stopped
2278 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2280 struct g_raid3_disk *disk;
2281 struct g_consumer *cp;
2283 g_topology_assert_not();
2284 sx_assert(&sc->sc_lock, SX_LOCKED);
2286 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2287 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2289 disk = sc->sc_syncdisk;
2290 sc->sc_syncdisk = NULL;
2291 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2292 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2293 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2294 g_raid3_disk_state2str(disk->d_state)));
2295 if (disk->d_sync.ds_consumer == NULL)
2299 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2300 sc->sc_name, g_raid3_get_diskname(disk));
2301 } else /* if (type == 1) */ {
2302 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2303 sc->sc_name, g_raid3_get_diskname(disk));
2305 free(disk->d_sync.ds_bios, M_RAID3);
2306 disk->d_sync.ds_bios = NULL;
2307 cp = disk->d_sync.ds_consumer;
2308 disk->d_sync.ds_consumer = NULL;
2309 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2310 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2312 g_raid3_kill_consumer(sc, cp);
2313 g_topology_unlock();
2314 sx_xlock(&sc->sc_lock);
2318 g_raid3_launch_provider(struct g_raid3_softc *sc)
2320 struct g_provider *pp;
2321 struct g_raid3_disk *disk;
2324 sx_assert(&sc->sc_lock, SX_LOCKED);
2327 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2328 pp->mediasize = sc->sc_mediasize;
2329 pp->sectorsize = sc->sc_sectorsize;
2331 pp->stripeoffset = 0;
2332 for (n = 0; n < sc->sc_ndisks; n++) {
2333 disk = &sc->sc_disks[n];
2334 if (disk->d_consumer && disk->d_consumer->provider &&
2335 disk->d_consumer->provider->stripesize > pp->stripesize) {
2336 pp->stripesize = disk->d_consumer->provider->stripesize;
2337 pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2340 pp->stripesize *= sc->sc_ndisks - 1;
2341 pp->stripeoffset *= sc->sc_ndisks - 1;
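	/*
	 * The advertised stripe parameters are scaled into array space; for
	 * example, if the widest component reports a 64 kB stripe on a
	 * 3-component array (2 data + 1 parity), the raid3/ provider
	 * advertises a 128 kB stripe.
	 */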
2342 sc->sc_provider = pp;
2343 g_error_provider(pp, 0);
2344 g_topology_unlock();
2345 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2346 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2348 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2349 g_raid3_sync_start(sc);
2353 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2357 g_topology_assert_not();
2358 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2362 g_error_provider(sc->sc_provider, ENXIO);
2363 mtx_lock(&sc->sc_queue_mtx);
2364 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2365 bioq_remove(&sc->sc_queue, bp);
2366 g_io_deliver(bp, ENXIO);
2368 mtx_unlock(&sc->sc_queue_mtx);
2369 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2370 sc->sc_provider->name);
2371 g_wither_provider(sc->sc_provider, ENXIO);
2372 g_topology_unlock();
2373 sc->sc_provider = NULL;
2374 if (sc->sc_syncdisk != NULL)
2375 g_raid3_sync_stop(sc, 1);
2379 g_raid3_go(void *arg)
2381 struct g_raid3_softc *sc;
2384 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2385 g_raid3_event_send(sc, 0,
2386 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
2390 g_raid3_determine_state(struct g_raid3_disk *disk)
2392 struct g_raid3_softc *sc;
2396 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2397 if ((disk->d_flags &
2398 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2399 /* Disk does not need synchronization. */
2400 state = G_RAID3_DISK_STATE_ACTIVE;
2403 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2405 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2407 * We can start synchronization from
2408 * the stored offset.
2410 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2412 state = G_RAID3_DISK_STATE_STALE;
2415 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk, because even
		 * if it was synchronized, it was synchronized against disks
		 * with a different syncid.
		 */
2421 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2422 disk->d_sync.ds_offset = 0;
2423 disk->d_sync.ds_offset_done = 0;
2424 disk->d_sync.ds_syncid = sc->sc_syncid;
2425 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2426 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2427 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2429 state = G_RAID3_DISK_STATE_STALE;
2431 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2433 * Not good, NOT GOOD!
2434 * It means that the device was started on stale disks
2435 * and a fresher disk has just arrived.
2436 * If there were writes, the device is broken, sorry.
2437 * The best choice here is not to touch this disk
2438 * and to inform the user loudly.
2440 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2441 "disk (%s) arrives!! It will not be connected to the "
2442 "running device.", sc->sc_name,
2443 g_raid3_get_diskname(disk));
2444 g_raid3_destroy_disk(disk);
2445 state = G_RAID3_DISK_STATE_NONE;
2446 /* Return immediately, because disk was destroyed. */
2449 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2450 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2455 * Update device state.
2458 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2460 struct g_raid3_disk *disk;
2463 sx_assert(&sc->sc_lock, SX_XLOCKED);
2465 switch (sc->sc_state) {
2466 case G_RAID3_DEVICE_STATE_STARTING:
2468 u_int n, ndirty, ndisks, genid, syncid;
2470 KASSERT(sc->sc_provider == NULL,
2471 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2473 * Are we ready? We are, if all disks are connected or
2474 * one disk is missing and 'force' is true.
2476 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2478 callout_drain(&sc->sc_callout);
2482 * Timeout expired, so destroy device.
2484 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2485 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2486 __LINE__, sc->sc_rootmount);
2487 root_mount_rel(sc->sc_rootmount);
2488 sc->sc_rootmount = NULL;
2494 * Find the biggest genid.
2497 for (n = 0; n < sc->sc_ndisks; n++) {
2498 disk = &sc->sc_disks[n];
2499 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2501 if (disk->d_genid > genid)
2502 genid = disk->d_genid;
2504 sc->sc_genid = genid;
2506 * Remove all disks without the biggest genid.
2508 for (n = 0; n < sc->sc_ndisks; n++) {
2509 disk = &sc->sc_disks[n];
2510 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2512 if (disk->d_genid < genid) {
2514 "Component %s (device %s) broken, skipping.",
2515 g_raid3_get_diskname(disk), sc->sc_name);
2516 g_raid3_destroy_disk(disk);
2521 * There must be at least 'sc->sc_ndisks - 1' components
2522 * with the same syncid and without the SYNCHRONIZING flag.
2526 * Find the biggest syncid, the number of valid components
2527 * and the number of dirty components.
2529 ndirty = ndisks = syncid = 0;
2530 for (n = 0; n < sc->sc_ndisks; n++) {
2531 disk = &sc->sc_disks[n];
2532 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2534 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2536 if (disk->d_sync.ds_syncid > syncid) {
2537 syncid = disk->d_sync.ds_syncid;
2539 } else if (disk->d_sync.ds_syncid < syncid) {
2542 if ((disk->d_flags &
2543 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2549 * Do we have enough valid components?
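 * A RAID3 array can operate with at most one component missing, so
 * fewer than sc_ndisks - 1 valid components means the device is broken.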
2551 if (ndisks + 1 < sc->sc_ndisks) {
2553 "Device %s is broken, too few valid components.",
2555 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2559 * If there is one DIRTY component and all disks are present,
2560 * mark it for synchronization. If there is more than one DIRTY
2561 * component, mark the parity component for synchronization.
2563 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2564 for (n = 0; n < sc->sc_ndisks; n++) {
2565 disk = &sc->sc_disks[n];
2566 if ((disk->d_flags &
2567 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2571 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2573 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
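/* The parity component is always the last one (number sc_ndisks - 1). */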
2574 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2575 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2578 sc->sc_syncid = syncid;
2580 /* Remember to bump syncid on first write. */
2581 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2583 if (ndisks == sc->sc_ndisks)
2584 state = G_RAID3_DEVICE_STATE_COMPLETE;
2585 else /* if (ndisks == sc->sc_ndisks - 1) */
2586 state = G_RAID3_DEVICE_STATE_DEGRADED;
2587 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2588 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2589 g_raid3_device_state2str(state));
2590 sc->sc_state = state;
2591 for (n = 0; n < sc->sc_ndisks; n++) {
2592 disk = &sc->sc_disks[n];
2593 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2595 state = g_raid3_determine_state(disk);
2596 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2597 if (state == G_RAID3_DISK_STATE_STALE)
2598 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2602 case G_RAID3_DEVICE_STATE_DEGRADED:
2604 * Genid needs to be bumped immediately, so do it here.
2606 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2607 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2608 g_raid3_bump_genid(sc);
2611 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2613 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2614 sc->sc_ndisks - 1) {
2615 if (sc->sc_provider != NULL)
2616 g_raid3_destroy_provider(sc);
2617 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2620 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2622 state = G_RAID3_DEVICE_STATE_COMPLETE;
2624 "Device %s state changed from %s to %s.",
2625 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2626 g_raid3_device_state2str(state));
2627 sc->sc_state = state;
2629 if (sc->sc_provider == NULL)
2630 g_raid3_launch_provider(sc);
2631 if (sc->sc_rootmount != NULL) {
2632 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2634 root_mount_rel(sc->sc_rootmount);
2635 sc->sc_rootmount = NULL;
2638 case G_RAID3_DEVICE_STATE_COMPLETE:
2640 * Genid needs to be bumped immediately, so do it here.
2642 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2643 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2644 g_raid3_bump_genid(sc);
2647 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2649 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2651 ("Too few ACTIVE components in COMPLETE state (device %s).",
2653 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2654 sc->sc_ndisks - 1) {
2655 state = G_RAID3_DEVICE_STATE_DEGRADED;
2657 "Device %s state changed from %s to %s.",
2658 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2659 g_raid3_device_state2str(state));
2660 sc->sc_state = state;
2662 if (sc->sc_provider == NULL)
2663 g_raid3_launch_provider(sc);
2664 if (sc->sc_rootmount != NULL) {
2665 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2667 root_mount_rel(sc->sc_rootmount);
2668 sc->sc_rootmount = NULL;
2672 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2673 g_raid3_device_state2str(sc->sc_state)));
2679 * Update disk state and device state if needed.
2681 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2682 "Disk %s state changed from %s to %s (device %s).", \
2683 g_raid3_get_diskname(disk), \
2684 g_raid3_disk_state2str(disk->d_state), \
2685 g_raid3_disk_state2str(state), sc->sc_name)
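/*
 * Apply a disk state transition (usually requested through the event
 * queue) and perform its side effects: metadata updates, starting or
 * stopping synchronization, disconnecting the component.
 */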
2687 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2689 struct g_raid3_softc *sc;
2692 sx_assert(&sc->sc_lock, SX_XLOCKED);
2695 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2696 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2697 g_raid3_disk_state2str(state));
2699 case G_RAID3_DISK_STATE_NEW:
2701 * Possible scenarios:
2702 * 1. A new disk arrives.
2704 /* Previous state should be NONE. */
2705 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2706 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2707 g_raid3_disk_state2str(disk->d_state)));
2708 DISK_STATE_CHANGED();
2710 disk->d_state = state;
2711 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2712 sc->sc_name, g_raid3_get_diskname(disk));
2713 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2715 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2716 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2717 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2718 g_raid3_device_state2str(sc->sc_state),
2719 g_raid3_get_diskname(disk),
2720 g_raid3_disk_state2str(disk->d_state)));
2721 state = g_raid3_determine_state(disk);
2722 if (state != G_RAID3_DISK_STATE_NONE)
2725 case G_RAID3_DISK_STATE_ACTIVE:
2727 * Possible scenarios:
2728 * 1. New disk does not need synchronization.
2729 * 2. Synchronization process finished successfully.
2731 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2732 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2733 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2734 g_raid3_device_state2str(sc->sc_state),
2735 g_raid3_get_diskname(disk),
2736 g_raid3_disk_state2str(disk->d_state)));
2737 /* Previous state should be NEW or SYNCHRONIZING. */
2738 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2739 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2740 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2741 g_raid3_disk_state2str(disk->d_state)));
2742 DISK_STATE_CHANGED();
2744 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2745 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2746 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2747 g_raid3_sync_stop(sc, 0);
2749 disk->d_state = state;
2750 disk->d_sync.ds_offset = 0;
2751 disk->d_sync.ds_offset_done = 0;
2752 g_raid3_update_idle(sc, disk);
2753 g_raid3_update_metadata(disk);
2754 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2755 sc->sc_name, g_raid3_get_diskname(disk));
2757 case G_RAID3_DISK_STATE_STALE:
2759 * Possible scenarios:
2760 * 1. Stale disk was connected.
2762 /* Previous state should be NEW. */
2763 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2764 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2765 g_raid3_disk_state2str(disk->d_state)));
2766 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2767 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2768 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2769 g_raid3_device_state2str(sc->sc_state),
2770 g_raid3_get_diskname(disk),
2771 g_raid3_disk_state2str(disk->d_state)));
2773 * STALE state is only possible if device is marked
2776 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2777 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2778 g_raid3_device_state2str(sc->sc_state),
2779 g_raid3_get_diskname(disk),
2780 g_raid3_disk_state2str(disk->d_state)));
2781 DISK_STATE_CHANGED();
2783 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2784 disk->d_state = state;
2785 g_raid3_update_metadata(disk);
2786 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2787 sc->sc_name, g_raid3_get_diskname(disk));
2789 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2791 * Possible scenarios:
2792 * 1. Disk which needs synchronization was connected.
2794 /* Previous state should be NEW. */
2795 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2796 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2797 g_raid3_disk_state2str(disk->d_state)));
2798 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2799 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2800 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2801 g_raid3_device_state2str(sc->sc_state),
2802 g_raid3_get_diskname(disk),
2803 g_raid3_disk_state2str(disk->d_state)));
2804 DISK_STATE_CHANGED();
2806 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2807 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2808 disk->d_state = state;
2809 if (sc->sc_provider != NULL) {
2810 g_raid3_sync_start(sc);
2811 g_raid3_update_metadata(disk);
2814 case G_RAID3_DISK_STATE_DISCONNECTED:
2816 * Possible scenarios:
2817 * 1. Device wasn't running yet, but a disk disappeared.
2818 * 2. Disk was active and disappeared.
2819 * 3. Disk disappeared during the synchronization process.
2821 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2822 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2824 * Previous state should be ACTIVE, STALE or
2827 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2828 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2829 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2830 ("Wrong disk state (%s, %s).",
2831 g_raid3_get_diskname(disk),
2832 g_raid3_disk_state2str(disk->d_state)));
2833 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2834 /* Previous state should be NEW. */
2835 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2836 ("Wrong disk state (%s, %s).",
2837 g_raid3_get_diskname(disk),
2838 g_raid3_disk_state2str(disk->d_state)));
2840 * Reset bumping syncid if disk disappeared in STARTING
2843 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2844 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2847 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2849 g_raid3_device_state2str(sc->sc_state),
2850 g_raid3_get_diskname(disk),
2851 g_raid3_disk_state2str(disk->d_state)));
2854 DISK_STATE_CHANGED();
2855 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2856 sc->sc_name, g_raid3_get_diskname(disk));
2858 g_raid3_destroy_disk(disk);
2861 KASSERT(1 == 0, ("Unknown state (%u).", state));
2866 #undef DISK_STATE_CHANGED
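/*
 * Read the metadata block from the last sector of the consumer's provider
 * and decode and validate it.
 */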
2869 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2871 struct g_provider *pp;
2875 g_topology_assert();
2877 error = g_access(cp, 1, 0, 0);
2881 g_topology_unlock();
2882 /* Metadata are stored on the last sector. */
2883 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2886 g_access(cp, -1, 0, 0);
2888 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2889 cp->provider->name, error);
2893 /* Decode metadata. */
2894 error = raid3_metadata_decode(buf, md);
2896 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2898 if (md->md_version > G_RAID3_VERSION) {
2900 "Kernel module is too old to handle metadata from %s.",
2901 cp->provider->name);
2905 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2906 cp->provider->name);
2909 if (md->md_sectorsize > MAXPHYS) {
2910 G_RAID3_DEBUG(0, "The blocksize is too big.");
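/*
 * Validate a candidate component's metadata against the already configured
 * device; any mismatch causes the component to be skipped.
 */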
2918 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2919 struct g_raid3_metadata *md)
2922 if (md->md_no >= sc->sc_ndisks) {
2923 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2924 pp->name, md->md_no);
2927 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2928 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2929 pp->name, md->md_no);
2932 if (md->md_all != sc->sc_ndisks) {
2934 "Invalid '%s' field on disk %s (device %s), skipping.",
2935 "md_all", pp->name, sc->sc_name);
2938 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2939 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2940 "0) on disk %s (device %s), skipping.", pp->name,
2944 if (md->md_mediasize != sc->sc_mediasize) {
2946 "Invalid '%s' field on disk %s (device %s), skipping.",
2947 "md_mediasize", pp->name, sc->sc_name);
2950 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2952 "Invalid '%s' field on disk %s (device %s), skipping.",
2953 "md_mediasize", pp->name, sc->sc_name);
2956 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2958 "Invalid size of disk %s (device %s), skipping.", pp->name,
2962 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2964 "Invalid '%s' field on disk %s (device %s), skipping.",
2965 "md_sectorsize", pp->name, sc->sc_name);
2968 if (md->md_sectorsize != sc->sc_sectorsize) {
2970 "Invalid '%s' field on disk %s (device %s), skipping.",
2971 "md_sectorsize", pp->name, sc->sc_name);
2974 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2976 "Invalid sector size of disk %s (device %s), skipping.",
2977 pp->name, sc->sc_name);
2980 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2982 "Invalid device flags on disk %s (device %s), skipping.",
2983 pp->name, sc->sc_name);
2986 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2987 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2989 * VERIFY and ROUND-ROBIN options are mutually exclusive.
2991 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
2992 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
2995 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
2997 "Invalid disk flags on disk %s (device %s), skipping.",
2998 pp->name, sc->sc_name);
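/*
 * Attach a validated component to the device and queue a NEW-state event
 * for it; components with a stale generation id are rejected.
 */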
3005 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3006 struct g_raid3_metadata *md)
3008 struct g_raid3_disk *disk;
3011 g_topology_assert_not();
3012 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3014 error = g_raid3_check_metadata(sc, pp, md);
3017 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3018 md->md_genid < sc->sc_genid) {
3019 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3020 pp->name, sc->sc_name);
3023 disk = g_raid3_init_disk(sc, pp, md, &error);
3026 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3027 G_RAID3_EVENT_WAIT);
3030 if (md->md_version < G_RAID3_VERSION) {
3031 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3032 pp->name, md->md_version, G_RAID3_VERSION);
3033 g_raid3_update_metadata(disk);
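/*
 * Executed from the GEOM event queue once the provider sees its last
 * close; finishes a destruction postponed by g_raid3_access().
 */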
3039 g_raid3_destroy_delayed(void *arg, int flag)
3041 struct g_raid3_softc *sc;
3044 if (flag == EV_CANCEL) {
3045 G_RAID3_DEBUG(1, "Destroying canceled.");
3049 g_topology_unlock();
3050 sx_xlock(&sc->sc_lock);
3051 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3052 ("DESTROY flag set on %s.", sc->sc_name));
3053 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3054 ("DESTROYING flag not set on %s.", sc->sc_name));
3055 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3056 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3058 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3059 sx_xunlock(&sc->sc_lock);
3065 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3067 struct g_raid3_softc *sc;
3068 int dcr, dcw, dce, error = 0;
3070 g_topology_assert();
3071 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3074 sc = pp->geom->softc;
3075 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3077 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3079 dcr = pp->acr + acr;
3080 dcw = pp->acw + acw;
3081 dce = pp->ace + ace;
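	/* dcr/dcw/dce are the access counts the provider will have after this call. */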
3083 g_topology_unlock();
3084 sx_xlock(&sc->sc_lock);
3085 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3086 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3087 if (acr > 0 || acw > 0 || ace > 0)
3092 g_raid3_idle(sc, dcw);
3093 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3094 if (acr > 0 || acw > 0 || ace > 0) {
3098 if (dcr == 0 && dcw == 0 && dce == 0) {
3099 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3104 sx_xunlock(&sc->sc_lock);
3109 static struct g_geom *
3110 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3112 struct g_raid3_softc *sc;
3117 g_topology_assert();
3118 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3120 /* One disk is minimum. */
3126 gp = g_new_geomf(mp, "%s", md->md_name);
3127 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3128 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3130 gp->start = g_raid3_start;
3131 gp->orphan = g_raid3_orphan;
3132 gp->access = g_raid3_access;
3133 gp->dumpconf = g_raid3_dumpconf;
3135 sc->sc_id = md->md_id;
3136 sc->sc_mediasize = md->md_mediasize;
3137 sc->sc_sectorsize = md->md_sectorsize;
3138 sc->sc_ndisks = md->md_all;
3139 sc->sc_round_robin = 0;
3140 sc->sc_flags = md->md_mflags;
3143 sc->sc_last_write = time_uptime;
3145 for (n = 0; n < sc->sc_ndisks; n++) {
3146 sc->sc_disks[n].d_softc = sc;
3147 sc->sc_disks[n].d_no = n;
3148 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3150 sx_init(&sc->sc_lock, "graid3:lock");
3151 bioq_init(&sc->sc_queue);
3152 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3153 bioq_init(&sc->sc_regular_delayed);
3154 bioq_init(&sc->sc_inflight);
3155 bioq_init(&sc->sc_sync_delayed);
3156 TAILQ_INIT(&sc->sc_events);
3157 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3158 callout_init(&sc->sc_callout, 1);
3159 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3162 sc->sc_provider = NULL;
3164 * Synchronization geom.
3166 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3168 gp->orphan = g_raid3_orphan;
3169 sc->sc_sync.ds_geom = gp;
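/*
 * Unless g_raid3_use_malloc is set, pre-create the UMA zones from which
 * 64k, 16k and 4k I/O buffers are allocated; the sz_max limits come from
 * g_raid3_n64k, g_raid3_n16k and g_raid3_n4k.
 */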
3171 if (!g_raid3_use_malloc) {
3172 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3173 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3175 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3176 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3177 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3178 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3179 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3180 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3182 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3183 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3184 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3185 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3186 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3187 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3189 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3190 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3191 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3192 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3195 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3196 "g_raid3 %s", md->md_name);
3198 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3200 if (!g_raid3_use_malloc) {
3201 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3202 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3203 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3205 g_destroy_geom(sc->sc_sync.ds_geom);
3206 mtx_destroy(&sc->sc_events_mtx);
3207 mtx_destroy(&sc->sc_queue_mtx);
3208 sx_destroy(&sc->sc_lock);
3209 g_destroy_geom(sc->sc_geom);
3210 free(sc->sc_disks, M_RAID3);
3215 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3216 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3218 sc->sc_rootmount = root_mount_hold("GRAID3");
3219 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
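/*
 * Arm the startup timeout: if not all components appear within
 * g_raid3_timeout seconds, g_raid3_go() forces the device to start with
 * whatever is present.
 */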
3224 timeout = atomic_load_acq_int(&g_raid3_timeout);
3225 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3226 return (sc->sc_geom);
3230 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3232 struct g_provider *pp;
3234 g_topology_assert_not();
3237 sx_assert(&sc->sc_lock, SX_XLOCKED);
3239 pp = sc->sc_provider;
3240 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3242 case G_RAID3_DESTROY_SOFT:
3244 "Device %s is still open (r%dw%de%d).", pp->name,
3245 pp->acr, pp->acw, pp->ace);
3247 case G_RAID3_DESTROY_DELAYED:
3249 "Device %s will be destroyed on last close.",
3251 if (sc->sc_syncdisk != NULL)
3252 g_raid3_sync_stop(sc, 1);
3253 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3255 case G_RAID3_DESTROY_HARD:
3256 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3257 "can't be definitely removed.", pp->name);
3263 if (sc->sc_geom->softc == NULL) {
3264 g_topology_unlock();
3267 sc->sc_geom->softc = NULL;
3268 sc->sc_sync.ds_geom->softc = NULL;
3269 g_topology_unlock();
3271 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3272 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
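	/* Wake up the worker thread and wait for it to exit before tearing down the rest. */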
3273 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3274 sx_xunlock(&sc->sc_lock);
3275 mtx_lock(&sc->sc_queue_mtx);
3277 wakeup(&sc->sc_queue);
3278 mtx_unlock(&sc->sc_queue_mtx);
3279 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3280 while (sc->sc_worker != NULL)
3281 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3282 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3283 sx_xlock(&sc->sc_lock);
3284 g_raid3_destroy_device(sc);
3285 free(sc->sc_disks, M_RAID3);
3291 g_raid3_taste_orphan(struct g_consumer *cp)
3294 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3295 cp->provider->name));
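/*
 * Taste method: read RAID3 metadata from the offered provider and either
 * attach it to an existing device or create a new one.
 */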
3298 static struct g_geom *
3299 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3301 struct g_raid3_metadata md;
3302 struct g_raid3_softc *sc;
3303 struct g_consumer *cp;
3307 g_topology_assert();
3308 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3309 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3311 gp = g_new_geomf(mp, "raid3:taste");
3312 /* This orphan function should never be called. */
3313 gp->orphan = g_raid3_taste_orphan;
3314 cp = g_new_consumer(gp);
3316 error = g_raid3_read_metadata(cp, &md);
3318 g_destroy_consumer(cp);
3324 if (md.md_provider[0] != '\0' &&
3325 !g_compare_names(md.md_provider, pp->name))
3327 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3329 if (g_raid3_debug >= 2)
3330 raid3_metadata_dump(&md);
3333 * Let's check if the device already exists.
3336 LIST_FOREACH(gp, &mp->geom, geom) {
3340 if (sc->sc_sync.ds_geom == gp)
3342 if (strcmp(md.md_name, sc->sc_name) != 0)
3344 if (md.md_id != sc->sc_id) {
3345 G_RAID3_DEBUG(0, "Device %s already configured.",
3352 gp = g_raid3_create(mp, &md);
3354 G_RAID3_DEBUG(0, "Cannot create device %s.",
3360 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3361 g_topology_unlock();
3362 sx_xlock(&sc->sc_lock);
3363 error = g_raid3_add_disk(sc, pp, &md);
3365 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3366 pp->name, gp->name, error);
3367 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3370 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3376 sx_xunlock(&sc->sc_lock);
3382 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3385 struct g_raid3_softc *sc;
3388 g_topology_unlock();
3390 sx_xlock(&sc->sc_lock);
3392 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3394 sx_xunlock(&sc->sc_lock);
3400 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3401 struct g_consumer *cp, struct g_provider *pp)
3403 struct g_raid3_softc *sc;
3405 g_topology_assert();
3410 /* Skip synchronization geom. */
3411 if (gp == sc->sc_sync.ds_geom)
3415 } else if (cp != NULL) {
3416 struct g_raid3_disk *disk;
3421 g_topology_unlock();
3422 sx_xlock(&sc->sc_lock);
3423 sbuf_printf(sb, "%s<Type>", indent);
3424 if (disk->d_no == sc->sc_ndisks - 1)
3425 sbuf_printf(sb, "PARITY");
3427 sbuf_printf(sb, "DATA");
3428 sbuf_printf(sb, "</Type>\n");
3429 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3431 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
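/*
 * Synchronization progress is reported against the per-component data
 * size, i.e. mediasize / (sc_ndisks - 1).
 */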
3432 sbuf_printf(sb, "%s<Synchronized>", indent);
3433 if (disk->d_sync.ds_offset == 0)
3434 sbuf_printf(sb, "0%%");
3436 sbuf_printf(sb, "%u%%",
3437 (u_int)((disk->d_sync.ds_offset * 100) /
3438 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3440 sbuf_printf(sb, "</Synchronized>\n");
3441 if (disk->d_sync.ds_offset > 0) {
3442 sbuf_printf(sb, "%s<BytesSynced>%jd"
3443 "</BytesSynced>\n", indent,
3444 (intmax_t)disk->d_sync.ds_offset);
3447 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3448 disk->d_sync.ds_syncid);
3449 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3450 sbuf_printf(sb, "%s<Flags>", indent);
3451 if (disk->d_flags == 0)
3452 sbuf_printf(sb, "NONE");
3456 #define ADD_FLAG(flag, name) do { \
3457 if ((disk->d_flags & (flag)) != 0) { \
3459 sbuf_printf(sb, ", "); \
3462 sbuf_printf(sb, name); \
3465 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3466 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3467 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3469 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3470 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3473 sbuf_printf(sb, "</Flags>\n");
3474 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3475 g_raid3_disk_state2str(disk->d_state));
3476 sx_xunlock(&sc->sc_lock);
3479 g_topology_unlock();
3480 sx_xlock(&sc->sc_lock);
3481 if (!g_raid3_use_malloc) {
3483 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3484 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3486 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3487 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3489 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3490 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3492 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3493 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3495 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3496 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3498 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3499 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3501 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3502 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3503 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3504 sbuf_printf(sb, "%s<Flags>", indent);
3505 if (sc->sc_flags == 0)
3506 sbuf_printf(sb, "NONE");
3510 #define ADD_FLAG(flag, name) do { \
3511 if ((sc->sc_flags & (flag)) != 0) { \
3513 sbuf_printf(sb, ", "); \
3516 sbuf_printf(sb, name); \
3519 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3520 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3521 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3523 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3526 sbuf_printf(sb, "</Flags>\n");
3527 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3529 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3530 g_raid3_device_state2str(sc->sc_state));
3531 sx_xunlock(&sc->sc_lock);
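/*
 * shutdown_post_sync handler: mark every device idle (so components are
 * marked clean) and request delayed destruction for each of them.
 */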
3537 g_raid3_shutdown_post_sync(void *arg, int howto)
3540 struct g_geom *gp, *gp2;
3541 struct g_raid3_softc *sc;
3546 g_raid3_shutdown = 1;
3547 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3548 if ((sc = gp->softc) == NULL)
3550 /* Skip synchronization geom. */
3551 if (gp == sc->sc_sync.ds_geom)
3553 g_topology_unlock();
3554 sx_xlock(&sc->sc_lock);
3555 g_raid3_idle(sc, -1);
3557 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3559 sx_xunlock(&sc->sc_lock);
3562 g_topology_unlock();
3566 g_raid3_init(struct g_class *mp)
3569 g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3570 g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3571 if (g_raid3_post_sync == NULL)
3572 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3576 g_raid3_fini(struct g_class *mp)
3579 if (g_raid3_post_sync != NULL)
3580 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
3583 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);