/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>
FEATURE(geom_raid3, "GEOM RAID-3 functionality");

static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0,
    "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0,
    "Debug level");
62 static u_int g_raid3_timeout = 4;
63 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout,
64 0, "Time to wait on all raid3 components");
65 static u_int g_raid3_idletime = 5;
66 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN,
67 &g_raid3_idletime, 0, "Mark components as clean when idling");
68 static u_int g_raid3_disconnect_on_failure = 1;
69 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
70 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
71 static u_int g_raid3_syncreqs = 2;
72 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
73 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
74 static u_int g_raid3_use_malloc = 0;
75 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
76 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");
static u_int g_raid3_n64k = 50;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0,
    "Maximum number of 64kB allocations");
static u_int g_raid3_n16k = 200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0,
    "Maximum number of 16kB allocations");
static u_int g_raid3_n4k = 1200;
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0,
    "Maximum number of 4kB allocations");
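/*
 * Note: the n64k/n16k/n4k tunables above size the three UMA zones
 * (G_RAID3_ZONE_64K/16K/4K in sc_zones[]) from which g_raid3_alloc()
 * draws data buffers for cloned bios; g_raid3_uma_ctor() fails new
 * allocations once sz_inuse reaches sz_max, so these limits bound the
 * memory consumed by in-flight requests.
 */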
88 static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
89 "GEOM_RAID3 statistics");
90 static u_int g_raid3_parity_mismatch = 0;
91 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
92 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
#define MSLEEP(ident, mtx, priority, wmesg, timeout) do {               \
        G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));        \
        msleep((ident), (mtx), (priority), (wmesg), (timeout));         \
        G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));        \
} while (0)
100 static eventhandler_tag g_raid3_post_sync = NULL;
101 static int g_raid3_shutdown = 0;
static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
105 static g_taste_t g_raid3_taste;
106 static void g_raid3_init(struct g_class *mp);
107 static void g_raid3_fini(struct g_class *mp);
struct g_class g_raid3_class = {
        .name = G_RAID3_CLASS_NAME,
        .version = G_VERSION,
        .ctlreq = g_raid3_config,
        .taste = g_raid3_taste,
        .destroy_geom = g_raid3_destroy_geom,
        .init = g_raid3_init,
        .fini = g_raid3_fini
};
120 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
121 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
122 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
123 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
124 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
125 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
126 static int g_raid3_register_request(struct bio *pbp);
127 static void g_raid3_sync_release(struct g_raid3_softc *sc);
static const char *
g_raid3_disk_state2str(int state)
{

        switch (state) {
        case G_RAID3_DISK_STATE_NODISK:
                return ("NODISK");
        case G_RAID3_DISK_STATE_NONE:
                return ("NONE");
        case G_RAID3_DISK_STATE_NEW:
                return ("NEW");
        case G_RAID3_DISK_STATE_ACTIVE:
                return ("ACTIVE");
        case G_RAID3_DISK_STATE_STALE:
                return ("STALE");
        case G_RAID3_DISK_STATE_SYNCHRONIZING:
                return ("SYNCHRONIZING");
        case G_RAID3_DISK_STATE_DISCONNECTED:
                return ("DISCONNECTED");
        default:
                return ("INVALID");
        }
}
static const char *
g_raid3_device_state2str(int state)
{

        switch (state) {
        case G_RAID3_DEVICE_STATE_STARTING:
                return ("STARTING");
        case G_RAID3_DEVICE_STATE_DEGRADED:
                return ("DEGRADED");
        case G_RAID3_DEVICE_STATE_COMPLETE:
                return ("COMPLETE");
        default:
                return ("INVALID");
        }
}
static const char *
g_raid3_get_diskname(struct g_raid3_disk *disk)
{

        if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
                return ("[unknown]");
        return (disk->d_name);
}
180 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
183 enum g_raid3_zones zone;
185 if (g_raid3_use_malloc ||
186 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
187 ptr = malloc(size, M_RAID3, flags);
189 ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
190 &sc->sc_zones[zone], flags);
191 sc->sc_zones[zone].sz_requested++;
193 sc->sc_zones[zone].sz_failed++;
199 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
201 enum g_raid3_zones zone;
203 if (g_raid3_use_malloc ||
204 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
207 uma_zfree_arg(sc->sc_zones[zone].sz_zone,
208 ptr, &sc->sc_zones[zone]);
213 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
215 struct g_raid3_zone *sz = arg;
217 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
224 g_raid3_uma_dtor(void *mem, int size, void *arg)
226 struct g_raid3_zone *sz = arg;
231 #define g_raid3_xor(src, dst, size) \
232 _g_raid3_xor((uint64_t *)(src), \
233 (uint64_t *)(dst), (size_t)size)
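/*
 * RAID-3 parity is the bytewise XOR of all data components, so any one
 * missing component can be reconstructed from the remaining ones:
 *
 *      missing = parity ^ data_0 ^ ... ^ data_(n-2)
 *
 * _g_raid3_xor() below XORs 'src' into 'dst' 128 bytes (sixteen 64-bit
 * words) per iteration, which is why the size must be a multiple of 128.
 */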
235 _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
238 KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
239 for (; size > 0; size -= 128) {
260 g_raid3_is_zero(struct bio *bp)
262 static const uint64_t zeros[] = {
263 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
268 size = bp->bio_length;
269 addr = (u_char *)bp->bio_data;
270 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
271 if (bcmp(addr, zeros, sizeof(zeros)) != 0)
/*
 * --- Event handling functions ---
 * Events in geom_raid3 are used to update disk and device state from a
 * single thread, which simplifies locking.
 */
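/*
 * Rough flow, as a sketch of the code below: g_raid3_event_send()
 * allocates an event, queues it on sc_events and wakes up the worker
 * thread; the worker pops events via g_raid3_event_get(), applies the
 * state change, then either frees DONTWAIT events or marks them
 * G_RAID3_EVENT_DONE and wakes up the sender, which sleeps in MSLEEP()
 * until that flag is set.
 */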
283 g_raid3_event_free(struct g_raid3_event *ep)
290 g_raid3_event_send(void *arg, int state, int flags)
292 struct g_raid3_softc *sc;
293 struct g_raid3_disk *disk;
294 struct g_raid3_event *ep;
297 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
298 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
299 if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
310 mtx_lock(&sc->sc_events_mtx);
311 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
312 mtx_unlock(&sc->sc_events_mtx);
313 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
314 mtx_lock(&sc->sc_queue_mtx);
316 wakeup(&sc->sc_queue);
317 mtx_unlock(&sc->sc_queue_mtx);
318 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
320 sx_assert(&sc->sc_lock, SX_XLOCKED);
321 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
322 sx_xunlock(&sc->sc_lock);
323 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
324 mtx_lock(&sc->sc_events_mtx);
325 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
329 g_raid3_event_free(ep);
330 sx_xlock(&sc->sc_lock);
334 static struct g_raid3_event *
335 g_raid3_event_get(struct g_raid3_softc *sc)
337 struct g_raid3_event *ep;
339 mtx_lock(&sc->sc_events_mtx);
340 ep = TAILQ_FIRST(&sc->sc_events);
341 mtx_unlock(&sc->sc_events_mtx);
346 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
349 mtx_lock(&sc->sc_events_mtx);
350 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
351 mtx_unlock(&sc->sc_events_mtx);
355 g_raid3_event_cancel(struct g_raid3_disk *disk)
357 struct g_raid3_softc *sc;
358 struct g_raid3_event *ep, *tmpep;
361 sx_assert(&sc->sc_lock, SX_XLOCKED);
363 mtx_lock(&sc->sc_events_mtx);
364 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
365 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
367 if (ep->e_disk != disk)
369 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
370 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
371 g_raid3_event_free(ep);
373 ep->e_error = ECANCELED;
377 mtx_unlock(&sc->sc_events_mtx);
381 * Return the number of disks in the given state.
382 * If state is equal to -1, count all connected disks.
385 g_raid3_ndisks(struct g_raid3_softc *sc, int state)
387 struct g_raid3_disk *disk;
390 sx_assert(&sc->sc_lock, SX_LOCKED);
392 for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
393 disk = &sc->sc_disks[n];
394 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
396 if (state == -1 || disk->d_state == state)
403 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
408 mtx_lock(&sc->sc_queue_mtx);
409 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
410 if (bp->bio_from == cp)
413 mtx_unlock(&sc->sc_queue_mtx);
418 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
423 "I/O requests for %s exist, can't destroy it now.",
427 if (g_raid3_nrequests(sc, cp) > 0) {
429 "I/O requests for %s in queue, can't destroy it now.",
437 g_raid3_destroy_consumer(void *arg, int flags __unused)
439 struct g_consumer *cp;
444 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
446 g_destroy_consumer(cp);
450 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
452 struct g_provider *pp;
458 if (g_raid3_is_busy(sc, cp))
460 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
464 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
467 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
468 -cp->acw, -cp->ace, 0);
469 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
470 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
        /*
         * After the retaste event has been sent (inside g_access()), we can
         * post an event to detach and destroy the consumer.
         * A class which already has a consumer attached to the given
         * provider will not receive a retaste event for that provider.
         * This is how retaste events are ignored when consumers opened for
         * writing are closed: the consumer is detached and destroyed only
         * after the retaste event has been sent.
         */
481 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
484 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
486 g_destroy_consumer(cp);
490 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
492 struct g_consumer *cp;
495 g_topology_assert_not();
496 KASSERT(disk->d_consumer == NULL,
497 ("Disk already connected (device %s).", disk->d_softc->sc_name));
500 cp = g_new_consumer(disk->d_softc->sc_geom);
501 error = g_attach(cp, pp);
503 g_destroy_consumer(cp);
507 error = g_access(cp, 1, 1, 1);
511 g_destroy_consumer(cp);
512 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
516 disk->d_consumer = cp;
517 disk->d_consumer->private = disk;
518 disk->d_consumer->index = 0;
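        /*
         * The consumer's 'index' field serves as a counter of in-flight
         * requests on this consumer: it is decremented when requests come
         * back (see g_raid3_regular_request() and g_raid3_sync_request())
         * and g_raid3_is_busy() refuses to destroy a consumer while
         * requests are still outstanding.
         */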
519 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
524 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
531 if (cp->provider != NULL)
532 g_raid3_kill_consumer(sc, cp);
534 g_destroy_consumer(cp);
/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
541 static struct g_raid3_disk *
542 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
543 struct g_raid3_metadata *md, int *errorp)
545 struct g_raid3_disk *disk;
548 disk = &sc->sc_disks[md->md_no];
549 error = g_raid3_connect_disk(disk, pp);
555 disk->d_state = G_RAID3_DISK_STATE_NONE;
556 disk->d_flags = md->md_dflags;
557 if (md->md_provider[0] != '\0')
558 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
559 disk->d_sync.ds_consumer = NULL;
560 disk->d_sync.ds_offset = md->md_sync_offset;
561 disk->d_sync.ds_offset_done = md->md_sync_offset;
562 disk->d_genid = md->md_genid;
563 disk->d_sync.ds_syncid = md->md_syncid;
570 g_raid3_destroy_disk(struct g_raid3_disk *disk)
572 struct g_raid3_softc *sc;
574 g_topology_assert_not();
576 sx_assert(&sc->sc_lock, SX_XLOCKED);
578 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
580 g_raid3_event_cancel(disk);
581 switch (disk->d_state) {
582 case G_RAID3_DISK_STATE_SYNCHRONIZING:
583 if (sc->sc_syncdisk != NULL)
584 g_raid3_sync_stop(sc, 1);
586 case G_RAID3_DISK_STATE_NEW:
587 case G_RAID3_DISK_STATE_STALE:
588 case G_RAID3_DISK_STATE_ACTIVE:
590 g_raid3_disconnect_consumer(sc, disk->d_consumer);
592 disk->d_consumer = NULL;
595 KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
596 g_raid3_get_diskname(disk),
597 g_raid3_disk_state2str(disk->d_state)));
599 disk->d_state = G_RAID3_DISK_STATE_NODISK;
603 g_raid3_destroy_device(struct g_raid3_softc *sc)
605 struct g_raid3_event *ep;
606 struct g_raid3_disk *disk;
608 struct g_consumer *cp;
611 g_topology_assert_not();
612 sx_assert(&sc->sc_lock, SX_XLOCKED);
615 if (sc->sc_provider != NULL)
616 g_raid3_destroy_provider(sc);
617 for (n = 0; n < sc->sc_ndisks; n++) {
618 disk = &sc->sc_disks[n];
619 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
620 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
621 g_raid3_update_metadata(disk);
622 g_raid3_destroy_disk(disk);
625 while ((ep = g_raid3_event_get(sc)) != NULL) {
626 g_raid3_event_remove(sc, ep);
627 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
628 g_raid3_event_free(ep);
630 ep->e_error = ECANCELED;
631 ep->e_flags |= G_RAID3_EVENT_DONE;
632 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
633 mtx_lock(&sc->sc_events_mtx);
635 mtx_unlock(&sc->sc_events_mtx);
638 callout_drain(&sc->sc_callout);
639 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
642 g_raid3_disconnect_consumer(sc, cp);
643 g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
644 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
645 g_wither_geom(gp, ENXIO);
647 if (!g_raid3_use_malloc) {
648 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
649 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
650 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
652 mtx_destroy(&sc->sc_queue_mtx);
653 mtx_destroy(&sc->sc_events_mtx);
654 sx_xunlock(&sc->sc_lock);
655 sx_destroy(&sc->sc_lock);
659 g_raid3_orphan(struct g_consumer *cp)
661 struct g_raid3_disk *disk;
668 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
669 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
670 G_RAID3_EVENT_DONTWAIT);
674 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
676 struct g_raid3_softc *sc;
677 struct g_consumer *cp;
678 off_t offset, length;
682 g_topology_assert_not();
684 sx_assert(&sc->sc_lock, SX_LOCKED);
686 cp = disk->d_consumer;
687 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
688 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
689 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
690 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
692 length = cp->provider->sectorsize;
693 offset = cp->provider->mediasize - length;
694 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
696 raid3_metadata_encode(md, sector);
697 error = g_write_data(cp, offset, sector, length);
698 free(sector, M_RAID3);
700 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
701 G_RAID3_DEBUG(0, "Cannot write metadata on %s "
702 "(device=%s, error=%d).",
703 g_raid3_get_diskname(disk), sc->sc_name, error);
704 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
706 G_RAID3_DEBUG(1, "Cannot write metadata on %s "
707 "(device=%s, error=%d).",
708 g_raid3_get_diskname(disk), sc->sc_name, error);
710 if (g_raid3_disconnect_on_failure &&
711 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
712 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
713 g_raid3_event_send(disk,
714 G_RAID3_DISK_STATE_DISCONNECTED,
715 G_RAID3_EVENT_DONTWAIT);
722 g_raid3_clear_metadata(struct g_raid3_disk *disk)
726 g_topology_assert_not();
727 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
729 error = g_raid3_write_metadata(disk, NULL);
731 G_RAID3_DEBUG(2, "Metadata on %s cleared.",
732 g_raid3_get_diskname(disk));
735 "Cannot clear metadata on disk %s (error=%d).",
736 g_raid3_get_diskname(disk), error);
742 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
744 struct g_raid3_softc *sc;
745 struct g_provider *pp;
748 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
749 md->md_version = G_RAID3_VERSION;
750 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
751 md->md_id = sc->sc_id;
752 md->md_all = sc->sc_ndisks;
753 md->md_genid = sc->sc_genid;
754 md->md_mediasize = sc->sc_mediasize;
755 md->md_sectorsize = sc->sc_sectorsize;
756 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
757 md->md_no = disk->d_no;
758 md->md_syncid = disk->d_sync.ds_syncid;
759 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
760 if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
761 md->md_sync_offset = 0;
764 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
766 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
767 pp = disk->d_consumer->provider;
770 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
771 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
773 bzero(md->md_provider, sizeof(md->md_provider));
775 md->md_provsize = pp->mediasize;
781 g_raid3_update_metadata(struct g_raid3_disk *disk)
783 struct g_raid3_softc *sc;
784 struct g_raid3_metadata md;
787 g_topology_assert_not();
789 sx_assert(&sc->sc_lock, SX_LOCKED);
791 g_raid3_fill_metadata(disk, &md);
792 error = g_raid3_write_metadata(disk, &md);
794 G_RAID3_DEBUG(2, "Metadata on %s updated.",
795 g_raid3_get_diskname(disk));
798 "Cannot update metadata on disk %s (error=%d).",
799 g_raid3_get_diskname(disk), error);
804 g_raid3_bump_syncid(struct g_raid3_softc *sc)
806 struct g_raid3_disk *disk;
809 g_topology_assert_not();
810 sx_assert(&sc->sc_lock, SX_XLOCKED);
811 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
812 ("%s called with no active disks (device=%s).", __func__,
816 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
818 for (n = 0; n < sc->sc_ndisks; n++) {
819 disk = &sc->sc_disks[n];
820 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
821 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
822 disk->d_sync.ds_syncid = sc->sc_syncid;
823 g_raid3_update_metadata(disk);
829 g_raid3_bump_genid(struct g_raid3_softc *sc)
831 struct g_raid3_disk *disk;
834 g_topology_assert_not();
835 sx_assert(&sc->sc_lock, SX_XLOCKED);
836 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
837 ("%s called with no active disks (device=%s).", __func__,
841 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
843 for (n = 0; n < sc->sc_ndisks; n++) {
844 disk = &sc->sc_disks[n];
845 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
846 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
847 disk->d_genid = sc->sc_genid;
848 g_raid3_update_metadata(disk);
854 g_raid3_idle(struct g_raid3_softc *sc, int acw)
856 struct g_raid3_disk *disk;
860 g_topology_assert_not();
861 sx_assert(&sc->sc_lock, SX_XLOCKED);
863 if (sc->sc_provider == NULL)
865 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
869 if (sc->sc_writes > 0)
871 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
872 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
873 if (!g_raid3_shutdown && timeout > 0)
877 for (i = 0; i < sc->sc_ndisks; i++) {
878 disk = &sc->sc_disks[i];
879 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
881 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
882 g_raid3_get_diskname(disk), sc->sc_name);
883 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
884 g_raid3_update_metadata(disk);
890 g_raid3_unidle(struct g_raid3_softc *sc)
892 struct g_raid3_disk *disk;
895 g_topology_assert_not();
896 sx_assert(&sc->sc_lock, SX_XLOCKED);
898 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
901 sc->sc_last_write = time_uptime;
902 for (i = 0; i < sc->sc_ndisks; i++) {
903 disk = &sc->sc_disks[i];
904 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
906 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
907 g_raid3_get_diskname(disk), sc->sc_name);
908 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
909 g_raid3_update_metadata(disk);
/*
 * Treat the bio_driver1 field in the parent bio as the list head and the
 * bio_caller1 field in each child bio as the pointer to the next element
 * on the list.
 */
917 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1
919 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1
921 #define G_RAID3_FOREACH_BIO(pbp, bp) \
922 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \
923 (bp) = G_RAID3_NEXT_BIO(bp))
#define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)        \
        for ((bp) = G_RAID3_HEAD_BIO(pbp);              \
            (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);        \
            (bp) = (tmpbp))
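/*
 * The result is a simple singly-linked list rooted in the parent bio,
 * roughly:
 *
 *      pbp->bio_driver1 -> cbp0 -bio_caller1-> cbp1 -> ... -> NULL
 *
 * so the removal and replacement helpers below have to walk the list
 * from the head.
 */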
931 g_raid3_init_bio(struct bio *pbp)
934 G_RAID3_HEAD_BIO(pbp) = NULL;
938 g_raid3_remove_bio(struct bio *cbp)
940 struct bio *pbp, *bp;
942 pbp = cbp->bio_parent;
943 if (G_RAID3_HEAD_BIO(pbp) == cbp)
944 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
946 G_RAID3_FOREACH_BIO(pbp, bp) {
947 if (G_RAID3_NEXT_BIO(bp) == cbp) {
948 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
953 G_RAID3_NEXT_BIO(cbp) = NULL;
957 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
959 struct bio *pbp, *bp;
961 g_raid3_remove_bio(sbp);
962 pbp = dbp->bio_parent;
963 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
964 if (G_RAID3_HEAD_BIO(pbp) == dbp)
965 G_RAID3_HEAD_BIO(pbp) = sbp;
967 G_RAID3_FOREACH_BIO(pbp, bp) {
968 if (G_RAID3_NEXT_BIO(bp) == dbp) {
969 G_RAID3_NEXT_BIO(bp) = sbp;
974 G_RAID3_NEXT_BIO(dbp) = NULL;
978 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
980 struct bio *bp, *pbp;
983 pbp = cbp->bio_parent;
985 KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
986 size = pbp->bio_length / (sc->sc_ndisks - 1);
987 g_raid3_free(sc, cbp->bio_data, size);
988 if (G_RAID3_HEAD_BIO(pbp) == cbp) {
989 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
990 G_RAID3_NEXT_BIO(cbp) = NULL;
993 G_RAID3_FOREACH_BIO(pbp, bp) {
994 if (G_RAID3_NEXT_BIO(bp) == cbp)
998 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
999 ("NULL bp->bio_driver1"));
1000 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
1001 G_RAID3_NEXT_BIO(cbp) = NULL;
1008 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
1010 struct bio *bp, *cbp;
1014 cbp = g_clone_bio(pbp);
1017 size = pbp->bio_length / (sc->sc_ndisks - 1);
1018 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
1022 cbp->bio_data = g_raid3_alloc(sc, size, memflag);
1023 if (cbp->bio_data == NULL) {
1024 pbp->bio_children--;
1028 G_RAID3_NEXT_BIO(cbp) = NULL;
1029 if (G_RAID3_HEAD_BIO(pbp) == NULL)
1030 G_RAID3_HEAD_BIO(pbp) = cbp;
1032 G_RAID3_FOREACH_BIO(pbp, bp) {
1033 if (G_RAID3_NEXT_BIO(bp) == NULL) {
1034 G_RAID3_NEXT_BIO(bp) = cbp;
1043 g_raid3_scatter(struct bio *pbp)
1045 struct g_raid3_softc *sc;
1046 struct g_raid3_disk *disk;
1047 struct bio *bp, *cbp, *tmpbp;
1048 off_t atom, cadd, padd, left;
1051 sc = pbp->bio_to->geom->softc;
1053 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1055 * Find bio for which we should calculate data.
1057 G_RAID3_FOREACH_BIO(pbp, cbp) {
1058 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1063 KASSERT(bp != NULL, ("NULL parity bio."));
1065 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
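        /*
         * RAID-3 stripes at byte granularity: each provider sector is split
         * into (sc_ndisks - 1) equal "atoms", one per data component, so
         * every child bio receives its atom from each sector of the
         * parent's data in the copy loop below.
         */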
1067 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1068 G_RAID3_FOREACH_BIO(pbp, cbp) {
1071 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
1076 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1081 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1085 bcopy(cbp->bio_data, bp->bio_data,
1089 g_raid3_xor(cbp->bio_data, bp->bio_data,
1092 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
1093 g_raid3_destroy_bio(sc, cbp);
1096 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1097 struct g_consumer *cp;
1099 disk = cbp->bio_caller2;
1100 cp = disk->d_consumer;
1101 cbp->bio_to = cp->provider;
1102 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1103 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1104 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1105 cp->acr, cp->acw, cp->ace));
1108 g_io_request(cbp, cp);
1113 g_raid3_gather(struct bio *pbp)
1115 struct g_raid3_softc *sc;
1116 struct g_raid3_disk *disk;
1117 struct bio *xbp, *fbp, *cbp;
1118 off_t atom, cadd, padd, left;
1120 sc = pbp->bio_to->geom->softc;
        /*
         * Find the bio for which we have to calculate data.
         * While walking the list, check whether all requests succeeded;
         * if not, fail the whole parent request.
         * In COMPLETE mode we allow a single request to fail, in which case
         * it is re-sent to the parity consumer.
         * Any additional failed request fails the whole parent request.
         */
1130 G_RAID3_FOREACH_BIO(pbp, cbp) {
1131 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1132 KASSERT(xbp == NULL, ("More than one parity bio."));
1135 if (cbp->bio_error == 0)
1138 * Found failed request.
1141 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
1143 * We are already in degraded mode, so we can't
1144 * accept any failures.
1146 if (pbp->bio_error == 0)
1147 pbp->bio_error = cbp->bio_error;
1153 * Next failed request, that's too many.
1155 if (pbp->bio_error == 0)
1156 pbp->bio_error = fbp->bio_error;
1158 disk = cbp->bio_caller2;
1161 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1162 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1163 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
1166 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
1169 if (g_raid3_disconnect_on_failure &&
1170 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1171 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1172 g_raid3_event_send(disk,
1173 G_RAID3_DISK_STATE_DISCONNECTED,
1174 G_RAID3_EVENT_DONTWAIT);
1177 if (pbp->bio_error != 0)
1179 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1180 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
1182 g_raid3_replace_bio(xbp, fbp);
1183 g_raid3_destroy_bio(sc, fbp);
1184 } else if (fbp != NULL) {
1185 struct g_consumer *cp;
1188 * One request failed, so send the same request to
1189 * the parity consumer.
1191 disk = pbp->bio_driver2;
1192 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1193 pbp->bio_error = fbp->bio_error;
1196 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1198 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
1199 if (disk->d_no == sc->sc_ndisks - 1)
1200 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1202 fbp->bio_completed = 0;
1203 fbp->bio_children = 0;
1205 cp = disk->d_consumer;
1206 fbp->bio_caller2 = disk;
1207 fbp->bio_to = cp->provider;
1208 G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
1209 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1210 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1211 cp->acr, cp->acw, cp->ace));
1213 g_io_request(fbp, cp);
1220 G_RAID3_FOREACH_BIO(pbp, cbp) {
1221 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
1223 g_raid3_xor(cbp->bio_data, xbp->bio_data,
1226 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
1227 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1228 if (!g_raid3_is_zero(xbp)) {
1229 g_raid3_parity_mismatch++;
1230 pbp->bio_error = EIO;
1233 g_raid3_destroy_bio(sc, xbp);
1236 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1238 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1239 G_RAID3_FOREACH_BIO(pbp, cbp) {
1240 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
1241 pbp->bio_completed += atom;
1247 if (pbp->bio_error == 0)
1248 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1250 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
1251 G_RAID3_LOGREQ(1, pbp, "Verification error.");
1253 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1255 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
1256 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1257 g_raid3_destroy_bio(sc, cbp);
1258 g_io_deliver(pbp, pbp->bio_error);
1262 g_raid3_done(struct bio *bp)
1264 struct g_raid3_softc *sc;
1266 sc = bp->bio_from->geom->softc;
1267 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
1268 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
1269 mtx_lock(&sc->sc_queue_mtx);
1270 bioq_insert_head(&sc->sc_queue, bp);
1271 mtx_unlock(&sc->sc_queue_mtx);
1273 wakeup(&sc->sc_queue);
1277 g_raid3_regular_request(struct bio *cbp)
1279 struct g_raid3_softc *sc;
1280 struct g_raid3_disk *disk;
1283 g_topology_assert_not();
1285 pbp = cbp->bio_parent;
1286 sc = pbp->bio_to->geom->softc;
1287 cbp->bio_from->index--;
1288 if (cbp->bio_cmd == BIO_WRITE)
1290 disk = cbp->bio_from->private;
1293 g_raid3_kill_consumer(sc, cbp->bio_from);
1294 g_topology_unlock();
1297 G_RAID3_LOGREQ(3, cbp, "Request finished.");
1299 KASSERT(pbp->bio_inbed <= pbp->bio_children,
1300 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
1301 pbp->bio_children));
1302 if (pbp->bio_inbed != pbp->bio_children)
1304 switch (pbp->bio_cmd) {
1306 g_raid3_gather(pbp);
1313 pbp->bio_completed = pbp->bio_length;
1314 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
1315 if (cbp->bio_error == 0) {
1316 g_raid3_destroy_bio(sc, cbp);
1321 error = cbp->bio_error;
1322 else if (pbp->bio_error == 0) {
1324 * Next failed request, that's too many.
1326 pbp->bio_error = error;
1329 disk = cbp->bio_caller2;
1331 g_raid3_destroy_bio(sc, cbp);
1335 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1336 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1337 G_RAID3_LOGREQ(0, cbp,
1338 "Request failed (error=%d).",
1341 G_RAID3_LOGREQ(1, cbp,
1342 "Request failed (error=%d).",
1345 if (g_raid3_disconnect_on_failure &&
1346 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1347 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1348 g_raid3_event_send(disk,
1349 G_RAID3_DISK_STATE_DISCONNECTED,
1350 G_RAID3_EVENT_DONTWAIT);
1352 g_raid3_destroy_bio(sc, cbp);
1354 if (pbp->bio_error == 0)
1355 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1357 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1358 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
1359 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
1360 bioq_remove(&sc->sc_inflight, pbp);
1361 /* Release delayed sync requests if possible. */
1362 g_raid3_sync_release(sc);
1363 g_io_deliver(pbp, pbp->bio_error);
1370 g_raid3_sync_done(struct bio *bp)
1372 struct g_raid3_softc *sc;
1374 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
1375 sc = bp->bio_from->geom->softc;
1376 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
1377 mtx_lock(&sc->sc_queue_mtx);
1378 bioq_insert_head(&sc->sc_queue, bp);
1379 mtx_unlock(&sc->sc_queue_mtx);
1381 wakeup(&sc->sc_queue);
1385 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
1387 struct bio_queue_head queue;
1388 struct g_raid3_disk *disk;
1389 struct g_consumer *cp;
1394 for (i = 0; i < sc->sc_ndisks; i++) {
1395 disk = &sc->sc_disks[i];
1396 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
1398 cbp = g_clone_bio(bp);
1400 for (cbp = bioq_first(&queue); cbp != NULL;
1401 cbp = bioq_first(&queue)) {
1402 bioq_remove(&queue, cbp);
1405 if (bp->bio_error == 0)
1406 bp->bio_error = ENOMEM;
1407 g_io_deliver(bp, bp->bio_error);
1410 bioq_insert_tail(&queue, cbp);
1411 cbp->bio_done = g_std_done;
1412 cbp->bio_caller1 = disk;
1413 cbp->bio_to = disk->d_consumer->provider;
1415 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1416 bioq_remove(&queue, cbp);
1417 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1418 disk = cbp->bio_caller1;
1419 cbp->bio_caller1 = NULL;
1420 cp = disk->d_consumer;
1421 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1422 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1423 cp->acr, cp->acw, cp->ace));
1424 g_io_request(cbp, disk->d_consumer);
1429 g_raid3_start(struct bio *bp)
1431 struct g_raid3_softc *sc;
1433 sc = bp->bio_to->geom->softc;
1435 * If sc == NULL or there are no valid disks, provider's error
1436 * should be set and g_raid3_start() should not be called at all.
1438 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
1439 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
1440 ("Provider's error should be set (error=%d)(device=%s).",
1441 bp->bio_to->error, bp->bio_to->name));
1442 G_RAID3_LOGREQ(3, bp, "Request received.");
1444 switch (bp->bio_cmd) {
1451 g_raid3_flush(sc, bp);
1455 g_io_deliver(bp, EOPNOTSUPP);
1458 mtx_lock(&sc->sc_queue_mtx);
1459 bioq_insert_tail(&sc->sc_queue, bp);
1460 mtx_unlock(&sc->sc_queue_mtx);
1461 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static int
1470 g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
1472 struct g_raid3_disk *disk;
1474 off_t rstart, rend, sstart, send;
1477 disk = sc->sc_syncdisk;
1480 rstart = bp->bio_offset;
1481 rend = bp->bio_offset + bp->bio_length;
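        /*
         * Note on units: regular requests and synchronization READs use
         * provider (logical) offsets, while synchronization WRITEs address
         * a single component, which is why the WRITE offset/length are
         * scaled by (sc_ndisks - 1) below before being compared with the
         * regular request.
         */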
1482 for (i = 0; i < g_raid3_syncreqs; i++) {
1483 sbp = disk->d_sync.ds_bios[i];
1486 sstart = sbp->bio_offset;
1487 send = sbp->bio_length;
1488 if (sbp->bio_cmd == BIO_WRITE) {
1489 sstart *= sc->sc_ndisks - 1;
1490 send *= sc->sc_ndisks - 1;
1493 if (rend > sstart && rstart < send)
/*
 * Return TRUE if the given synchronization request collides with an
 * in-progress regular request.
 */
static int
1504 g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
1506 off_t rstart, rend, sstart, send;
1509 if (sc->sc_syncdisk == NULL)
1511 sstart = sbp->bio_offset;
1512 send = sstart + sbp->bio_length;
1513 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
1514 rstart = bp->bio_offset;
1515 rend = bp->bio_offset + bp->bio_length;
1516 if (rend > sstart && rstart < send)
1523 * Puts request onto delayed queue.
1526 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
1529 G_RAID3_LOGREQ(2, bp, "Delaying request.");
1530 bioq_insert_head(&sc->sc_regular_delayed, bp);
1534 * Puts synchronization request onto delayed queue.
1537 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
1540 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
1541 bioq_insert_tail(&sc->sc_sync_delayed, bp);
/*
 * Release delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
1549 g_raid3_regular_release(struct g_raid3_softc *sc)
1551 struct bio *bp, *bp2;
1553 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
1554 if (g_raid3_sync_collision(sc, bp))
1556 bioq_remove(&sc->sc_regular_delayed, bp);
1557 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
                mtx_lock(&sc->sc_queue_mtx);
                bioq_insert_head(&sc->sc_queue, bp);
#if 0
                /*
                 * wakeup() is not needed, because this function is called
                 * from the worker thread.
                 */
                wakeup(&sc->sc_queue);
#endif
                mtx_unlock(&sc->sc_queue_mtx);
/*
 * Release delayed synchronization requests which no longer collide with
 * regular requests.
 */
static void
1576 g_raid3_sync_release(struct g_raid3_softc *sc)
1578 struct bio *bp, *bp2;
1580 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
1581 if (g_raid3_regular_collision(sc, bp))
1583 bioq_remove(&sc->sc_sync_delayed, bp);
1584 G_RAID3_LOGREQ(2, bp,
1585 "Releasing delayed synchronization request.");
1586 g_io_request(bp, bp->bio_from);
/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider, then a WRITE request (with the
 * data just read) is sent to the provider being synchronized.  When the
 * WRITE is finished, a new synchronization request is sent.
 */
static void
1598 g_raid3_sync_request(struct bio *bp)
1600 struct g_raid3_softc *sc;
1601 struct g_raid3_disk *disk;
1603 bp->bio_from->index--;
1604 sc = bp->bio_from->geom->softc;
1605 disk = bp->bio_from->private;
1607 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
1609 g_raid3_kill_consumer(sc, bp->bio_from);
1610 g_topology_unlock();
1611 free(bp->bio_data, M_RAID3);
1613 sx_xlock(&sc->sc_lock);
1618 * Synchronization request.
1620 switch (bp->bio_cmd) {
1623 struct g_consumer *cp;
1628 if (bp->bio_error != 0) {
1629 G_RAID3_LOGREQ(0, bp,
1630 "Synchronization request failed (error=%d).",
1635 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1636 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1637 dst = src = bp->bio_data;
1638 if (disk->d_no == sc->sc_ndisks - 1) {
1641 /* Parity component. */
1642 for (left = bp->bio_length; left > 0;
1643 left -= sc->sc_sectorsize) {
1644 bcopy(src, dst, atom);
1646 for (n = 1; n < sc->sc_ndisks - 1; n++) {
1647 g_raid3_xor(src, dst, atom);
1653 /* Regular component. */
1654 src += atom * disk->d_no;
1655 for (left = bp->bio_length; left > 0;
1656 left -= sc->sc_sectorsize) {
1657 bcopy(src, dst, atom);
1658 src += sc->sc_sectorsize;
1662 bp->bio_driver1 = bp->bio_driver2 = NULL;
1664 bp->bio_offset /= sc->sc_ndisks - 1;
1665 bp->bio_length /= sc->sc_ndisks - 1;
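                /*
                 * The READ was issued in provider (logical) space; the WRITE
                 * goes to a single component, which holds 1/(sc_ndisks - 1)
                 * of every stripe, hence the offset/length conversion above.
                 */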
1666 bp->bio_cmd = BIO_WRITE;
1668 bp->bio_children = bp->bio_inbed = 0;
1669 cp = disk->d_consumer;
1670 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1671 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1672 cp->acr, cp->acw, cp->ace));
1674 g_io_request(bp, cp);
1679 struct g_raid3_disk_sync *sync;
1680 off_t boffset, moffset;
1684 if (bp->bio_error != 0) {
1685 G_RAID3_LOGREQ(0, bp,
1686 "Synchronization request failed (error=%d).",
1689 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1690 g_raid3_event_send(disk,
1691 G_RAID3_DISK_STATE_DISCONNECTED,
1692 G_RAID3_EVENT_DONTWAIT);
1695 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1696 sync = &disk->d_sync;
1697 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
1698 sync->ds_consumer == NULL ||
1699 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1700 /* Don't send more synchronization requests. */
1701 sync->ds_inflight--;
1702 if (sync->ds_bios != NULL) {
1703 i = (int)(uintptr_t)bp->bio_caller1;
1704 sync->ds_bios[i] = NULL;
1706 free(bp->bio_data, M_RAID3);
1708 if (sync->ds_inflight > 0)
1710 if (sync->ds_consumer == NULL ||
1711 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1715 * Disk up-to-date, activate it.
1717 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
1718 G_RAID3_EVENT_DONTWAIT);
1722 /* Send next synchronization request. */
1723 data = bp->bio_data;
1725 bp->bio_cmd = BIO_READ;
1726 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
1727 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1728 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
1729 bp->bio_done = g_raid3_sync_done;
1730 bp->bio_data = data;
1731 bp->bio_from = sync->ds_consumer;
1732 bp->bio_to = sc->sc_provider;
1733 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
1734 sync->ds_consumer->index++;
1736 * Delay the request if it is colliding with a regular request.
1738 if (g_raid3_regular_collision(sc, bp))
1739 g_raid3_sync_delay(sc, bp);
1741 g_io_request(bp, sync->ds_consumer);
1743 /* Release delayed requests if possible. */
1744 g_raid3_regular_release(sc);
1746 /* Find the smallest offset. */
1747 moffset = sc->sc_mediasize;
1748 for (i = 0; i < g_raid3_syncreqs; i++) {
1749 bp = sync->ds_bios[i];
1750 boffset = bp->bio_offset;
1751 if (bp->bio_cmd == BIO_WRITE)
1752 boffset *= sc->sc_ndisks - 1;
1753 if (boffset < moffset)
1756 if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
1757 /* Update offset_done on every 100 blocks. */
1758 sync->ds_offset_done = moffset;
1759 g_raid3_update_metadata(disk);
1764 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1765 bp->bio_cmd, sc->sc_name));
1771 g_raid3_register_request(struct bio *pbp)
1773 struct g_raid3_softc *sc;
1774 struct g_raid3_disk *disk;
1775 struct g_consumer *cp;
1776 struct bio *cbp, *tmpbp;
1777 off_t offset, length;
1779 int round_robin, verify;
1782 sc = pbp->bio_to->geom->softc;
1783 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
1784 sc->sc_syncdisk == NULL) {
1785 g_io_deliver(pbp, EIO);
1788 g_raid3_init_bio(pbp);
1789 length = pbp->bio_length / (sc->sc_ndisks - 1);
1790 offset = pbp->bio_offset / (sc->sc_ndisks - 1);
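        /*
         * Each component holds 1/(sc_ndisks - 1) of the logical data, so
         * every cloned child bio covers that fraction of the parent request
         * at the corresponding component offset.
         */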
1791 round_robin = verify = 0;
1792 switch (pbp->bio_cmd) {
1794 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
1795 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1796 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
1798 ndisks = sc->sc_ndisks;
1801 ndisks = sc->sc_ndisks - 1;
1803 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
1804 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1809 KASSERT(!round_robin || !verify,
1810 ("ROUND-ROBIN and VERIFY are mutually exclusive."));
1811 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
1816 * Delay the request if it is colliding with a synchronization
1819 if (g_raid3_sync_collision(sc, pbp)) {
1820 g_raid3_regular_delay(sc, pbp);
1827 sc->sc_last_write = time_uptime;
1829 ndisks = sc->sc_ndisks;
1832 for (n = 0; n < ndisks; n++) {
1833 disk = &sc->sc_disks[n];
1834 cbp = g_raid3_clone_bio(sc, pbp);
1836 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1837 g_raid3_destroy_bio(sc, cbp);
1839 * To prevent deadlock, we must run back up
1840 * with the ENOMEM for failed requests of any
1841 * of our consumers. Our own sync requests
1842 * can stick around, as they are finite.
1844 if ((pbp->bio_cflags &
1845 G_RAID3_BIO_CFLAG_REGULAR) != 0) {
1846 g_io_deliver(pbp, ENOMEM);
1851 cbp->bio_offset = offset;
1852 cbp->bio_length = length;
1853 cbp->bio_done = g_raid3_done;
1854 switch (pbp->bio_cmd) {
1856 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
                                /*
                                 * Replace invalid component with the parity
                                 * component.
                                 */
1861 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1862 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1863 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1864 } else if (round_robin &&
1865 disk->d_no == sc->sc_round_robin) {
1867 * In round-robin mode skip one data component
1868 * and use parity component when reading.
1870 pbp->bio_driver2 = disk;
1871 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1872 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1873 sc->sc_round_robin++;
1875 } else if (verify && disk->d_no == sc->sc_ndisks - 1) {
1876 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1881 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
1882 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
1883 if (n == ndisks - 1) {
1885 * Active parity component, mark it as such.
1888 G_RAID3_BIO_CFLAG_PARITY;
1891 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1892 if (n == ndisks - 1) {
1894 * Parity component is not connected,
1895 * so destroy its request.
1898 G_RAID3_BIO_PFLAG_NOPARITY;
1899 g_raid3_destroy_bio(sc, cbp);
1903 G_RAID3_BIO_CFLAG_NODISK;
1910 cbp->bio_caller2 = disk;
1912 switch (pbp->bio_cmd) {
                /*
                 * If we are in round-robin mode and 'round_robin' is still 1,
                 * it means that we skipped the parity component for this read
                 * and must reset the sc_round_robin field.
                 */
1920 sc->sc_round_robin = 0;
1922 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1923 disk = cbp->bio_caller2;
1924 cp = disk->d_consumer;
1925 cbp->bio_to = cp->provider;
1926 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1927 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1928 ("Consumer %s not opened (r%dw%de%d).",
1929 cp->provider->name, cp->acr, cp->acw, cp->ace));
1931 g_io_request(cbp, cp);
        /*
         * Put the request onto the inflight queue, so we can check whether
         * new synchronization requests collide with it.
         */
1940 bioq_insert_tail(&sc->sc_inflight, pbp);
1943 * Bump syncid on first write.
1945 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1946 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1947 g_raid3_bump_syncid(sc);
1949 g_raid3_scatter(pbp);
1956 g_raid3_can_destroy(struct g_raid3_softc *sc)
1959 struct g_consumer *cp;
1961 g_topology_assert();
1963 if (gp->softc == NULL)
1965 LIST_FOREACH(cp, &gp->consumer, consumer) {
1966 if (g_raid3_is_busy(sc, cp))
1969 gp = sc->sc_sync.ds_geom;
1970 LIST_FOREACH(cp, &gp->consumer, consumer) {
1971 if (g_raid3_is_busy(sc, cp))
1974 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1980 g_raid3_try_destroy(struct g_raid3_softc *sc)
1983 g_topology_assert_not();
1984 sx_assert(&sc->sc_lock, SX_XLOCKED);
1986 if (sc->sc_rootmount != NULL) {
1987 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1989 root_mount_rel(sc->sc_rootmount);
1990 sc->sc_rootmount = NULL;
1994 if (!g_raid3_can_destroy(sc)) {
1995 g_topology_unlock();
1998 sc->sc_geom->softc = NULL;
1999 sc->sc_sync.ds_geom->softc = NULL;
2000 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
2001 g_topology_unlock();
2002 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2004 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
2005 sx_xunlock(&sc->sc_lock);
2006 wakeup(&sc->sc_worker);
2007 sc->sc_worker = NULL;
2009 g_topology_unlock();
2010 g_raid3_destroy_device(sc);
2011 free(sc->sc_disks, M_RAID3);
2021 g_raid3_worker(void *arg)
2023 struct g_raid3_softc *sc;
2024 struct g_raid3_event *ep;
2029 thread_lock(curthread);
2030 sched_prio(curthread, PRIBIO);
2031 thread_unlock(curthread);
2033 sx_xlock(&sc->sc_lock);
2035 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
                /*
                 * First take a look at events.
                 * It is important to handle events before any I/O requests.
                 */
2040 ep = g_raid3_event_get(sc);
2042 g_raid3_event_remove(sc, ep);
2043 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2044 /* Update only device status. */
2046 "Running event for device %s.",
2049 g_raid3_update_device(sc, 1);
2051 /* Update disk status. */
2052 G_RAID3_DEBUG(3, "Running event for disk %s.",
2053 g_raid3_get_diskname(ep->e_disk));
2054 ep->e_error = g_raid3_update_disk(ep->e_disk,
2056 if (ep->e_error == 0)
2057 g_raid3_update_device(sc, 0);
2059 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2060 KASSERT(ep->e_error == 0,
2061 ("Error cannot be handled."));
2062 g_raid3_event_free(ep);
2064 ep->e_flags |= G_RAID3_EVENT_DONE;
2065 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2067 mtx_lock(&sc->sc_events_mtx);
2069 mtx_unlock(&sc->sc_events_mtx);
2072 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2073 if (g_raid3_try_destroy(sc)) {
2074 curthread->td_pflags &= ~TDP_GEOM;
2075 G_RAID3_DEBUG(1, "Thread exiting.");
2079 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
                /*
                 * Check whether we can mark the array as CLEAN and, if we
                 * cannot, how many seconds we should wait before trying
                 * again.
                 */
2086 timeout = g_raid3_idle(sc, -1);
2090 /* Get first request from the queue. */
2091 mtx_lock(&sc->sc_queue_mtx);
2092 bp = bioq_first(&sc->sc_queue);
2095 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2096 mtx_unlock(&sc->sc_queue_mtx);
2097 if (g_raid3_try_destroy(sc)) {
2098 curthread->td_pflags &= ~TDP_GEOM;
2099 G_RAID3_DEBUG(1, "Thread exiting.");
2102 mtx_lock(&sc->sc_queue_mtx);
2104 sx_xunlock(&sc->sc_lock);
2106 * XXX: We can miss an event here, because an event
2107 * can be added without sx-device-lock and without
2108 * mtx-queue-lock. Maybe I should just stop using
2109 * dedicated mutex for events synchronization and
2110 * stick with the queue lock?
2111 * The event will hang here until next I/O request
2112 * or next event is received.
2114 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2116 sx_xlock(&sc->sc_lock);
2117 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2121 bioq_remove(&sc->sc_queue, bp);
2122 mtx_unlock(&sc->sc_queue_mtx);
2124 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2125 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2126 g_raid3_sync_request(bp); /* READ */
2127 } else if (bp->bio_to != sc->sc_provider) {
2128 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2129 g_raid3_regular_request(bp);
2130 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2131 g_raid3_sync_request(bp); /* WRITE */
2134 ("Invalid request cflags=0x%hx to=%s.",
2135 bp->bio_cflags, bp->bio_to->name));
2137 } else if (g_raid3_register_request(bp) != 0) {
2138 mtx_lock(&sc->sc_queue_mtx);
2139 bioq_insert_head(&sc->sc_queue, bp);
                        /*
                         * We are short on memory; let's see if there are
                         * finished requests we can free.
                         */
2144 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2145 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2149 * No finished regular request, so at least keep
2150 * synchronization running.
2152 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2153 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2156 sx_xunlock(&sc->sc_lock);
2157 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2158 "r3:lowmem", hz / 10);
2159 sx_xlock(&sc->sc_lock);
2161 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2166 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2169 sx_assert(&sc->sc_lock, SX_LOCKED);
2170 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2172 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2173 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2174 g_raid3_get_diskname(disk), sc->sc_name);
2175 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2176 } else if (sc->sc_idle &&
2177 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2178 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2179 g_raid3_get_diskname(disk), sc->sc_name);
2180 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2185 g_raid3_sync_start(struct g_raid3_softc *sc)
2187 struct g_raid3_disk *disk;
2188 struct g_consumer *cp;
2193 g_topology_assert_not();
2194 sx_assert(&sc->sc_lock, SX_XLOCKED);
2196 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2197 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2199 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2200 sc->sc_name, sc->sc_state));
2202 for (n = 0; n < sc->sc_ndisks; n++) {
2203 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2205 disk = &sc->sc_disks[n];
2211 sx_xunlock(&sc->sc_lock);
2213 cp = g_new_consumer(sc->sc_sync.ds_geom);
2214 error = g_attach(cp, sc->sc_provider);
2216 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2217 error = g_access(cp, 1, 0, 0);
2218 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2219 g_topology_unlock();
2220 sx_xlock(&sc->sc_lock);
2222 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2223 g_raid3_get_diskname(disk));
2224 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2225 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2226 KASSERT(disk->d_sync.ds_consumer == NULL,
2227 ("Sync consumer already exists (device=%s, disk=%s).",
2228 sc->sc_name, g_raid3_get_diskname(disk)));
2230 disk->d_sync.ds_consumer = cp;
2231 disk->d_sync.ds_consumer->private = disk;
2232 disk->d_sync.ds_consumer->index = 0;
2233 sc->sc_syncdisk = disk;
2236 * Allocate memory for synchronization bios and initialize them.
2238 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2240 for (n = 0; n < g_raid3_syncreqs; n++) {
2242 disk->d_sync.ds_bios[n] = bp;
2243 bp->bio_parent = NULL;
2244 bp->bio_cmd = BIO_READ;
2245 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2247 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2248 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2249 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2250 bp->bio_done = g_raid3_sync_done;
2251 bp->bio_from = disk->d_sync.ds_consumer;
2252 bp->bio_to = sc->sc_provider;
2253 bp->bio_caller1 = (void *)(uintptr_t)n;
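                /*
                 * The slot number is stashed in bio_caller1 so that
                 * g_raid3_sync_request() can clear ds_bios[n] when this
                 * request is finally retired (see the BIO_WRITE case there).
                 */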
2256 /* Set the number of in-flight synchronization requests. */
2257 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2260 * Fire off first synchronization requests.
2262 for (n = 0; n < g_raid3_syncreqs; n++) {
2263 bp = disk->d_sync.ds_bios[n];
2264 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2265 disk->d_sync.ds_consumer->index++;
2267 * Delay the request if it is colliding with a regular request.
2269 if (g_raid3_regular_collision(sc, bp))
2270 g_raid3_sync_delay(sc, bp);
2272 g_io_request(bp, disk->d_sync.ds_consumer);
2277 * Stop synchronization process.
2278 * type: 0 - synchronization finished
2279 * 1 - synchronization stopped
2282 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2284 struct g_raid3_disk *disk;
2285 struct g_consumer *cp;
2287 g_topology_assert_not();
2288 sx_assert(&sc->sc_lock, SX_LOCKED);
2290 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2291 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2293 disk = sc->sc_syncdisk;
2294 sc->sc_syncdisk = NULL;
2295 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2296 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2297 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2298 g_raid3_disk_state2str(disk->d_state)));
2299 if (disk->d_sync.ds_consumer == NULL)
2303 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2304 sc->sc_name, g_raid3_get_diskname(disk));
2305 } else /* if (type == 1) */ {
2306 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2307 sc->sc_name, g_raid3_get_diskname(disk));
2309 free(disk->d_sync.ds_bios, M_RAID3);
2310 disk->d_sync.ds_bios = NULL;
2311 cp = disk->d_sync.ds_consumer;
2312 disk->d_sync.ds_consumer = NULL;
2313 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2314 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2316 g_raid3_kill_consumer(sc, cp);
2317 g_topology_unlock();
2318 sx_xlock(&sc->sc_lock);
2322 g_raid3_launch_provider(struct g_raid3_softc *sc)
2324 struct g_provider *pp;
2325 struct g_raid3_disk *disk;
2328 sx_assert(&sc->sc_lock, SX_LOCKED);
2331 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2332 pp->mediasize = sc->sc_mediasize;
2333 pp->sectorsize = sc->sc_sectorsize;
2335 pp->stripeoffset = 0;
2336 for (n = 0; n < sc->sc_ndisks; n++) {
2337 disk = &sc->sc_disks[n];
2338 if (disk->d_consumer && disk->d_consumer->provider &&
2339 disk->d_consumer->provider->stripesize > pp->stripesize) {
2340 pp->stripesize = disk->d_consumer->provider->stripesize;
2341 pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2344 pp->stripesize *= sc->sc_ndisks - 1;
2345 pp->stripeoffset *= sc->sc_ndisks - 1;
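        /*
         * A full logical stripe spans all data components, so the largest
         * component stripesize/stripeoffset found above is multiplied by
         * the number of data disks (sc_ndisks - 1).
         */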
2346 sc->sc_provider = pp;
2347 g_error_provider(pp, 0);
2348 g_topology_unlock();
2349 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2350 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2352 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2353 g_raid3_sync_start(sc);
2357 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2361 g_topology_assert_not();
2362 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2366 g_error_provider(sc->sc_provider, ENXIO);
2367 mtx_lock(&sc->sc_queue_mtx);
2368 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2369 bioq_remove(&sc->sc_queue, bp);
2370 g_io_deliver(bp, ENXIO);
2372 mtx_unlock(&sc->sc_queue_mtx);
2373 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2374 sc->sc_provider->name);
2375 g_wither_provider(sc->sc_provider, ENXIO);
2376 g_topology_unlock();
2377 sc->sc_provider = NULL;
2378 if (sc->sc_syncdisk != NULL)
2379 g_raid3_sync_stop(sc, 1);
2383 g_raid3_go(void *arg)
2385 struct g_raid3_softc *sc;
2388 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2389 g_raid3_event_send(sc, 0,
2390 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
2394 g_raid3_determine_state(struct g_raid3_disk *disk)
2396 struct g_raid3_softc *sc;
2400 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2401 if ((disk->d_flags &
2402 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2403 /* Disk does not need synchronization. */
2404 state = G_RAID3_DISK_STATE_ACTIVE;
2407 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2409 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2411 * We can start synchronization from
2412 * the stored offset.
2414 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2416 state = G_RAID3_DISK_STATE_STALE;
2419 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
                /*
                 * Reset all synchronization data for this disk, because even
                 * if it was synchronized before, it was synchronized against
                 * disks with a different syncid.
                 */
2425 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2426 disk->d_sync.ds_offset = 0;
2427 disk->d_sync.ds_offset_done = 0;
2428 disk->d_sync.ds_syncid = sc->sc_syncid;
2429 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2430 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2431 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2433 state = G_RAID3_DISK_STATE_STALE;
2435 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2437 * Not good, NOT GOOD!
2438 * It means that the device was started on stale disks
2439 * and a fresher disk has just arrived.
2440 * If there were writes, the device is broken, sorry.
2441 * I think the best choice here is not to touch
2442 * this disk and to inform the user loudly.
2444 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2445 "disk (%s) arrived! It will not be connected to the "
2446 "running device.", sc->sc_name,
2447 g_raid3_get_diskname(disk));
2448 g_raid3_destroy_disk(disk);
2449 state = G_RAID3_DISK_STATE_NONE;
2450 /* Return immediately, because disk was destroyed. */
2453 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2454 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
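/*
 * In summary, the logic above maps to:
 *
 *	syncid equal, no SYNCHRONIZING flag:	ACTIVE
 *	syncid equal or lower, autosync allowed
 *	    (or FORCE_SYNC set):		SYNCHRONIZING
 *	syncid equal or lower, NOAUTOSYNC set:	STALE
 *	syncid newer than the device's:		disk destroyed (NONE)
 */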
2459 * Update device state.
2462 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2464 struct g_raid3_disk *disk;
2467 sx_assert(&sc->sc_lock, SX_XLOCKED);
2469 switch (sc->sc_state) {
2470 case G_RAID3_DEVICE_STATE_STARTING:
2472 u_int n, ndirty, ndisks, genid, syncid;
2474 KASSERT(sc->sc_provider == NULL,
2475 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2477 * Are we ready? We are, if all disks are connected or
2478 * one disk is missing and 'force' is true.
2480 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2482 callout_drain(&sc->sc_callout);
2486 * Timeout expired, so destroy device.
2488 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2489 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2490 __LINE__, sc->sc_rootmount);
2491 root_mount_rel(sc->sc_rootmount);
2492 sc->sc_rootmount = NULL;
2498 * Find the biggest genid.
2501 for (n = 0; n < sc->sc_ndisks; n++) {
2502 disk = &sc->sc_disks[n];
2503 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2505 if (disk->d_genid > genid)
2506 genid = disk->d_genid;
2508 sc->sc_genid = genid;
2510 * Remove all disks without the biggest genid.
2512 for (n = 0; n < sc->sc_ndisks; n++) {
2513 disk = &sc->sc_disks[n];
2514 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2516 if (disk->d_genid < genid) {
2518 "Component %s (device %s) broken, skipping.",
2519 g_raid3_get_diskname(disk), sc->sc_name);
2520 g_raid3_destroy_disk(disk);
2525 * There must be at least 'sc->sc_ndisks - 1' components
2526 * with the same syncid and without SYNCHRONIZING flag.
2530 * Find the biggest syncid, number of valid components and
2531 * number of dirty components.
2533 ndirty = ndisks = syncid = 0;
2534 for (n = 0; n < sc->sc_ndisks; n++) {
2535 disk = &sc->sc_disks[n];
2536 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2538 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2540 if (disk->d_sync.ds_syncid > syncid) {
2541 syncid = disk->d_sync.ds_syncid;
2543 } else if (disk->d_sync.ds_syncid < syncid) {
2546 if ((disk->d_flags &
2547 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2553 * Do we have enough valid components?
2555 if (ndisks + 1 < sc->sc_ndisks) {
2557 "Device %s is broken, too few valid components.",
2559 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2563 * If there is one DIRTY component and all disks are present,
2564 * mark it for synchronization. If there is more than one DIRTY
2565 * component, mark the parity component for synchronization.
2567 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2568 for (n = 0; n < sc->sc_ndisks; n++) {
2569 disk = &sc->sc_disks[n];
2570 if ((disk->d_flags &
2571 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2575 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2577 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2578 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2579 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2582 sc->sc_syncid = syncid;
2584 /* Remember to bump syncid on first write. */
2585 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2587 if (ndisks == sc->sc_ndisks)
2588 state = G_RAID3_DEVICE_STATE_COMPLETE;
2589 else /* if (ndisks == sc->sc_ndisks - 1) */
2590 state = G_RAID3_DEVICE_STATE_DEGRADED;
2591 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2592 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2593 g_raid3_device_state2str(state));
2594 sc->sc_state = state;
2595 for (n = 0; n < sc->sc_ndisks; n++) {
2596 disk = &sc->sc_disks[n];
2597 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2599 state = g_raid3_determine_state(disk);
2600 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2601 if (state == G_RAID3_DISK_STATE_STALE)
2602 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
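/*
 * Startup election, by example: a 3-component array with on-disk
 * genids {5, 5, 4} drops the third component as broken; if the
 * survivors carry syncids {10, 10}, two valid components satisfy the
 * sc_ndisks - 1 minimum and the device comes up DEGRADED with
 * syncid 10, remembering to bump the syncid on the first write.
 */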
2606 case G_RAID3_DEVICE_STATE_DEGRADED:
2608 * Genid needs to be bumped immediately, so do it here.
2610 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2611 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2612 g_raid3_bump_genid(sc);
2615 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2617 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2618 sc->sc_ndisks - 1) {
2619 if (sc->sc_provider != NULL)
2620 g_raid3_destroy_provider(sc);
2621 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2624 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2626 state = G_RAID3_DEVICE_STATE_COMPLETE;
2628 "Device %s state changed from %s to %s.",
2629 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2630 g_raid3_device_state2str(state));
2631 sc->sc_state = state;
2633 if (sc->sc_provider == NULL)
2634 g_raid3_launch_provider(sc);
2635 if (sc->sc_rootmount != NULL) {
2636 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2638 root_mount_rel(sc->sc_rootmount);
2639 sc->sc_rootmount = NULL;
2642 case G_RAID3_DEVICE_STATE_COMPLETE:
2644 * Genid needs to be bumped immediately, so do it here.
2646 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2647 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2648 g_raid3_bump_genid(sc);
2651 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2653 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2655 ("Too few ACTIVE components in COMPLETE state (device %s).",
2657 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2658 sc->sc_ndisks - 1) {
2659 state = G_RAID3_DEVICE_STATE_DEGRADED;
2661 "Device %s state changed from %s to %s.",
2662 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2663 g_raid3_device_state2str(state));
2664 sc->sc_state = state;
2666 if (sc->sc_provider == NULL)
2667 g_raid3_launch_provider(sc);
2668 if (sc->sc_rootmount != NULL) {
2669 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2671 root_mount_rel(sc->sc_rootmount);
2672 sc->sc_rootmount = NULL;
2676 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2677 g_raid3_device_state2str(sc->sc_state)));
2683 * Update disk state and device state if needed.
2685 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2686 "Disk %s state changed from %s to %s (device %s).", \
2687 g_raid3_get_diskname(disk), \
2688 g_raid3_disk_state2str(disk->d_state), \
2689 g_raid3_disk_state2str(state), sc->sc_name)
2691 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2693 struct g_raid3_softc *sc;
2696 sx_assert(&sc->sc_lock, SX_XLOCKED);
2699 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2700 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2701 g_raid3_disk_state2str(state));
2703 case G_RAID3_DISK_STATE_NEW:
2705 * Possible scenarios:
2706 * 1. A new disk arrives.
2708 /* Previous state should be NONE. */
2709 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2710 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2711 g_raid3_disk_state2str(disk->d_state)));
2712 DISK_STATE_CHANGED();
2714 disk->d_state = state;
2715 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2716 sc->sc_name, g_raid3_get_diskname(disk));
2717 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2719 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2720 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2721 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2722 g_raid3_device_state2str(sc->sc_state),
2723 g_raid3_get_diskname(disk),
2724 g_raid3_disk_state2str(disk->d_state)));
2725 state = g_raid3_determine_state(disk);
2726 if (state != G_RAID3_DISK_STATE_NONE)
2729 case G_RAID3_DISK_STATE_ACTIVE:
2731 * Possible scenarios:
2732 * 1. A new disk does not need synchronization.
2733 * 2. Synchronization process finished successfully.
2735 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2736 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2737 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2738 g_raid3_device_state2str(sc->sc_state),
2739 g_raid3_get_diskname(disk),
2740 g_raid3_disk_state2str(disk->d_state)));
2741 /* Previous state should be NEW or SYNCHRONIZING. */
2742 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2743 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2744 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2745 g_raid3_disk_state2str(disk->d_state)));
2746 DISK_STATE_CHANGED();
2748 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2749 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2750 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2751 g_raid3_sync_stop(sc, 0);
2753 disk->d_state = state;
2754 disk->d_sync.ds_offset = 0;
2755 disk->d_sync.ds_offset_done = 0;
2756 g_raid3_update_idle(sc, disk);
2757 g_raid3_update_metadata(disk);
2758 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2759 sc->sc_name, g_raid3_get_diskname(disk));
2761 case G_RAID3_DISK_STATE_STALE:
2763 * Possible scenarios:
2764 * 1. A stale disk was connected.
2766 /* Previous state should be NEW. */
2767 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2768 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2769 g_raid3_disk_state2str(disk->d_state)));
2770 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2771 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2772 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2773 g_raid3_device_state2str(sc->sc_state),
2774 g_raid3_get_diskname(disk),
2775 g_raid3_disk_state2str(disk->d_state)));
2777 * STALE state is only possible if the device is marked NOAUTOSYNC.
2780 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2781 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2782 g_raid3_device_state2str(sc->sc_state),
2783 g_raid3_get_diskname(disk),
2784 g_raid3_disk_state2str(disk->d_state)));
2785 DISK_STATE_CHANGED();
2787 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2788 disk->d_state = state;
2789 g_raid3_update_metadata(disk);
2790 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2791 sc->sc_name, g_raid3_get_diskname(disk));
2793 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2795 * Possible scenarios:
2796 * 1. A disk which needs synchronization was connected.
2798 /* Previous state should be NEW. */
2799 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2800 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2801 g_raid3_disk_state2str(disk->d_state)));
2802 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2803 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2804 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2805 g_raid3_device_state2str(sc->sc_state),
2806 g_raid3_get_diskname(disk),
2807 g_raid3_disk_state2str(disk->d_state)));
2808 DISK_STATE_CHANGED();
2810 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2811 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2812 disk->d_state = state;
2813 if (sc->sc_provider != NULL) {
2814 g_raid3_sync_start(sc);
2815 g_raid3_update_metadata(disk);
2818 case G_RAID3_DISK_STATE_DISCONNECTED:
2820 * Possible scenarios:
2821 * 1. Device wasn't running yet, but a disk disappeared.
2822 * 2. Disk was active and disappeared.
2823 * 3. Disk disappeared during the synchronization process.
2825 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2826 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2828 * Previous state should be ACTIVE, STALE or SYNCHRONIZING.
2831 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2832 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2833 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2834 ("Wrong disk state (%s, %s).",
2835 g_raid3_get_diskname(disk),
2836 g_raid3_disk_state2str(disk->d_state)));
2837 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2838 /* Previous state should be NEW. */
2839 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2840 ("Wrong disk state (%s, %s).",
2841 g_raid3_get_diskname(disk),
2842 g_raid3_disk_state2str(disk->d_state)));
2844 * Reset bumping syncid if the disk disappeared in STARTING state.
2847 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2848 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2851 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2853 g_raid3_device_state2str(sc->sc_state),
2854 g_raid3_get_diskname(disk),
2855 g_raid3_disk_state2str(disk->d_state)));
2858 DISK_STATE_CHANGED();
2859 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2860 sc->sc_name, g_raid3_get_diskname(disk));
2862 g_raid3_destroy_disk(disk);
2865 KASSERT(1 == 0, ("Unknown state (%u).", state));
2870 #undef DISK_STATE_CHANGED
2873 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2875 struct g_provider *pp;
2879 g_topology_assert();
2881 error = g_access(cp, 1, 0, 0);
2885 g_topology_unlock();
2886 /* Metadata are stored in the last sector. */
2887 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2890 g_access(cp, -1, 0, 0);
2892 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2893 cp->provider->name, error);
2897 /* Decode metadata. */
2898 error = raid3_metadata_decode(buf, md);
2900 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2902 if (md->md_version > G_RAID3_VERSION) {
2904 "Kernel module is too old to handle metadata from %s.",
2905 cp->provider->name);
2909 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2910 cp->provider->name);
2913 if (md->md_sectorsize > MAXPHYS) {
2914 G_RAID3_DEBUG(0, "The blocksize is too big.");
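/*
 * The metadata sector sits at pp->mediasize - pp->sectorsize, i.e. the
 * last sector of the component; e.g. a 1048576-byte component with
 * 512-byte sectors keeps it at offset 1048064.
 */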
2922 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2923 struct g_raid3_metadata *md)
2926 if (md->md_no >= sc->sc_ndisks) {
2927 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2928 pp->name, md->md_no);
2931 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2932 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2933 pp->name, md->md_no);
2936 if (md->md_all != sc->sc_ndisks) {
2938 "Invalid '%s' field on disk %s (device %s), skipping.",
2939 "md_all", pp->name, sc->sc_name);
2942 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2943 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2944 "0) on disk %s (device %s), skipping.", pp->name,
2948 if (md->md_mediasize != sc->sc_mediasize) {
2950 "Invalid '%s' field on disk %s (device %s), skipping.",
2951 "md_mediasize", pp->name, sc->sc_name);
2954 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2956 "Invalid '%s' field on disk %s (device %s), skipping.",
2957 "md_mediasize", pp->name, sc->sc_name);
2960 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2962 "Invalid size of disk %s (device %s), skipping.", pp->name,
2966 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2968 "Invalid '%s' field on disk %s (device %s), skipping.",
2969 "md_sectorsize", pp->name, sc->sc_name);
2972 if (md->md_sectorsize != sc->sc_sectorsize) {
2974 "Invalid '%s' field on disk %s (device %s), skipping.",
2975 "md_sectorsize", pp->name, sc->sc_name);
2978 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2980 "Invalid sector size of disk %s (device %s), skipping.",
2981 pp->name, sc->sc_name);
2984 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2986 "Invalid device flags on disk %s (device %s), skipping.",
2987 pp->name, sc->sc_name);
2990 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2991 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2993 * VERIFY and ROUND-ROBIN options are mutually exclusive.
2995 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
2996 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
2999 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
3001 "Invalid disk flags on disk %s (device %s), skipping.",
3002 pp->name, sc->sc_name);
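/*
 * The size checks above encode the RAID3 geometry: a device sector
 * spans one sector from each of the sc_ndisks - 1 data components, so
 * md_sectorsize must be at least (sc_ndisks - 1) * pp->sectorsize.
 * A minimal sketch with hypothetical names, not part of the driver:
 */
#if 0
static int
example_sectorsize_ok(u_int md_sectorsize, u_int pp_sectorsize, u_int ndisks)
{

	/* E.g. 3 components with 512-byte sectors need at least 1024. */
	return (md_sectorsize / pp_sectorsize >= ndisks - 1);
}
#endif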
3009 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3010 struct g_raid3_metadata *md)
3012 struct g_raid3_disk *disk;
3015 g_topology_assert_not();
3016 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3018 error = g_raid3_check_metadata(sc, pp, md);
3021 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3022 md->md_genid < sc->sc_genid) {
3023 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3024 pp->name, sc->sc_name);
3027 disk = g_raid3_init_disk(sc, pp, md, &error);
3030 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3031 G_RAID3_EVENT_WAIT);
3034 if (md->md_version < G_RAID3_VERSION) {
3035 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3036 pp->name, md->md_version, G_RAID3_VERSION);
3037 g_raid3_update_metadata(disk);
3043 g_raid3_destroy_delayed(void *arg, int flag)
3045 struct g_raid3_softc *sc;
3048 if (flag == EV_CANCEL) {
3049 G_RAID3_DEBUG(1, "Destroying canceled.");
3053 g_topology_unlock();
3054 sx_xlock(&sc->sc_lock);
3055 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3056 ("DESTROY flag set on %s.", sc->sc_name));
3057 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3058 ("DESTROYING flag not set on %s.", sc->sc_name));
3059 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3060 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3062 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3063 sx_xunlock(&sc->sc_lock);
3069 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3071 struct g_raid3_softc *sc;
3072 int dcr, dcw, dce, error = 0;
3074 g_topology_assert();
3075 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3078 sc = pp->geom->softc;
3079 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3081 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3083 dcr = pp->acr + acr;
3084 dcw = pp->acw + acw;
3085 dce = pp->ace + ace;
3087 g_topology_unlock();
3088 sx_xlock(&sc->sc_lock);
3089 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3090 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3091 if (acr > 0 || acw > 0 || ace > 0)
3096 g_raid3_idle(sc, dcw);
3097 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3098 if (acr > 0 || acw > 0 || ace > 0) {
3102 if (dcr == 0 && dcw == 0 && dce == 0) {
3103 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3108 sx_xunlock(&sc->sc_lock);
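/*
 * pp->acr/acw/ace are the provider's current access counts and
 * acr/acw/ace the requested deltas, so dcr/dcw/dce are the counts
 * after this request.  E.g. the last writer closing arrives as
 * acw == -1: dcw becomes 0, g_raid3_idle() can mark the components
 * clean, and a device flagged DESTROYING is finally torn down once
 * all three counts reach zero.
 */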
3113 static struct g_geom *
3114 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3116 struct g_raid3_softc *sc;
3121 g_topology_assert();
3122 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3124 /* One disk is the minimum. */
3130 gp = g_new_geomf(mp, "%s", md->md_name);
3131 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3132 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3134 gp->start = g_raid3_start;
3135 gp->orphan = g_raid3_orphan;
3136 gp->access = g_raid3_access;
3137 gp->dumpconf = g_raid3_dumpconf;
3139 sc->sc_id = md->md_id;
3140 sc->sc_mediasize = md->md_mediasize;
3141 sc->sc_sectorsize = md->md_sectorsize;
3142 sc->sc_ndisks = md->md_all;
3143 sc->sc_round_robin = 0;
3144 sc->sc_flags = md->md_mflags;
3147 sc->sc_last_write = time_uptime;
3149 for (n = 0; n < sc->sc_ndisks; n++) {
3150 sc->sc_disks[n].d_softc = sc;
3151 sc->sc_disks[n].d_no = n;
3152 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3154 sx_init(&sc->sc_lock, "graid3:lock");
3155 bioq_init(&sc->sc_queue);
3156 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3157 bioq_init(&sc->sc_regular_delayed);
3158 bioq_init(&sc->sc_inflight);
3159 bioq_init(&sc->sc_sync_delayed);
3160 TAILQ_INIT(&sc->sc_events);
3161 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3162 callout_init(&sc->sc_callout, 1);
3163 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3166 sc->sc_provider = NULL;
3168 * Synchronization geom.
3170 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3172 gp->orphan = g_raid3_orphan;
3173 sc->sc_sync.ds_geom = gp;
3175 if (!g_raid3_use_malloc) {
3176 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3177 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3179 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3180 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3181 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3182 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3183 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3184 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3186 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3187 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3188 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3189 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3190 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3191 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3193 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3194 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3195 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3196 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
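/*
 * Each zone's sz_max reflects the g_raid3_n{64,16,4}k tunables;
 * sz_requested and sz_failed count allocation attempts and failures
 * and are exported by g_raid3_dumpconf() below.
 */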
3199 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3200 "g_raid3 %s", md->md_name);
3202 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3204 if (!g_raid3_use_malloc) {
3205 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3206 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3207 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3209 g_destroy_geom(sc->sc_sync.ds_geom);
3210 mtx_destroy(&sc->sc_events_mtx);
3211 mtx_destroy(&sc->sc_queue_mtx);
3212 sx_destroy(&sc->sc_lock);
3213 g_destroy_geom(sc->sc_geom);
3214 free(sc->sc_disks, M_RAID3);
3219 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3220 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3222 sc->sc_rootmount = root_mount_hold("GRAID3");
3223 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
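/*
 * The root mount hold keeps root-filesystem mounting from racing a
 * still-assembling array; g_raid3_update_device() above releases it
 * once the device leaves STARTING or the startup timeout destroys it.
 */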
3228 timeout = atomic_load_acq_int(&g_raid3_timeout);
3229 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3230 return (sc->sc_geom);
3234 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3236 struct g_provider *pp;
3238 g_topology_assert_not();
3241 sx_assert(&sc->sc_lock, SX_XLOCKED);
3243 pp = sc->sc_provider;
3244 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3246 case G_RAID3_DESTROY_SOFT:
3248 "Device %s is still open (r%dw%de%d).", pp->name,
3249 pp->acr, pp->acw, pp->ace);
3251 case G_RAID3_DESTROY_DELAYED:
3253 "Device %s will be destroyed on last close.",
3255 if (sc->sc_syncdisk != NULL)
3256 g_raid3_sync_stop(sc, 1);
3257 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3259 case G_RAID3_DESTROY_HARD:
3260 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3261 "can't be definitely removed.", pp->name);
3267 if (sc->sc_geom->softc == NULL) {
3268 g_topology_unlock();
3271 sc->sc_geom->softc = NULL;
3272 sc->sc_sync.ds_geom->softc = NULL;
3273 g_topology_unlock();
3275 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3276 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3277 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3278 sx_xunlock(&sc->sc_lock);
3279 mtx_lock(&sc->sc_queue_mtx);
3281 wakeup(&sc->sc_queue);
3282 mtx_unlock(&sc->sc_queue_mtx);
3283 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3284 while (sc->sc_worker != NULL)
3285 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3286 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3287 sx_xlock(&sc->sc_lock);
3288 g_raid3_destroy_device(sc);
3289 free(sc->sc_disks, M_RAID3);
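/*
 * To recap the 'how' modes handled above: SOFT refuses while the
 * provider is open, DELAYED flags the device DESTROYING so the last
 * close finishes the job (see g_raid3_access()), and HARD proceeds
 * with the teardown regardless.
 */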
3295 g_raid3_taste_orphan(struct g_consumer *cp)
3298 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3299 cp->provider->name));
3302 static struct g_geom *
3303 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3305 struct g_raid3_metadata md;
3306 struct g_raid3_softc *sc;
3307 struct g_consumer *cp;
3311 g_topology_assert();
3312 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3313 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3315 gp = g_new_geomf(mp, "raid3:taste");
3316 /* This orphan function should never be called. */
3317 gp->orphan = g_raid3_taste_orphan;
3318 cp = g_new_consumer(gp);
3320 error = g_raid3_read_metadata(cp, &md);
3322 g_destroy_consumer(cp);
3328 if (md.md_provider[0] != '\0' &&
3329 !g_compare_names(md.md_provider, pp->name))
3331 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3333 if (g_raid3_debug >= 2)
3334 raid3_metadata_dump(&md);
3337 * Let's check if the device already exists.
3340 LIST_FOREACH(gp, &mp->geom, geom) {
3344 if (sc->sc_sync.ds_geom == gp)
3346 if (strcmp(md.md_name, sc->sc_name) != 0)
3348 if (md.md_id != sc->sc_id) {
3349 G_RAID3_DEBUG(0, "Device %s already configured.",
3356 gp = g_raid3_create(mp, &md);
3358 G_RAID3_DEBUG(0, "Cannot create device %s.",
3364 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3365 g_topology_unlock();
3366 sx_xlock(&sc->sc_lock);
3367 error = g_raid3_add_disk(sc, pp, &md);
3369 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3370 pp->name, gp->name, error);
3371 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3374 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3380 sx_xunlock(&sc->sc_lock);
3386 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3389 struct g_raid3_softc *sc;
3392 g_topology_unlock();
3394 sx_xlock(&sc->sc_lock);
3396 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3398 sx_xunlock(&sc->sc_lock);
3404 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3405 struct g_consumer *cp, struct g_provider *pp)
3407 struct g_raid3_softc *sc;
3409 g_topology_assert();
3414 /* Skip synchronization geom. */
3415 if (gp == sc->sc_sync.ds_geom)
3419 } else if (cp != NULL) {
3420 struct g_raid3_disk *disk;
3425 g_topology_unlock();
3426 sx_xlock(&sc->sc_lock);
3427 sbuf_printf(sb, "%s<Type>", indent);
3428 if (disk->d_no == sc->sc_ndisks - 1)
3429 sbuf_cat(sb, "PARITY");
3431 sbuf_cat(sb, "DATA");
3432 sbuf_cat(sb, "</Type>\n");
3433 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3435 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3436 sbuf_printf(sb, "%s<Synchronized>", indent);
3437 if (disk->d_sync.ds_offset == 0)
3440 sbuf_printf(sb, "%u%%",
3441 (u_int)((disk->d_sync.ds_offset * 100) /
3442 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3444 sbuf_cat(sb, "</Synchronized>\n");
3445 if (disk->d_sync.ds_offset > 0) {
3446 sbuf_printf(sb, "%s<BytesSynced>%jd"
3447 "</BytesSynced>\n", indent,
3448 (intmax_t)disk->d_sync.ds_offset);
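/*
 * ds_offset counts bytes synchronized on this component, so progress
 * is scaled by the per-component data size, sc_mediasize /
 * (sc_ndisks - 1); e.g. 1GB resynced on a 3-component, 8GB array
 * reports (1 * 100) / (8 / 2) == 25%.
 */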
3451 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3452 disk->d_sync.ds_syncid);
3453 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3454 sbuf_printf(sb, "%s<Flags>", indent);
3455 if (disk->d_flags == 0)
3456 sbuf_cat(sb, "NONE");
3460 #define ADD_FLAG(flag, name) do { \
3461 if ((disk->d_flags & (flag)) != 0) { \
3463 sbuf_cat(sb, ", "); \
3466 sbuf_cat(sb, name); \
3469 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3470 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3471 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3473 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3474 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3477 sbuf_cat(sb, "</Flags>\n");
3478 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3479 g_raid3_disk_state2str(disk->d_state));
3480 sx_xunlock(&sc->sc_lock);
3483 g_topology_unlock();
3484 sx_xlock(&sc->sc_lock);
3485 if (!g_raid3_use_malloc) {
3487 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3488 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3490 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3491 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3493 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3494 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3496 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3497 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3499 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3500 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3502 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3503 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3505 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3506 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3507 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3508 sbuf_printf(sb, "%s<Flags>", indent);
3509 if (sc->sc_flags == 0)
3510 sbuf_cat(sb, "NONE");
3514 #define ADD_FLAG(flag, name) do { \
3515 if ((sc->sc_flags & (flag)) != 0) { \
3517 sbuf_cat(sb, ", "); \
3520 sbuf_cat(sb, name); \
3523 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3524 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3525 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3527 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3530 sbuf_cat(sb, "</Flags>\n");
3531 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3533 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3534 g_raid3_device_state2str(sc->sc_state));
3535 sx_xunlock(&sc->sc_lock);
3541 g_raid3_shutdown_post_sync(void *arg, int howto)
3544 struct g_geom *gp, *gp2;
3545 struct g_raid3_softc *sc;
3550 g_raid3_shutdown = 1;
3551 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3552 if ((sc = gp->softc) == NULL)
3554 /* Skip synchronization geom. */
3555 if (gp == sc->sc_sync.ds_geom)
3557 g_topology_unlock();
3558 sx_xlock(&sc->sc_lock);
3559 g_raid3_idle(sc, -1);
3561 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3563 sx_xunlock(&sc->sc_lock);
3566 g_topology_unlock();
3570 g_raid3_init(struct g_class *mp)
3573 g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3574 g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3575 if (g_raid3_post_sync == NULL)
3576 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3580 g_raid3_fini(struct g_class *mp)
3583 if (g_raid3_post_sync != NULL)
3584 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
3587 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);
3588 MODULE_VERSION(geom_raid3, 0);