2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
36 #include <sys/mutex.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/eventhandler.h>
42 #include <geom/geom.h>
44 #include <sys/kthread.h>
45 #include <sys/sched.h>
46 #include <geom/raid3/g_raid3.h>
49 static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");
51 SYSCTL_DECL(_kern_geom);
52 SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0, "GEOM_RAID3 stuff");
53 u_int g_raid3_debug = 0;
54 TUNABLE_INT("kern.geom.raid3.debug", &g_raid3_debug);
55 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RW, &g_raid3_debug, 0,
57 static u_int g_raid3_timeout = 4;
58 TUNABLE_INT("kern.geom.raid3.timeout", &g_raid3_timeout);
59 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RW, &g_raid3_timeout,
60 0, "Time to wait on all raid3 components");
61 static u_int g_raid3_idletime = 5;
62 TUNABLE_INT("kern.geom.raid3.idletime", &g_raid3_idletime);
63 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RW,
64 &g_raid3_idletime, 0, "Mark components as clean when idling");
65 static u_int g_raid3_disconnect_on_failure = 1;
66 TUNABLE_INT("kern.geom.raid3.disconnect_on_failure",
67 &g_raid3_disconnect_on_failure);
68 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
69 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
70 static u_int g_raid3_syncreqs = 2;
71 TUNABLE_INT("kern.geom.raid3.sync_requests", &g_raid3_syncreqs);
72 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
73 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
74 static u_int g_raid3_use_malloc = 0;
75 TUNABLE_INT("kern.geom.raid3.use_malloc", &g_raid3_use_malloc);
76 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
77 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");
79 static u_int g_raid3_n64k = 50;
80 TUNABLE_INT("kern.geom.raid3.n64k", &g_raid3_n64k);
81 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RD, &g_raid3_n64k, 0,
82 "Maximum number of 64kB allocations");
83 static u_int g_raid3_n16k = 200;
84 TUNABLE_INT("kern.geom.raid3.n16k", &g_raid3_n16k);
85 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RD, &g_raid3_n16k, 0,
86 "Maximum number of 16kB allocations");
87 static u_int g_raid3_n4k = 1200;
88 TUNABLE_INT("kern.geom.raid3.n4k", &g_raid3_n4k);
89 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RD, &g_raid3_n4k, 0,
90 "Maximum number of 4kB allocations");
92 SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
93 "GEOM_RAID3 statistics");
94 static u_int g_raid3_parity_mismatch = 0;
95 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
96 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
98 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \
99 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
100 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
101 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
104 static eventhandler_tag g_raid3_pre_sync = NULL;
106 static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
108 static g_taste_t g_raid3_taste;
109 static void g_raid3_init(struct g_class *mp);
110 static void g_raid3_fini(struct g_class *mp);
112 struct g_class g_raid3_class = {
113 .name = G_RAID3_CLASS_NAME,
114 .version = G_VERSION,
115 .ctlreq = g_raid3_config,
116 .taste = g_raid3_taste,
117 .destroy_geom = g_raid3_destroy_geom,
118 .init = g_raid3_init,
123 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
124 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
125 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
126 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
127 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
128 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
129 static int g_raid3_register_request(struct bio *pbp);
130 static void g_raid3_sync_release(struct g_raid3_softc *sc);
134 g_raid3_disk_state2str(int state)
138 case G_RAID3_DISK_STATE_NODISK:
140 case G_RAID3_DISK_STATE_NONE:
142 case G_RAID3_DISK_STATE_NEW:
144 case G_RAID3_DISK_STATE_ACTIVE:
146 case G_RAID3_DISK_STATE_STALE:
148 case G_RAID3_DISK_STATE_SYNCHRONIZING:
149 return ("SYNCHRONIZING");
150 case G_RAID3_DISK_STATE_DISCONNECTED:
151 return ("DISCONNECTED");
158 g_raid3_device_state2str(int state)
162 case G_RAID3_DEVICE_STATE_STARTING:
164 case G_RAID3_DEVICE_STATE_DEGRADED:
166 case G_RAID3_DEVICE_STATE_COMPLETE:
174 g_raid3_get_diskname(struct g_raid3_disk *disk)
177 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
178 return ("[unknown]");
179 return (disk->d_name);
183 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
187 if (g_raid3_use_malloc)
188 ptr = malloc(size, M_RAID3, flags);
190 ptr = uma_zalloc_arg(sc->sc_zones[g_raid3_zone(size)].sz_zone,
191 &sc->sc_zones[g_raid3_zone(size)], flags);
192 sc->sc_zones[g_raid3_zone(size)].sz_requested++;
194 sc->sc_zones[g_raid3_zone(size)].sz_failed++;
200 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
203 if (g_raid3_use_malloc)
206 uma_zfree_arg(sc->sc_zones[g_raid3_zone(size)].sz_zone,
207 ptr, &sc->sc_zones[g_raid3_zone(size)]);
212 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
214 struct g_raid3_zone *sz = arg;
216 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
223 g_raid3_uma_dtor(void *mem, int size, void *arg)
225 struct g_raid3_zone *sz = arg;
230 #define g_raid3_xor(src1, src2, dst, size) \
231 _g_raid3_xor((uint64_t *)(src1), (uint64_t *)(src2), \
232 (uint64_t *)(dst), (size_t)size)
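/*
 * XOR two buffers into the destination, 128 bytes (sixteen 64-bit words)
 * per loop iteration; hence the requirement that size is a multiple of 128.
 */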
234 _g_raid3_xor(uint64_t *src1, uint64_t *src2, uint64_t *dst, size_t size)
237 KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
238 for (; size > 0; size -= 128) {
239 *dst++ = (*src1++) ^ (*src2++);
240 *dst++ = (*src1++) ^ (*src2++);
241 *dst++ = (*src1++) ^ (*src2++);
242 *dst++ = (*src1++) ^ (*src2++);
243 *dst++ = (*src1++) ^ (*src2++);
244 *dst++ = (*src1++) ^ (*src2++);
245 *dst++ = (*src1++) ^ (*src2++);
246 *dst++ = (*src1++) ^ (*src2++);
247 *dst++ = (*src1++) ^ (*src2++);
248 *dst++ = (*src1++) ^ (*src2++);
249 *dst++ = (*src1++) ^ (*src2++);
250 *dst++ = (*src1++) ^ (*src2++);
251 *dst++ = (*src1++) ^ (*src2++);
252 *dst++ = (*src1++) ^ (*src2++);
253 *dst++ = (*src1++) ^ (*src2++);
254 *dst++ = (*src1++) ^ (*src2++);
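/*
 * Return true if the bio's data buffer contains nothing but zeros
 * (used in VERIFY mode, where the parity must XOR out to zero).
 */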
259 g_raid3_is_zero(struct bio *bp)
261 static const uint64_t zeros[] = {
262 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
267 size = bp->bio_length;
268 addr = (u_char *)bp->bio_data;
269 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
270 if (bcmp(addr, zeros, sizeof(zeros)) != 0)
277 * --- Event handling functions ---
278 * Events in geom_raid3 are used to maintain disk and device status
279 * from a single thread, which simplifies locking.
282 g_raid3_event_free(struct g_raid3_event *ep)
289 g_raid3_event_send(void *arg, int state, int flags)
291 struct g_raid3_softc *sc;
292 struct g_raid3_disk *disk;
293 struct g_raid3_event *ep;
296 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
297 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
298 if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
309 mtx_lock(&sc->sc_events_mtx);
310 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
311 mtx_unlock(&sc->sc_events_mtx);
312 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
313 mtx_lock(&sc->sc_queue_mtx);
315 wakeup(&sc->sc_queue);
316 mtx_unlock(&sc->sc_queue_mtx);
317 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
319 sx_assert(&sc->sc_lock, SX_XLOCKED);
320 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
321 sx_xunlock(&sc->sc_lock);
322 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
323 mtx_lock(&sc->sc_events_mtx);
324 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
328 g_raid3_event_free(ep);
329 sx_xlock(&sc->sc_lock);
333 static struct g_raid3_event *
334 g_raid3_event_get(struct g_raid3_softc *sc)
336 struct g_raid3_event *ep;
338 mtx_lock(&sc->sc_events_mtx);
339 ep = TAILQ_FIRST(&sc->sc_events);
340 mtx_unlock(&sc->sc_events_mtx);
345 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
348 mtx_lock(&sc->sc_events_mtx);
349 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
350 mtx_unlock(&sc->sc_events_mtx);
354 g_raid3_event_cancel(struct g_raid3_disk *disk)
356 struct g_raid3_softc *sc;
357 struct g_raid3_event *ep, *tmpep;
360 sx_assert(&sc->sc_lock, SX_XLOCKED);
362 mtx_lock(&sc->sc_events_mtx);
363 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
364 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
366 if (ep->e_disk != disk)
368 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
369 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
370 g_raid3_event_free(ep);
372 ep->e_error = ECANCELED;
376 mtx_unlock(&sc->sc_events_mtx);
380 * Return the number of disks in the given state.
381 * If state is equal to -1, count all connected disks.
384 g_raid3_ndisks(struct g_raid3_softc *sc, int state)
386 struct g_raid3_disk *disk;
389 sx_assert(&sc->sc_lock, SX_LOCKED);
391 for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
392 disk = &sc->sc_disks[n];
393 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
395 if (state == -1 || disk->d_state == state)
402 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
407 mtx_lock(&sc->sc_queue_mtx);
408 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
409 if (bp->bio_from == cp)
412 mtx_unlock(&sc->sc_queue_mtx);
417 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
422 "I/O requests for %s exist, can't destroy it now.",
426 if (g_raid3_nrequests(sc, cp) > 0) {
428 "I/O requests for %s in queue, can't destroy it now.",
436 g_raid3_destroy_consumer(void *arg, int flags __unused)
438 struct g_consumer *cp;
443 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
445 g_destroy_consumer(cp);
449 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
451 struct g_provider *pp;
457 if (g_raid3_is_busy(sc, cp))
459 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
463 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
466 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
467 -cp->acw, -cp->ace, 0);
468 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
469 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
472 * After the retaste event has been sent (inside g_access()), we can send
473 * an event to detach and destroy the consumer.
474 * A class which already has a consumer attached to the given provider
475 * will not receive a retaste event for that provider.
476 * This is how I ignore retaste events when I close consumers opened
477 * for writing: I detach and destroy the consumer after the retaste
478 * event is sent.
480 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
483 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
485 g_destroy_consumer(cp);
489 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
491 struct g_consumer *cp;
494 g_topology_assert_not();
495 KASSERT(disk->d_consumer == NULL,
496 ("Disk already connected (device %s).", disk->d_softc->sc_name));
499 cp = g_new_consumer(disk->d_softc->sc_geom);
500 error = g_attach(cp, pp);
502 g_destroy_consumer(cp);
506 error = g_access(cp, 1, 1, 1);
510 g_destroy_consumer(cp);
511 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
515 disk->d_consumer = cp;
516 disk->d_consumer->private = disk;
517 disk->d_consumer->index = 0;
518 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
523 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
530 if (cp->provider != NULL)
531 g_raid3_kill_consumer(sc, cp);
533 g_destroy_consumer(cp);
537 * Initialize the disk. This means allocating memory, creating a consumer,
538 * attaching it to the provider and opening access (r1w1e1) to it.
540 static struct g_raid3_disk *
541 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
542 struct g_raid3_metadata *md, int *errorp)
544 struct g_raid3_disk *disk;
547 disk = &sc->sc_disks[md->md_no];
548 error = g_raid3_connect_disk(disk, pp);
554 disk->d_state = G_RAID3_DISK_STATE_NONE;
555 disk->d_flags = md->md_dflags;
556 if (md->md_provider[0] != '\0')
557 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
558 disk->d_sync.ds_consumer = NULL;
559 disk->d_sync.ds_offset = md->md_sync_offset;
560 disk->d_sync.ds_offset_done = md->md_sync_offset;
561 disk->d_genid = md->md_genid;
562 disk->d_sync.ds_syncid = md->md_syncid;
569 g_raid3_destroy_disk(struct g_raid3_disk *disk)
571 struct g_raid3_softc *sc;
573 g_topology_assert_not();
575 sx_assert(&sc->sc_lock, SX_XLOCKED);
577 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
579 g_raid3_event_cancel(disk);
580 switch (disk->d_state) {
581 case G_RAID3_DISK_STATE_SYNCHRONIZING:
582 if (sc->sc_syncdisk != NULL)
583 g_raid3_sync_stop(sc, 1);
585 case G_RAID3_DISK_STATE_NEW:
586 case G_RAID3_DISK_STATE_STALE:
587 case G_RAID3_DISK_STATE_ACTIVE:
589 g_raid3_disconnect_consumer(sc, disk->d_consumer);
591 disk->d_consumer = NULL;
594 KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
595 g_raid3_get_diskname(disk),
596 g_raid3_disk_state2str(disk->d_state)));
598 disk->d_state = G_RAID3_DISK_STATE_NODISK;
602 g_raid3_destroy_device(struct g_raid3_softc *sc)
604 struct g_raid3_event *ep;
605 struct g_raid3_disk *disk;
607 struct g_consumer *cp;
610 g_topology_assert_not();
611 sx_assert(&sc->sc_lock, SX_XLOCKED);
614 if (sc->sc_provider != NULL)
615 g_raid3_destroy_provider(sc);
616 for (n = 0; n < sc->sc_ndisks; n++) {
617 disk = &sc->sc_disks[n];
618 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
619 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
620 g_raid3_update_metadata(disk);
621 g_raid3_destroy_disk(disk);
624 while ((ep = g_raid3_event_get(sc)) != NULL) {
625 g_raid3_event_remove(sc, ep);
626 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
627 g_raid3_event_free(ep);
629 ep->e_error = ECANCELED;
630 ep->e_flags |= G_RAID3_EVENT_DONE;
631 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
632 mtx_lock(&sc->sc_events_mtx);
634 mtx_unlock(&sc->sc_events_mtx);
637 callout_drain(&sc->sc_callout);
638 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
641 g_raid3_disconnect_consumer(sc, cp);
642 g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
643 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
644 g_wither_geom(gp, ENXIO);
646 if (!g_raid3_use_malloc) {
647 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
648 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
649 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
651 mtx_destroy(&sc->sc_queue_mtx);
652 mtx_destroy(&sc->sc_events_mtx);
653 sx_xunlock(&sc->sc_lock);
654 sx_destroy(&sc->sc_lock);
658 g_raid3_orphan(struct g_consumer *cp)
660 struct g_raid3_disk *disk;
667 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
668 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
669 G_RAID3_EVENT_DONTWAIT);
673 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
675 struct g_raid3_softc *sc;
676 struct g_consumer *cp;
677 off_t offset, length;
681 g_topology_assert_not();
683 sx_assert(&sc->sc_lock, SX_LOCKED);
685 cp = disk->d_consumer;
686 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
687 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
688 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
689 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
691 length = cp->provider->sectorsize;
692 offset = cp->provider->mediasize - length;
693 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
695 raid3_metadata_encode(md, sector);
696 error = g_write_data(cp, offset, sector, length);
697 free(sector, M_RAID3);
699 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
700 G_RAID3_DEBUG(0, "Cannot write metadata on %s "
701 "(device=%s, error=%d).",
702 g_raid3_get_diskname(disk), sc->sc_name, error);
703 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
705 G_RAID3_DEBUG(1, "Cannot write metadata on %s "
706 "(device=%s, error=%d).",
707 g_raid3_get_diskname(disk), sc->sc_name, error);
709 if (g_raid3_disconnect_on_failure &&
710 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
711 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
712 g_raid3_event_send(disk,
713 G_RAID3_DISK_STATE_DISCONNECTED,
714 G_RAID3_EVENT_DONTWAIT);
721 g_raid3_clear_metadata(struct g_raid3_disk *disk)
725 g_topology_assert_not();
726 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
728 error = g_raid3_write_metadata(disk, NULL);
730 G_RAID3_DEBUG(2, "Metadata on %s cleared.",
731 g_raid3_get_diskname(disk));
734 "Cannot clear metadata on disk %s (error=%d).",
735 g_raid3_get_diskname(disk), error);
741 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
743 struct g_raid3_softc *sc;
744 struct g_provider *pp;
747 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
748 md->md_version = G_RAID3_VERSION;
749 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
750 md->md_id = sc->sc_id;
751 md->md_all = sc->sc_ndisks;
752 md->md_genid = sc->sc_genid;
753 md->md_mediasize = sc->sc_mediasize;
754 md->md_sectorsize = sc->sc_sectorsize;
755 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
756 md->md_no = disk->d_no;
757 md->md_syncid = disk->d_sync.ds_syncid;
758 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
759 if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
760 md->md_sync_offset = 0;
763 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
765 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
766 pp = disk->d_consumer->provider;
769 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
770 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
772 bzero(md->md_provider, sizeof(md->md_provider));
774 md->md_provsize = pp->mediasize;
780 g_raid3_update_metadata(struct g_raid3_disk *disk)
782 struct g_raid3_softc *sc;
783 struct g_raid3_metadata md;
786 g_topology_assert_not();
788 sx_assert(&sc->sc_lock, SX_LOCKED);
790 g_raid3_fill_metadata(disk, &md);
791 error = g_raid3_write_metadata(disk, &md);
793 G_RAID3_DEBUG(2, "Metadata on %s updated.",
794 g_raid3_get_diskname(disk));
797 "Cannot update metadata on disk %s (error=%d).",
798 g_raid3_get_diskname(disk), error);
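/*
 * Bump the synchronization id and store it in the metadata of every
 * active and synchronizing component.
 */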
803 g_raid3_bump_syncid(struct g_raid3_softc *sc)
805 struct g_raid3_disk *disk;
808 g_topology_assert_not();
809 sx_assert(&sc->sc_lock, SX_XLOCKED);
810 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
811 ("%s called with no active disks (device=%s).", __func__,
815 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
817 for (n = 0; n < sc->sc_ndisks; n++) {
818 disk = &sc->sc_disks[n];
819 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
820 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
821 disk->d_sync.ds_syncid = sc->sc_syncid;
822 g_raid3_update_metadata(disk);
828 g_raid3_bump_genid(struct g_raid3_softc *sc)
830 struct g_raid3_disk *disk;
833 g_topology_assert_not();
834 sx_assert(&sc->sc_lock, SX_XLOCKED);
835 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
836 ("%s called with no active disks (device=%s).", __func__,
840 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
842 for (n = 0; n < sc->sc_ndisks; n++) {
843 disk = &sc->sc_disks[n];
844 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
845 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
846 disk->d_genid = sc->sc_genid;
847 g_raid3_update_metadata(disk);
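/*
 * If the device has been idle for at least g_raid3_idletime seconds,
 * mark all active components as clean and update their metadata.
 */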
853 g_raid3_idle(struct g_raid3_softc *sc, int acw)
855 struct g_raid3_disk *disk;
859 g_topology_assert_not();
860 sx_assert(&sc->sc_lock, SX_XLOCKED);
862 if (sc->sc_provider == NULL)
864 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
868 if (sc->sc_writes > 0)
870 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
871 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
876 for (i = 0; i < sc->sc_ndisks; i++) {
877 disk = &sc->sc_disks[i];
878 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
880 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
881 g_raid3_get_diskname(disk), sc->sc_name);
882 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
883 g_raid3_update_metadata(disk);
889 g_raid3_unidle(struct g_raid3_softc *sc)
891 struct g_raid3_disk *disk;
894 g_topology_assert_not();
895 sx_assert(&sc->sc_lock, SX_XLOCKED);
897 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
900 sc->sc_last_write = time_uptime;
901 for (i = 0; i < sc->sc_ndisks; i++) {
902 disk = &sc->sc_disks[i];
903 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
905 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
906 g_raid3_get_diskname(disk), sc->sc_name);
907 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
908 g_raid3_update_metadata(disk);
913 * Treat the bio_driver1 field in the parent bio as the list head and the
914 * bio_caller1 field in each child bio as the pointer to the next element on the list.
916 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1
918 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1
920 #define G_RAID3_FOREACH_BIO(pbp, bp) \
921 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \
922 (bp) = G_RAID3_NEXT_BIO(bp))
924 #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \
925 for ((bp) = G_RAID3_HEAD_BIO(pbp); \
926 (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \
930 g_raid3_init_bio(struct bio *pbp)
933 G_RAID3_HEAD_BIO(pbp) = NULL;
937 g_raid3_remove_bio(struct bio *cbp)
939 struct bio *pbp, *bp;
941 pbp = cbp->bio_parent;
942 if (G_RAID3_HEAD_BIO(pbp) == cbp)
943 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
945 G_RAID3_FOREACH_BIO(pbp, bp) {
946 if (G_RAID3_NEXT_BIO(bp) == cbp) {
947 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
952 G_RAID3_NEXT_BIO(cbp) = NULL;
956 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
958 struct bio *pbp, *bp;
960 g_raid3_remove_bio(sbp);
961 pbp = dbp->bio_parent;
962 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
963 if (G_RAID3_HEAD_BIO(pbp) == dbp)
964 G_RAID3_HEAD_BIO(pbp) = sbp;
966 G_RAID3_FOREACH_BIO(pbp, bp) {
967 if (G_RAID3_NEXT_BIO(bp) == dbp) {
968 G_RAID3_NEXT_BIO(bp) = sbp;
973 G_RAID3_NEXT_BIO(dbp) = NULL;
977 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
979 struct bio *bp, *pbp;
982 pbp = cbp->bio_parent;
984 KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
985 size = pbp->bio_length / (sc->sc_ndisks - 1);
986 g_raid3_free(sc, cbp->bio_data, size);
987 if (G_RAID3_HEAD_BIO(pbp) == cbp) {
988 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
989 G_RAID3_NEXT_BIO(cbp) = NULL;
992 G_RAID3_FOREACH_BIO(pbp, bp) {
993 if (G_RAID3_NEXT_BIO(bp) == cbp)
997 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
998 ("NULL bp->bio_driver1"));
999 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
1000 G_RAID3_NEXT_BIO(cbp) = NULL;
1007 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
1009 struct bio *bp, *cbp;
1013 cbp = g_clone_bio(pbp);
1016 size = pbp->bio_length / (sc->sc_ndisks - 1);
1017 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
1021 cbp->bio_data = g_raid3_alloc(sc, size, memflag);
1022 if (cbp->bio_data == NULL) {
1023 pbp->bio_children--;
1027 G_RAID3_NEXT_BIO(cbp) = NULL;
1028 if (G_RAID3_HEAD_BIO(pbp) == NULL)
1029 G_RAID3_HEAD_BIO(pbp) = cbp;
1031 G_RAID3_FOREACH_BIO(pbp, bp) {
1032 if (G_RAID3_NEXT_BIO(bp) == NULL) {
1033 G_RAID3_NEXT_BIO(bp) = cbp;
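/*
 * Split a regular WRITE request into per-component requests: copy the
 * data into the data components and XOR them together into the parity
 * component before sending the children to the consumers.
 */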
1042 g_raid3_scatter(struct bio *pbp)
1044 struct g_raid3_softc *sc;
1045 struct g_raid3_disk *disk;
1046 struct bio *bp, *cbp, *tmpbp;
1047 off_t atom, cadd, padd, left;
1049 sc = pbp->bio_to->geom->softc;
1051 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1053 * Find the bio for which we should calculate data.
1055 G_RAID3_FOREACH_BIO(pbp, cbp) {
1056 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1061 KASSERT(bp != NULL, ("NULL parity bio."));
1063 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1065 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1066 G_RAID3_FOREACH_BIO(pbp, cbp) {
1069 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
1074 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1078 bzero(bp->bio_data, bp->bio_length);
1079 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1082 g_raid3_xor(cbp->bio_data, bp->bio_data, bp->bio_data,
1084 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
1085 g_raid3_destroy_bio(sc, cbp);
1088 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1089 struct g_consumer *cp;
1091 disk = cbp->bio_caller2;
1092 cp = disk->d_consumer;
1093 cbp->bio_to = cp->provider;
1094 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1095 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1096 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1097 cp->acr, cp->acw, cp->ace));
1100 g_io_request(cbp, cp);
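/*
 * Complete a regular READ request: collect the data from the component
 * requests and, if one of them failed (or the device is degraded),
 * reconstruct the missing part by XOR-ing the remaining components.
 */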
1105 g_raid3_gather(struct bio *pbp)
1107 struct g_raid3_softc *sc;
1108 struct g_raid3_disk *disk;
1109 struct bio *xbp, *fbp, *cbp;
1110 off_t atom, cadd, padd, left;
1112 sc = pbp->bio_to->geom->softc;
1114 * Find the bio for which we have to calculate data.
1115 * While going through this path, check if all requests
1116 * succeeded; if not, deny the whole request.
1117 * If we're in COMPLETE mode, we allow one request to fail,
1118 * so if we find one, we resend it to the parity consumer.
1119 * If more requests have failed, we deny the whole request.
1122 G_RAID3_FOREACH_BIO(pbp, cbp) {
1123 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1124 KASSERT(xbp == NULL, ("More than one parity bio."));
1127 if (cbp->bio_error == 0)
1130 * Found failed request.
1133 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
1135 * We are already in degraded mode, so we can't
1136 * accept any failures.
1138 if (pbp->bio_error == 0)
1139 pbp->bio_error = cbp->bio_error;
1145 * Next failed request, that's too many.
1147 if (pbp->bio_error == 0)
1148 pbp->bio_error = fbp->bio_error;
1150 disk = cbp->bio_caller2;
1153 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1154 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1155 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
1158 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
1161 if (g_raid3_disconnect_on_failure &&
1162 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1163 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1164 g_raid3_event_send(disk,
1165 G_RAID3_DISK_STATE_DISCONNECTED,
1166 G_RAID3_EVENT_DONTWAIT);
1169 if (pbp->bio_error != 0)
1171 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1172 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
1174 g_raid3_replace_bio(xbp, fbp);
1175 g_raid3_destroy_bio(sc, fbp);
1176 } else if (fbp != NULL) {
1177 struct g_consumer *cp;
1180 * One request failed, so send the same request to
1181 * the parity consumer.
1183 disk = pbp->bio_driver2;
1184 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1185 pbp->bio_error = fbp->bio_error;
1188 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1190 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
1191 if (disk->d_no == sc->sc_ndisks - 1)
1192 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1194 fbp->bio_completed = 0;
1195 fbp->bio_children = 0;
1197 cp = disk->d_consumer;
1198 fbp->bio_caller2 = disk;
1199 fbp->bio_to = cp->provider;
1200 G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
1201 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1202 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1203 cp->acr, cp->acw, cp->ace));
1205 g_io_request(fbp, cp);
1212 G_RAID3_FOREACH_BIO(pbp, cbp) {
1213 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
1215 g_raid3_xor(cbp->bio_data, xbp->bio_data, xbp->bio_data,
1218 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
1219 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1220 if (!g_raid3_is_zero(xbp)) {
1221 g_raid3_parity_mismatch++;
1222 pbp->bio_error = EIO;
1225 g_raid3_destroy_bio(sc, xbp);
1228 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1230 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1231 G_RAID3_FOREACH_BIO(pbp, cbp) {
1232 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
1233 pbp->bio_completed += atom;
1239 if (pbp->bio_error == 0)
1240 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1242 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
1243 G_RAID3_LOGREQ(1, pbp, "Verification error.");
1245 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1247 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
1248 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1249 g_raid3_destroy_bio(sc, cbp);
1250 g_io_deliver(pbp, pbp->bio_error);
1254 g_raid3_done(struct bio *bp)
1256 struct g_raid3_softc *sc;
1258 sc = bp->bio_from->geom->softc;
1259 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
1260 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
1261 mtx_lock(&sc->sc_queue_mtx);
1262 bioq_insert_head(&sc->sc_queue, bp);
1264 wakeup(&sc->sc_queue);
1265 mtx_unlock(&sc->sc_queue_mtx);
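/*
 * Handle completion of a single child of a regular request; once all
 * children are in, gather the data (READ) or finish the parent (WRITE).
 */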
1269 g_raid3_regular_request(struct bio *cbp)
1271 struct g_raid3_softc *sc;
1272 struct g_raid3_disk *disk;
1275 g_topology_assert_not();
1277 pbp = cbp->bio_parent;
1278 sc = pbp->bio_to->geom->softc;
1279 cbp->bio_from->index--;
1280 if (cbp->bio_cmd == BIO_WRITE)
1282 disk = cbp->bio_from->private;
1285 g_raid3_kill_consumer(sc, cbp->bio_from);
1286 g_topology_unlock();
1289 G_RAID3_LOGREQ(3, cbp, "Request finished.");
1291 KASSERT(pbp->bio_inbed <= pbp->bio_children,
1292 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
1293 pbp->bio_children));
1294 if (pbp->bio_inbed != pbp->bio_children)
1296 switch (pbp->bio_cmd) {
1298 g_raid3_gather(pbp);
1305 pbp->bio_completed = pbp->bio_length;
1306 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
1307 if (cbp->bio_error == 0) {
1308 g_raid3_destroy_bio(sc, cbp);
1313 error = cbp->bio_error;
1314 else if (pbp->bio_error == 0) {
1316 * Next failed request, that's too many.
1318 pbp->bio_error = error;
1321 disk = cbp->bio_caller2;
1323 g_raid3_destroy_bio(sc, cbp);
1327 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1328 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1329 G_RAID3_LOGREQ(0, cbp,
1330 "Request failed (error=%d).",
1333 G_RAID3_LOGREQ(1, cbp,
1334 "Request failed (error=%d).",
1337 if (g_raid3_disconnect_on_failure &&
1338 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1339 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1340 g_raid3_event_send(disk,
1341 G_RAID3_DISK_STATE_DISCONNECTED,
1342 G_RAID3_EVENT_DONTWAIT);
1344 g_raid3_destroy_bio(sc, cbp);
1346 if (pbp->bio_error == 0)
1347 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1349 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1350 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
1351 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
1352 bioq_remove(&sc->sc_inflight, pbp);
1353 /* Release delayed sync requests if possible. */
1354 g_raid3_sync_release(sc);
1355 g_io_deliver(pbp, pbp->bio_error);
1362 g_raid3_sync_done(struct bio *bp)
1364 struct g_raid3_softc *sc;
1366 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
1367 sc = bp->bio_from->geom->softc;
1368 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
1369 mtx_lock(&sc->sc_queue_mtx);
1370 bioq_insert_head(&sc->sc_queue, bp);
1372 wakeup(&sc->sc_queue);
1373 mtx_unlock(&sc->sc_queue_mtx);
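/*
 * Handle BIO_FLUSH by cloning the request and sending a copy to every
 * active component.
 */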
1377 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
1379 struct bio_queue_head queue;
1380 struct g_raid3_disk *disk;
1381 struct g_consumer *cp;
1386 for (i = 0; i < sc->sc_ndisks; i++) {
1387 disk = &sc->sc_disks[i];
1388 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
1390 cbp = g_clone_bio(bp);
1392 for (cbp = bioq_first(&queue); cbp != NULL;
1393 cbp = bioq_first(&queue)) {
1394 bioq_remove(&queue, cbp);
1397 if (bp->bio_error == 0)
1398 bp->bio_error = ENOMEM;
1399 g_io_deliver(bp, bp->bio_error);
1402 bioq_insert_tail(&queue, cbp);
1403 cbp->bio_done = g_std_done;
1404 cbp->bio_caller1 = disk;
1405 cbp->bio_to = disk->d_consumer->provider;
1407 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1408 bioq_remove(&queue, cbp);
1409 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1410 disk = cbp->bio_caller1;
1411 cbp->bio_caller1 = NULL;
1412 cp = disk->d_consumer;
1413 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1414 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1415 cp->acr, cp->acw, cp->ace));
1416 g_io_request(cbp, disk->d_consumer);
1421 g_raid3_start(struct bio *bp)
1423 struct g_raid3_softc *sc;
1425 sc = bp->bio_to->geom->softc;
1427 * If sc == NULL or there are no valid disks, the provider's error
1428 * should be set and g_raid3_start() should not be called at all.
1430 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
1431 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
1432 ("Provider's error should be set (error=%d)(device=%s).",
1433 bp->bio_to->error, bp->bio_to->name));
1434 G_RAID3_LOGREQ(3, bp, "Request received.");
1436 switch (bp->bio_cmd) {
1442 g_raid3_flush(sc, bp);
1446 g_io_deliver(bp, EOPNOTSUPP);
1449 mtx_lock(&sc->sc_queue_mtx);
1450 bioq_insert_tail(&sc->sc_queue, bp);
1451 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
1453 mtx_unlock(&sc->sc_queue_mtx);
1457 * Return TRUE if the given request is colliding with an in-progress
1458 * synchronization request.
1461 g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
1463 struct g_raid3_disk *disk;
1465 off_t rstart, rend, sstart, send;
1468 disk = sc->sc_syncdisk;
1471 rstart = bp->bio_offset;
1472 rend = bp->bio_offset + bp->bio_length;
1473 for (i = 0; i < g_raid3_syncreqs; i++) {
1474 sbp = disk->d_sync.ds_bios[i];
1477 sstart = sbp->bio_offset;
1478 send = sbp->bio_length;
1479 if (sbp->bio_cmd == BIO_WRITE) {
1480 sstart *= sc->sc_ndisks - 1;
1481 send *= sc->sc_ndisks - 1;
1484 if (rend > sstart && rstart < send)
1491 * Return TRUE if the given sync request is colliding with an in-progress regular
1495 g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
1497 off_t rstart, rend, sstart, send;
1500 if (sc->sc_syncdisk == NULL)
1502 sstart = sbp->bio_offset;
1503 send = sstart + sbp->bio_length;
1504 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
1505 rstart = bp->bio_offset;
1506 rend = bp->bio_offset + bp->bio_length;
1507 if (rend > sstart && rstart < send)
1514 * Puts the request onto the delayed queue.
1517 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
1520 G_RAID3_LOGREQ(2, bp, "Delaying request.");
1521 bioq_insert_head(&sc->sc_regular_delayed, bp);
1525 * Puts the synchronization request onto the delayed queue.
1528 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
1531 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
1532 bioq_insert_tail(&sc->sc_sync_delayed, bp);
1536 * Releases delayed regular requests which no longer collide with sync
1540 g_raid3_regular_release(struct g_raid3_softc *sc)
1542 struct bio *bp, *bp2;
1544 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
1545 if (g_raid3_sync_collision(sc, bp))
1547 bioq_remove(&sc->sc_regular_delayed, bp);
1548 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
1549 mtx_lock(&sc->sc_queue_mtx);
1550 bioq_insert_head(&sc->sc_queue, bp);
1553 * wakeup() is not needed, because this function is called from
1554 * the worker thread.
1556 wakeup(&sc->sc_queue);
1558 mtx_unlock(&sc->sc_queue_mtx);
1563 * Releases delayed sync requests which no longer collide with regular
1567 g_raid3_sync_release(struct g_raid3_softc *sc)
1569 struct bio *bp, *bp2;
1571 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
1572 if (g_raid3_regular_collision(sc, bp))
1574 bioq_remove(&sc->sc_sync_delayed, bp);
1575 G_RAID3_LOGREQ(2, bp,
1576 "Releasing delayed synchronization request.");
1577 g_io_request(bp, bp->bio_from);
1582 * Handle synchronization requests.
1583 * Every synchronization request is a two-step process: first, a READ request is
1584 * sent to the active provider and then a WRITE request (with the read data) to the
1585 * provider being synchronized. When the WRITE is finished, a new synchronization request is
1589 g_raid3_sync_request(struct bio *bp)
1591 struct g_raid3_softc *sc;
1592 struct g_raid3_disk *disk;
1594 bp->bio_from->index--;
1595 sc = bp->bio_from->geom->softc;
1596 disk = bp->bio_from->private;
1598 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
1600 g_raid3_kill_consumer(sc, bp->bio_from);
1601 g_topology_unlock();
1602 free(bp->bio_data, M_RAID3);
1604 sx_xlock(&sc->sc_lock);
1609 * Synchronization request.
1611 switch (bp->bio_cmd) {
1614 struct g_consumer *cp;
1619 if (bp->bio_error != 0) {
1620 G_RAID3_LOGREQ(0, bp,
1621 "Synchronization request failed (error=%d).",
1626 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1627 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1628 dst = src = bp->bio_data;
1629 if (disk->d_no == sc->sc_ndisks - 1) {
1632 /* Parity component. */
1633 for (left = bp->bio_length; left > 0;
1634 left -= sc->sc_sectorsize) {
1635 bcopy(src, dst, atom);
1637 for (n = 1; n < sc->sc_ndisks - 1; n++) {
1638 g_raid3_xor(src, dst, dst, atom);
1644 /* Regular component. */
1645 src += atom * disk->d_no;
1646 for (left = bp->bio_length; left > 0;
1647 left -= sc->sc_sectorsize) {
1648 bcopy(src, dst, atom);
1649 src += sc->sc_sectorsize;
1653 bp->bio_driver1 = bp->bio_driver2 = NULL;
1655 bp->bio_offset /= sc->sc_ndisks - 1;
1656 bp->bio_length /= sc->sc_ndisks - 1;
1657 bp->bio_cmd = BIO_WRITE;
1659 bp->bio_children = bp->bio_inbed = 0;
1660 cp = disk->d_consumer;
1661 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1662 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1663 cp->acr, cp->acw, cp->ace));
1665 g_io_request(bp, cp);
1670 struct g_raid3_disk_sync *sync;
1671 off_t boffset, moffset;
1675 if (bp->bio_error != 0) {
1676 G_RAID3_LOGREQ(0, bp,
1677 "Synchronization request failed (error=%d).",
1680 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1681 g_raid3_event_send(disk,
1682 G_RAID3_DISK_STATE_DISCONNECTED,
1683 G_RAID3_EVENT_DONTWAIT);
1686 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1687 sync = &disk->d_sync;
1688 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
1689 sync->ds_consumer == NULL ||
1690 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1691 /* Don't send more synchronization requests. */
1692 sync->ds_inflight--;
1693 if (sync->ds_bios != NULL) {
1694 i = (int)(uintptr_t)bp->bio_caller1;
1695 sync->ds_bios[i] = NULL;
1697 free(bp->bio_data, M_RAID3);
1699 if (sync->ds_inflight > 0)
1701 if (sync->ds_consumer == NULL ||
1702 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1706 * Disk up-to-date, activate it.
1708 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
1709 G_RAID3_EVENT_DONTWAIT);
1713 /* Send next synchronization request. */
1714 data = bp->bio_data;
1715 bzero(bp, sizeof(*bp));
1716 bp->bio_cmd = BIO_READ;
1717 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
1718 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1719 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
1720 bp->bio_done = g_raid3_sync_done;
1721 bp->bio_data = data;
1722 bp->bio_from = sync->ds_consumer;
1723 bp->bio_to = sc->sc_provider;
1724 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
1725 sync->ds_consumer->index++;
1727 * Delay the request if it is colliding with a regular request.
1729 if (g_raid3_regular_collision(sc, bp))
1730 g_raid3_sync_delay(sc, bp);
1732 g_io_request(bp, sync->ds_consumer);
1734 /* Release delayed requests if possible. */
1735 g_raid3_regular_release(sc);
1737 /* Find the smallest offset. */
1738 moffset = sc->sc_mediasize;
1739 for (i = 0; i < g_raid3_syncreqs; i++) {
1740 bp = sync->ds_bios[i];
1741 boffset = bp->bio_offset;
1742 if (bp->bio_cmd == BIO_WRITE)
1743 boffset *= sc->sc_ndisks - 1;
1744 if (boffset < moffset)
1747 if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
1748 /* Update offset_done on every 100 blocks. */
1749 sync->ds_offset_done = moffset;
1750 g_raid3_update_metadata(disk);
1755 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1756 bp->bio_cmd, sc->sc_name));
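/*
 * Turn a regular request from the raid3 provider into per-component
 * child requests and send them out (READs directly, WRITEs through
 * g_raid3_scatter()).
 */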
1762 g_raid3_register_request(struct bio *pbp)
1764 struct g_raid3_softc *sc;
1765 struct g_raid3_disk *disk;
1766 struct g_consumer *cp;
1767 struct bio *cbp, *tmpbp;
1768 off_t offset, length;
1770 int round_robin, verify;
1773 sc = pbp->bio_to->geom->softc;
1774 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
1775 sc->sc_syncdisk == NULL) {
1776 g_io_deliver(pbp, EIO);
1779 g_raid3_init_bio(pbp);
1780 length = pbp->bio_length / (sc->sc_ndisks - 1);
1781 offset = pbp->bio_offset / (sc->sc_ndisks - 1);
1782 round_robin = verify = 0;
1783 switch (pbp->bio_cmd) {
1785 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
1786 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1787 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
1789 ndisks = sc->sc_ndisks;
1792 ndisks = sc->sc_ndisks - 1;
1794 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
1795 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1800 KASSERT(!round_robin || !verify,
1801 ("ROUND-ROBIN and VERIFY are mutually exclusive."));
1802 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
1807 * Delay the request if it is colliding with a synchronization
1810 if (g_raid3_sync_collision(sc, pbp)) {
1811 g_raid3_regular_delay(sc, pbp);
1818 sc->sc_last_write = time_uptime;
1820 ndisks = sc->sc_ndisks;
1823 for (n = 0; n < ndisks; n++) {
1824 disk = &sc->sc_disks[n];
1825 cbp = g_raid3_clone_bio(sc, pbp);
1827 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1828 g_raid3_destroy_bio(sc, cbp);
1830 * To prevent deadlock, we must run back up
1831 * with the ENOMEM for failed requests of any
1832 * of our consumers. Our own sync requests
1833 * can stick around, as they are finite.
1835 if ((pbp->bio_cflags &
1836 G_RAID3_BIO_CFLAG_REGULAR) != 0) {
1837 g_io_deliver(pbp, ENOMEM);
1842 cbp->bio_offset = offset;
1843 cbp->bio_length = length;
1844 cbp->bio_done = g_raid3_done;
1845 switch (pbp->bio_cmd) {
1847 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1849 * Replace invalid component with the parity
1852 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1853 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1854 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1855 } else if (round_robin &&
1856 disk->d_no == sc->sc_round_robin) {
1858 * In round-robin mode skip one data component
1859 * and use parity component when reading.
1861 pbp->bio_driver2 = disk;
1862 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1863 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1864 sc->sc_round_robin++;
1866 } else if (verify && disk->d_no == sc->sc_ndisks - 1) {
1867 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1872 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
1873 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
1874 if (n == ndisks - 1) {
1876 * Active parity component, mark it as such.
1879 G_RAID3_BIO_CFLAG_PARITY;
1882 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1883 if (n == ndisks - 1) {
1885 * Parity component is not connected,
1886 * so destroy its request.
1889 G_RAID3_BIO_PFLAG_NOPARITY;
1890 g_raid3_destroy_bio(sc, cbp);
1894 G_RAID3_BIO_CFLAG_NODISK;
1901 cbp->bio_caller2 = disk;
1903 switch (pbp->bio_cmd) {
1907 * If we are in round-robin mode and 'round_robin' is
1908 * still 1, it means that we skipped the parity component
1909 * for this read and must reset the sc_round_robin field.
1911 sc->sc_round_robin = 0;
1913 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1914 disk = cbp->bio_caller2;
1915 cp = disk->d_consumer;
1916 cbp->bio_to = cp->provider;
1917 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1918 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1919 ("Consumer %s not opened (r%dw%de%d).",
1920 cp->provider->name, cp->acr, cp->acw, cp->ace));
1922 g_io_request(cbp, cp);
1928 * Put the request onto the inflight queue, so we can check that new
1929 * synchronization requests don't collide with it.
1931 bioq_insert_tail(&sc->sc_inflight, pbp);
1934 * Bump syncid on first write.
1936 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1937 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1938 g_raid3_bump_syncid(sc);
1940 g_raid3_scatter(pbp);
1947 g_raid3_can_destroy(struct g_raid3_softc *sc)
1950 struct g_consumer *cp;
1952 g_topology_assert();
1954 if (gp->softc == NULL)
1956 LIST_FOREACH(cp, &gp->consumer, consumer) {
1957 if (g_raid3_is_busy(sc, cp))
1960 gp = sc->sc_sync.ds_geom;
1961 LIST_FOREACH(cp, &gp->consumer, consumer) {
1962 if (g_raid3_is_busy(sc, cp))
1965 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1971 g_raid3_try_destroy(struct g_raid3_softc *sc)
1974 g_topology_assert_not();
1975 sx_assert(&sc->sc_lock, SX_XLOCKED);
1977 if (sc->sc_rootmount != NULL) {
1978 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1980 root_mount_rel(sc->sc_rootmount);
1981 sc->sc_rootmount = NULL;
1985 if (!g_raid3_can_destroy(sc)) {
1986 g_topology_unlock();
1989 sc->sc_geom->softc = NULL;
1990 sc->sc_sync.ds_geom->softc = NULL;
1991 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
1992 g_topology_unlock();
1993 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
1995 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
1996 sx_xunlock(&sc->sc_lock);
1997 wakeup(&sc->sc_worker);
1998 sc->sc_worker = NULL;
2000 g_topology_unlock();
2001 g_raid3_destroy_device(sc);
2002 free(sc->sc_disks, M_RAID3);
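/*
 * Worker thread: process events first, then regular, synchronization
 * and flush requests taken from the queue.
 */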
2012 g_raid3_worker(void *arg)
2014 struct g_raid3_softc *sc;
2015 struct g_raid3_event *ep;
2020 thread_lock(curthread);
2021 sched_prio(curthread, PRIBIO);
2022 thread_unlock(curthread);
2024 sx_xlock(&sc->sc_lock);
2026 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2028 * First take a look at events.
2029 * It is important to handle events before any I/O requests.
2031 ep = g_raid3_event_get(sc);
2033 g_raid3_event_remove(sc, ep);
2034 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2035 /* Update only device status. */
2037 "Running event for device %s.",
2040 g_raid3_update_device(sc, 1);
2042 /* Update disk status. */
2043 G_RAID3_DEBUG(3, "Running event for disk %s.",
2044 g_raid3_get_diskname(ep->e_disk));
2045 ep->e_error = g_raid3_update_disk(ep->e_disk,
2047 if (ep->e_error == 0)
2048 g_raid3_update_device(sc, 0);
2050 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2051 KASSERT(ep->e_error == 0,
2052 ("Error cannot be handled."));
2053 g_raid3_event_free(ep);
2055 ep->e_flags |= G_RAID3_EVENT_DONE;
2056 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2058 mtx_lock(&sc->sc_events_mtx);
2060 mtx_unlock(&sc->sc_events_mtx);
2063 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2064 if (g_raid3_try_destroy(sc)) {
2065 curthread->td_pflags &= ~TDP_GEOM;
2066 G_RAID3_DEBUG(1, "Thread exiting.");
2070 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
2074 * Check if we can mark the array as CLEAN and, if we can't,
2075 * determine how many seconds we should wait.
2077 timeout = g_raid3_idle(sc, -1);
2081 /* Get first request from the queue. */
2082 mtx_lock(&sc->sc_queue_mtx);
2083 bp = bioq_first(&sc->sc_queue);
2086 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2087 mtx_unlock(&sc->sc_queue_mtx);
2088 if (g_raid3_try_destroy(sc)) {
2089 curthread->td_pflags &= ~TDP_GEOM;
2090 G_RAID3_DEBUG(1, "Thread exiting.");
2093 mtx_lock(&sc->sc_queue_mtx);
2095 sx_xunlock(&sc->sc_lock);
2097 * XXX: We can miss an event here, because an event
2098 * can be added without the sx device lock and without
2099 * the mtx queue lock. Maybe I should just stop using a
2100 * dedicated mutex for event synchronization and
2101 * stick with the queue lock?
2102 * The event will hang here until the next I/O request
2103 * or the next event is received.
2105 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2107 sx_xlock(&sc->sc_lock);
2108 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2112 bioq_remove(&sc->sc_queue, bp);
2113 mtx_unlock(&sc->sc_queue_mtx);
2115 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2116 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2117 g_raid3_sync_request(bp); /* READ */
2118 } else if (bp->bio_to != sc->sc_provider) {
2119 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2120 g_raid3_regular_request(bp);
2121 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2122 g_raid3_sync_request(bp); /* WRITE */
2125 ("Invalid request cflags=0x%hhx to=%s.",
2126 bp->bio_cflags, bp->bio_to->name));
2128 } else if (g_raid3_register_request(bp) != 0) {
2129 mtx_lock(&sc->sc_queue_mtx);
2130 bioq_insert_head(&sc->sc_queue, bp);
2132 * We are short on memory, so let's see if there are finished
2133 * requests we can free.
2135 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2136 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2140 * No finished regular requests, so at least keep
2141 * synchronization running.
2143 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2144 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2147 sx_xunlock(&sc->sc_lock);
2148 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2149 "r3:lowmem", hz / 10);
2150 sx_xlock(&sc->sc_lock);
2152 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2157 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2160 sx_assert(&sc->sc_lock, SX_LOCKED);
2161 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2163 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2164 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2165 g_raid3_get_diskname(disk), sc->sc_name);
2166 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2167 } else if (sc->sc_idle &&
2168 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2169 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2170 g_raid3_get_diskname(disk), sc->sc_name);
2171 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
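/*
 * Start synchronization of the component that is in the SYNCHRONIZING
 * state: open a sync consumer and fire off the initial read requests.
 */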
2176 g_raid3_sync_start(struct g_raid3_softc *sc)
2178 struct g_raid3_disk *disk;
2179 struct g_consumer *cp;
2184 g_topology_assert_not();
2185 sx_assert(&sc->sc_lock, SX_XLOCKED);
2187 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2188 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2190 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2191 sc->sc_name, sc->sc_state));
2193 for (n = 0; n < sc->sc_ndisks; n++) {
2194 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2196 disk = &sc->sc_disks[n];
2202 sx_xunlock(&sc->sc_lock);
2204 cp = g_new_consumer(sc->sc_sync.ds_geom);
2205 error = g_attach(cp, sc->sc_provider);
2207 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2208 error = g_access(cp, 1, 0, 0);
2209 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2210 g_topology_unlock();
2211 sx_xlock(&sc->sc_lock);
2213 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2214 g_raid3_get_diskname(disk));
2215 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2216 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2217 KASSERT(disk->d_sync.ds_consumer == NULL,
2218 ("Sync consumer already exists (device=%s, disk=%s).",
2219 sc->sc_name, g_raid3_get_diskname(disk)));
2221 disk->d_sync.ds_consumer = cp;
2222 disk->d_sync.ds_consumer->private = disk;
2223 disk->d_sync.ds_consumer->index = 0;
2224 sc->sc_syncdisk = disk;
2227 * Allocate memory for synchronization bios and initialize them.
2229 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2231 for (n = 0; n < g_raid3_syncreqs; n++) {
2233 disk->d_sync.ds_bios[n] = bp;
2234 bp->bio_parent = NULL;
2235 bp->bio_cmd = BIO_READ;
2236 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2238 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2239 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2240 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2241 bp->bio_done = g_raid3_sync_done;
2242 bp->bio_from = disk->d_sync.ds_consumer;
2243 bp->bio_to = sc->sc_provider;
2244 bp->bio_caller1 = (void *)(uintptr_t)n;
2247 /* Set the number of in-flight synchronization requests. */
2248 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2251 * Fire off first synchronization requests.
2253 for (n = 0; n < g_raid3_syncreqs; n++) {
2254 bp = disk->d_sync.ds_bios[n];
2255 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2256 disk->d_sync.ds_consumer->index++;
2258 * Delay the request if it is colliding with a regular request.
2260 if (g_raid3_regular_collision(sc, bp))
2261 g_raid3_sync_delay(sc, bp);
2263 g_io_request(bp, disk->d_sync.ds_consumer);
2268 * Stop synchronization process.
2269 * type: 0 - synchronization finished
2270 * 1 - synchronization stopped
2273 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2275 struct g_raid3_disk *disk;
2276 struct g_consumer *cp;
2278 g_topology_assert_not();
2279 sx_assert(&sc->sc_lock, SX_LOCKED);
2281 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2282 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2284 disk = sc->sc_syncdisk;
2285 sc->sc_syncdisk = NULL;
2286 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2287 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2288 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2289 g_raid3_disk_state2str(disk->d_state)));
2290 if (disk->d_sync.ds_consumer == NULL)
2294 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2295 sc->sc_name, g_raid3_get_diskname(disk));
2296 } else /* if (type == 1) */ {
2297 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2298 sc->sc_name, g_raid3_get_diskname(disk));
2300 free(disk->d_sync.ds_bios, M_RAID3);
2301 disk->d_sync.ds_bios = NULL;
2302 cp = disk->d_sync.ds_consumer;
2303 disk->d_sync.ds_consumer = NULL;
2304 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2305 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2307 g_raid3_kill_consumer(sc, cp);
2308 g_topology_unlock();
2309 sx_xlock(&sc->sc_lock);
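/*
 * Create and announce the raid3/<name> provider and, if the device is
 * degraded, kick off synchronization.
 */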
2313 g_raid3_launch_provider(struct g_raid3_softc *sc)
2315 struct g_provider *pp;
2317 sx_assert(&sc->sc_lock, SX_LOCKED);
2320 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2321 pp->mediasize = sc->sc_mediasize;
2322 pp->sectorsize = sc->sc_sectorsize;
2323 sc->sc_provider = pp;
2324 g_error_provider(pp, 0);
2325 g_topology_unlock();
2326 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2327 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2329 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2330 g_raid3_sync_start(sc);
2334 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2338 g_topology_assert_not();
2339 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2343 g_error_provider(sc->sc_provider, ENXIO);
2344 mtx_lock(&sc->sc_queue_mtx);
2345 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2346 bioq_remove(&sc->sc_queue, bp);
2347 g_io_deliver(bp, ENXIO);
2349 mtx_unlock(&sc->sc_queue_mtx);
2350 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2351 sc->sc_provider->name);
2352 sc->sc_provider->flags |= G_PF_WITHER;
2353 g_orphan_provider(sc->sc_provider, ENXIO);
2354 g_topology_unlock();
2355 sc->sc_provider = NULL;
2356 if (sc->sc_syncdisk != NULL)
2357 g_raid3_sync_stop(sc, 1);
2361 g_raid3_go(void *arg)
2363 struct g_raid3_softc *sc;
2366 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2367 g_raid3_event_send(sc, 0,
2368 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
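/*
 * Decide the initial state of a newly connected disk by comparing its
 * syncid with the device syncid (ACTIVE, SYNCHRONIZING, STALE or NONE).
 */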
2372 g_raid3_determine_state(struct g_raid3_disk *disk)
2374 struct g_raid3_softc *sc;
2378 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2379 if ((disk->d_flags &
2380 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2381 /* Disk does not need synchronization. */
2382 state = G_RAID3_DISK_STATE_ACTIVE;
2385 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2387 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2389 * We can start synchronization from
2390 * the stored offset.
2392 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2394 state = G_RAID3_DISK_STATE_STALE;
2397 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2399	 * Reset all synchronization data for this disk,
2400	 * because even if it was synchronized, it was
2401	 * synchronized against disks with a different syncid.
2403 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2404 disk->d_sync.ds_offset = 0;
2405 disk->d_sync.ds_offset_done = 0;
2406 disk->d_sync.ds_syncid = sc->sc_syncid;
2407 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2408 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2409 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2411 state = G_RAID3_DISK_STATE_STALE;
2413 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2415	 * Not good, NOT GOOD!
2416	 * It means that the device was started on stale disks
2417	 * and a fresher disk has just arrived.
2418	 * If there were writes, the device is broken, sorry.
2419	 * The best choice here is to leave this disk alone
2420	 * and inform the user loudly.
2422 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2423	    "disk (%s) arrived! It will not be connected to the "
2424 "running device.", sc->sc_name,
2425 g_raid3_get_diskname(disk));
2426 g_raid3_destroy_disk(disk);
2427 state = G_RAID3_DISK_STATE_NONE;
2428 /* Return immediately, because disk was destroyed. */
2431 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2432 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2437 * Update device state.
2440 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2442 struct g_raid3_disk *disk;
2445 sx_assert(&sc->sc_lock, SX_XLOCKED);
2447 switch (sc->sc_state) {
2448 case G_RAID3_DEVICE_STATE_STARTING:
2450 u_int n, ndirty, ndisks, genid, syncid;
2452 KASSERT(sc->sc_provider == NULL,
2453 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2455	 * Are we ready? We are if all disks are connected, or if
2456	 * one disk is missing and 'force' is true.
2458 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2460 callout_drain(&sc->sc_callout);
2464 * Timeout expired, so destroy device.
2466 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2467 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2468 __LINE__, sc->sc_rootmount);
2469 root_mount_rel(sc->sc_rootmount);
2470 sc->sc_rootmount = NULL;
2476 * Find the biggest genid.
2479 for (n = 0; n < sc->sc_ndisks; n++) {
2480 disk = &sc->sc_disks[n];
2481 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2483 if (disk->d_genid > genid)
2484 genid = disk->d_genid;
2486 sc->sc_genid = genid;
2488 * Remove all disks without the biggest genid.
2490 for (n = 0; n < sc->sc_ndisks; n++) {
2491 disk = &sc->sc_disks[n];
2492 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2494 if (disk->d_genid < genid) {
2496 "Component %s (device %s) broken, skipping.",
2497 g_raid3_get_diskname(disk), sc->sc_name);
2498 g_raid3_destroy_disk(disk);
2503 * There must be at least 'sc->sc_ndisks - 1' components
2504	 * with the same syncid and without the SYNCHRONIZING flag.
2508 * Find the biggest syncid, number of valid components and
2509 * number of dirty components.
2511 ndirty = ndisks = syncid = 0;
2512 for (n = 0; n < sc->sc_ndisks; n++) {
2513 disk = &sc->sc_disks[n];
2514 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2516 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2518 if (disk->d_sync.ds_syncid > syncid) {
2519 syncid = disk->d_sync.ds_syncid;
2521 } else if (disk->d_sync.ds_syncid < syncid) {
2524 if ((disk->d_flags &
2525 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2531 * Do we have enough valid components?
2533 if (ndisks + 1 < sc->sc_ndisks) {
2535 "Device %s is broken, too few valid components.",
2537 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2541 * If there is one DIRTY component and all disks are present,
2542 * mark it for synchronization. If there is more than one DIRTY
2543	 * component, mark the parity component for synchronization.
2545 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2546 for (n = 0; n < sc->sc_ndisks; n++) {
2547 disk = &sc->sc_disks[n];
2548 if ((disk->d_flags &
2549 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2553 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2555 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2556 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2557 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2560 sc->sc_syncid = syncid;
2562 /* Remember to bump syncid on first write. */
2563 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2565 if (ndisks == sc->sc_ndisks)
2566 state = G_RAID3_DEVICE_STATE_COMPLETE;
2567 else /* if (ndisks == sc->sc_ndisks - 1) */
2568 state = G_RAID3_DEVICE_STATE_DEGRADED;
2569 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2570 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2571 g_raid3_device_state2str(state));
2572 sc->sc_state = state;
2573 for (n = 0; n < sc->sc_ndisks; n++) {
2574 disk = &sc->sc_disks[n];
2575 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2577 state = g_raid3_determine_state(disk);
2578 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2579 if (state == G_RAID3_DISK_STATE_STALE)
2580 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2584 case G_RAID3_DEVICE_STATE_DEGRADED:
2586	 * Genid needs to be bumped immediately, so do it here.
2588 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2589 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2590 g_raid3_bump_genid(sc);
2593 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2595 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2596 sc->sc_ndisks - 1) {
2597 if (sc->sc_provider != NULL)
2598 g_raid3_destroy_provider(sc);
2599 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2602 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2604 state = G_RAID3_DEVICE_STATE_COMPLETE;
2606 "Device %s state changed from %s to %s.",
2607 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2608 g_raid3_device_state2str(state));
2609 sc->sc_state = state;
2611 if (sc->sc_provider == NULL)
2612 g_raid3_launch_provider(sc);
2613 if (sc->sc_rootmount != NULL) {
2614 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2616 root_mount_rel(sc->sc_rootmount);
2617 sc->sc_rootmount = NULL;
2620 case G_RAID3_DEVICE_STATE_COMPLETE:
2622	 * Genid needs to be bumped immediately, so do it here.
2624 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2625 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2626 g_raid3_bump_genid(sc);
2629 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2631 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2633 ("Too few ACTIVE components in COMPLETE state (device %s).",
2635 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2636 sc->sc_ndisks - 1) {
2637 state = G_RAID3_DEVICE_STATE_DEGRADED;
2639 "Device %s state changed from %s to %s.",
2640 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2641 g_raid3_device_state2str(state));
2642 sc->sc_state = state;
2644 if (sc->sc_provider == NULL)
2645 g_raid3_launch_provider(sc);
2646 if (sc->sc_rootmount != NULL) {
2647 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2649 root_mount_rel(sc->sc_rootmount);
2650 sc->sc_rootmount = NULL;
2654 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2655 g_raid3_device_state2str(sc->sc_state)));
2661 * Update disk state and device state if needed.
2663 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2664 "Disk %s state changed from %s to %s (device %s).", \
2665 g_raid3_get_diskname(disk), \
2666 g_raid3_disk_state2str(disk->d_state), \
2667 g_raid3_disk_state2str(state), sc->sc_name)
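/*
 * Transitions handled below (enforced by the KASSERTs):
 * NONE -> NEW, NEW/SYNCHRONIZING -> ACTIVE, NEW -> STALE,
 * NEW -> SYNCHRONIZING and ACTIVE/STALE/SYNCHRONIZING/NEW -> DISCONNECTED.
 */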
2669 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2671 struct g_raid3_softc *sc;
2674 sx_assert(&sc->sc_lock, SX_XLOCKED);
2677 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2678 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2679 g_raid3_disk_state2str(state));
2681 case G_RAID3_DISK_STATE_NEW:
2683 * Possible scenarios:
2684	 * 1. A new disk arrives.
2686 /* Previous state should be NONE. */
2687 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2688 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2689 g_raid3_disk_state2str(disk->d_state)));
2690 DISK_STATE_CHANGED();
2692 disk->d_state = state;
2693 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2694 sc->sc_name, g_raid3_get_diskname(disk));
2695 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2697 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2698 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2699 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2700 g_raid3_device_state2str(sc->sc_state),
2701 g_raid3_get_diskname(disk),
2702 g_raid3_disk_state2str(disk->d_state)));
2703 state = g_raid3_determine_state(disk);
2704 if (state != G_RAID3_DISK_STATE_NONE)
2707 case G_RAID3_DISK_STATE_ACTIVE:
2709 * Possible scenarios:
2710 * 1. New disk does not need synchronization.
2711 * 2. Synchronization process finished successfully.
2713 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2714 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2715 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2716 g_raid3_device_state2str(sc->sc_state),
2717 g_raid3_get_diskname(disk),
2718 g_raid3_disk_state2str(disk->d_state)));
2719 /* Previous state should be NEW or SYNCHRONIZING. */
2720 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2721 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2722 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2723 g_raid3_disk_state2str(disk->d_state)));
2724 DISK_STATE_CHANGED();
2726 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2727 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2728 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2729 g_raid3_sync_stop(sc, 0);
2731 disk->d_state = state;
2732 disk->d_sync.ds_offset = 0;
2733 disk->d_sync.ds_offset_done = 0;
2734 g_raid3_update_idle(sc, disk);
2735 g_raid3_update_metadata(disk);
2736 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2737 sc->sc_name, g_raid3_get_diskname(disk));
2739 case G_RAID3_DISK_STATE_STALE:
2741 * Possible scenarios:
2742 * 1. Stale disk was connected.
2744 /* Previous state should be NEW. */
2745 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2746 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2747 g_raid3_disk_state2str(disk->d_state)));
2748 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2749 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2750 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2751 g_raid3_device_state2str(sc->sc_state),
2752 g_raid3_get_diskname(disk),
2753 g_raid3_disk_state2str(disk->d_state)));
2755	 * STALE state is only possible if the device is marked NOAUTOSYNC.
2758 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2759 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2760 g_raid3_device_state2str(sc->sc_state),
2761 g_raid3_get_diskname(disk),
2762 g_raid3_disk_state2str(disk->d_state)));
2763 DISK_STATE_CHANGED();
2765 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2766 disk->d_state = state;
2767 g_raid3_update_metadata(disk);
2768 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2769 sc->sc_name, g_raid3_get_diskname(disk));
2771 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2773 * Possible scenarios:
2774 * 1. Disk which needs synchronization was connected.
2776 /* Previous state should be NEW. */
2777 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2778 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2779 g_raid3_disk_state2str(disk->d_state)));
2780 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2781 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2782 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2783 g_raid3_device_state2str(sc->sc_state),
2784 g_raid3_get_diskname(disk),
2785 g_raid3_disk_state2str(disk->d_state)));
2786 DISK_STATE_CHANGED();
2788 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2789 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2790 disk->d_state = state;
2791 if (sc->sc_provider != NULL) {
2792 g_raid3_sync_start(sc);
2793 g_raid3_update_metadata(disk);
2796 case G_RAID3_DISK_STATE_DISCONNECTED:
2798 * Possible scenarios:
2799	 * 1. The device wasn't running yet, but a disk disappeared.
2800	 * 2. A disk was active and disappeared.
2801	 * 3. A disk disappeared during the synchronization process.
2803 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2804 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2806	 * Previous state should be ACTIVE, STALE or SYNCHRONIZING.
2809 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2810 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2811 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2812 ("Wrong disk state (%s, %s).",
2813 g_raid3_get_diskname(disk),
2814 g_raid3_disk_state2str(disk->d_state)));
2815 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2816 /* Previous state should be NEW. */
2817 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2818 ("Wrong disk state (%s, %s).",
2819 g_raid3_get_diskname(disk),
2820 g_raid3_disk_state2str(disk->d_state)));
2822	 * Reset bumping syncid if the disk disappeared in STARTING state.
2825 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2826 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2829 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2831 g_raid3_device_state2str(sc->sc_state),
2832 g_raid3_get_diskname(disk),
2833 g_raid3_disk_state2str(disk->d_state)));
2836 DISK_STATE_CHANGED();
2837 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2838 sc->sc_name, g_raid3_get_diskname(disk));
2840 g_raid3_destroy_disk(disk);
2843 KASSERT(1 == 0, ("Unknown state (%u).", state));
2848 #undef DISK_STATE_CHANGED
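/*
 * Read and decode the on-disk metadata from the given consumer.  The
 * metadata lives in the provider's last sector; magic, version and MD5
 * checksum are verified before the result is accepted.
 */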
2851 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2853 struct g_provider *pp;
2857 g_topology_assert();
2859 error = g_access(cp, 1, 0, 0);
2863 g_topology_unlock();
2864	/* Metadata is stored in the last sector. */
2865 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2868 g_access(cp, -1, 0, 0);
2870 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2871 cp->provider->name, error);
2875 /* Decode metadata. */
2876 error = raid3_metadata_decode(buf, md);
2878 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2880 if (md->md_version > G_RAID3_VERSION) {
2882 "Kernel module is too old to handle metadata from %s.",
2883 cp->provider->name);
2887 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2888 cp->provider->name);
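/*
 * Sanity-check component metadata against the already configured device:
 * component number, total number of disks, media and sector sizes and the
 * flag masks all have to be consistent, otherwise the disk is skipped.
 */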
2896 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2897 struct g_raid3_metadata *md)
2900 if (md->md_no >= sc->sc_ndisks) {
2901 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2902 pp->name, md->md_no);
2905 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2906 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2907 pp->name, md->md_no);
2910 if (md->md_all != sc->sc_ndisks) {
2912 "Invalid '%s' field on disk %s (device %s), skipping.",
2913 "md_all", pp->name, sc->sc_name);
2916 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2917 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2918 "0) on disk %s (device %s), skipping.", pp->name,
2922 if (md->md_mediasize != sc->sc_mediasize) {
2924 "Invalid '%s' field on disk %s (device %s), skipping.",
2925 "md_mediasize", pp->name, sc->sc_name);
2928 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2930 "Invalid '%s' field on disk %s (device %s), skipping.",
2931 "md_mediasize", pp->name, sc->sc_name);
2934 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2936 "Invalid size of disk %s (device %s), skipping.", pp->name,
2940 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2942 "Invalid '%s' field on disk %s (device %s), skipping.",
2943 "md_sectorsize", pp->name, sc->sc_name);
2946 if (md->md_sectorsize != sc->sc_sectorsize) {
2948 "Invalid '%s' field on disk %s (device %s), skipping.",
2949 "md_sectorsize", pp->name, sc->sc_name);
2952 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2954 "Invalid sector size of disk %s (device %s), skipping.",
2955 pp->name, sc->sc_name);
2958 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2960 "Invalid device flags on disk %s (device %s), skipping.",
2961 pp->name, sc->sc_name);
2964 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2965 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2967	 * VERIFY and ROUND-ROBIN options are mutually exclusive.
2969 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
2970 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
2973 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
2975 "Invalid disk flags on disk %s (device %s), skipping.",
2976 pp->name, sc->sc_name);
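/*
 * Attach a tasted component: validate its metadata, reject components
 * with an outdated genid, create the disk structure and queue a NEW
 * event for it.  Metadata written by an older version is upgraded.
 */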
2983 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
2984 struct g_raid3_metadata *md)
2986 struct g_raid3_disk *disk;
2989 g_topology_assert_not();
2990 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
2992 error = g_raid3_check_metadata(sc, pp, md);
2995 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
2996 md->md_genid < sc->sc_genid) {
2997 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
2998 pp->name, sc->sc_name);
3001 disk = g_raid3_init_disk(sc, pp, md, &error);
3004 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3005 G_RAID3_EVENT_WAIT);
3008 if (md->md_version < G_RAID3_VERSION) {
3009 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3010 pp->name, md->md_version, G_RAID3_VERSION);
3011 g_raid3_update_metadata(disk);
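/*
 * GEOM event handler that finishes a delayed destroy once the provider
 * has been closed for the last time (posted from g_raid3_access()).
 */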
3017 g_raid3_destroy_delayed(void *arg, int flag)
3019 struct g_raid3_softc *sc;
3022 if (flag == EV_CANCEL) {
3023 G_RAID3_DEBUG(1, "Destroying canceled.");
3027 g_topology_unlock();
3028 sx_xlock(&sc->sc_lock);
3029 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3030 ("DESTROY flag set on %s.", sc->sc_name));
3031 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3032 ("DESTROYING flag not set on %s.", sc->sc_name));
3033 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3034 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3036 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3037 sx_xunlock(&sc->sc_lock);
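/*
 * Access method for the raid3 provider.  Opening fails when the device
 * is being destroyed or has too few ACTIVE components; closing the last
 * writer marks the device idle, and the last close of a DESTROYING
 * device schedules the delayed destroy.
 */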
3043 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3045 struct g_raid3_softc *sc;
3046 int dcr, dcw, dce, error = 0;
3048 g_topology_assert();
3049 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3052 sc = pp->geom->softc;
3053 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3055 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3057 dcr = pp->acr + acr;
3058 dcw = pp->acw + acw;
3059 dce = pp->ace + ace;
3061 g_topology_unlock();
3062 sx_xlock(&sc->sc_lock);
3063 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3064 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3065 if (acr > 0 || acw > 0 || ace > 0)
3069 if (dcw == 0 && !sc->sc_idle)
3070 g_raid3_idle(sc, dcw);
3071 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3072 if (acr > 0 || acw > 0 || ace > 0) {
3076 if (dcr == 0 && dcw == 0 && dce == 0) {
3077 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3082 sx_xunlock(&sc->sc_lock);
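/*
 * Create a raid3 device from metadata: allocate the softc and per-disk
 * array, set up the main and synchronization geoms, the UMA zones
 * (unless kern.geom.raid3.use_malloc is set), the worker thread and the
 * startup timeout.
 */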
3087 static struct g_geom *
3088 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3090 struct g_raid3_softc *sc;
3095 g_topology_assert();
3096 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3098	/* One disk is the minimum. */
3104 gp = g_new_geomf(mp, "%s", md->md_name);
3105 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3106 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3108 gp->start = g_raid3_start;
3109 gp->orphan = g_raid3_orphan;
3110 gp->access = g_raid3_access;
3111 gp->dumpconf = g_raid3_dumpconf;
3113 sc->sc_id = md->md_id;
3114 sc->sc_mediasize = md->md_mediasize;
3115 sc->sc_sectorsize = md->md_sectorsize;
3116 sc->sc_ndisks = md->md_all;
3117 sc->sc_round_robin = 0;
3118 sc->sc_flags = md->md_mflags;
3121 sc->sc_last_write = time_uptime;
3123 for (n = 0; n < sc->sc_ndisks; n++) {
3124 sc->sc_disks[n].d_softc = sc;
3125 sc->sc_disks[n].d_no = n;
3126 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3128 sx_init(&sc->sc_lock, "graid3:lock");
3129 bioq_init(&sc->sc_queue);
3130 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3131 bioq_init(&sc->sc_regular_delayed);
3132 bioq_init(&sc->sc_inflight);
3133 bioq_init(&sc->sc_sync_delayed);
3134 TAILQ_INIT(&sc->sc_events);
3135 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3136 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
3137 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3140 sc->sc_provider = NULL;
3142 * Synchronization geom.
3144 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3146 gp->orphan = g_raid3_orphan;
3147 sc->sc_sync.ds_geom = gp;
3149 if (!g_raid3_use_malloc) {
3150 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3151 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3153 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3154 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3155 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3156 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3157 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3158 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3160 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3161 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3162 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3163 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3164 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3165 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3167 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3168 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3169 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3170 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3173 error = kthread_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3174 "g_raid3 %s", md->md_name);
3176 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3178 if (!g_raid3_use_malloc) {
3179 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3180 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3181 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3183 g_destroy_geom(sc->sc_sync.ds_geom);
3184 mtx_destroy(&sc->sc_events_mtx);
3185 mtx_destroy(&sc->sc_queue_mtx);
3186 sx_destroy(&sc->sc_lock);
3187 g_destroy_geom(sc->sc_geom);
3188 free(sc->sc_disks, M_RAID3);
3193 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3194 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3196 sc->sc_rootmount = root_mount_hold("GRAID3");
3197 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3202 timeout = atomic_load_acq_int(&g_raid3_timeout);
3203 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3204 return (sc->sc_geom);
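/*
 * Tear the device down.  'how' selects the behaviour when the provider
 * is still open: SOFT refuses, DELAYED postpones the destroy until the
 * last close, HARD proceeds anyway.  The worker thread is woken up and
 * waited for before the remaining resources are freed.
 */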
3208 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3210 struct g_provider *pp;
3212 g_topology_assert_not();
3215 sx_assert(&sc->sc_lock, SX_XLOCKED);
3217 pp = sc->sc_provider;
3218 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3220 case G_RAID3_DESTROY_SOFT:
3222 "Device %s is still open (r%dw%de%d).", pp->name,
3223 pp->acr, pp->acw, pp->ace);
3225 case G_RAID3_DESTROY_DELAYED:
3227 "Device %s will be destroyed on last close.",
3229 if (sc->sc_syncdisk != NULL)
3230 g_raid3_sync_stop(sc, 1);
3231 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3233 case G_RAID3_DESTROY_HARD:
3234 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3235	    "can't be definitively removed.", pp->name);
3241 if (sc->sc_geom->softc == NULL) {
3242 g_topology_unlock();
3245 sc->sc_geom->softc = NULL;
3246 sc->sc_sync.ds_geom->softc = NULL;
3247 g_topology_unlock();
3249 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3250 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3251 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3252 sx_xunlock(&sc->sc_lock);
3253 mtx_lock(&sc->sc_queue_mtx);
3255 wakeup(&sc->sc_queue);
3256 mtx_unlock(&sc->sc_queue_mtx);
3257 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3258 while (sc->sc_worker != NULL)
3259 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3260 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3261 sx_xlock(&sc->sc_lock);
3262 g_raid3_destroy_device(sc);
3263 free(sc->sc_disks, M_RAID3);
3269 g_raid3_taste_orphan(struct g_consumer *cp)
3272 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3273 cp->provider->name));
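/*
 * Taste method: read raid3 metadata from the offered provider, find or
 * create the device it belongs to and add the provider as a component.
 * Arrays are typically labelled from userland, e.g.
 * "graid3 label data da0 da1 da2", which writes the metadata this
 * function looks for.
 */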
3276 static struct g_geom *
3277 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3279 struct g_raid3_metadata md;
3280 struct g_raid3_softc *sc;
3281 struct g_consumer *cp;
3285 g_topology_assert();
3286 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3287 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3289 gp = g_new_geomf(mp, "raid3:taste");
3290	/* This orphan function should never be called. */
3291 gp->orphan = g_raid3_taste_orphan;
3292 cp = g_new_consumer(gp);
3294 error = g_raid3_read_metadata(cp, &md);
3296 g_destroy_consumer(cp);
3302 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
3304 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3306 if (g_raid3_debug >= 2)
3307 raid3_metadata_dump(&md);
3310	 * Let's check if the device already exists.
3313 LIST_FOREACH(gp, &mp->geom, geom) {
3317 if (sc->sc_sync.ds_geom == gp)
3319 if (strcmp(md.md_name, sc->sc_name) != 0)
3321 if (md.md_id != sc->sc_id) {
3322 G_RAID3_DEBUG(0, "Device %s already configured.",
3329 gp = g_raid3_create(mp, &md);
3331 G_RAID3_DEBUG(0, "Cannot create device %s.",
3337 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3338 g_topology_unlock();
3339 sx_xlock(&sc->sc_lock);
3340 error = g_raid3_add_disk(sc, pp, &md);
3342 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3343 pp->name, gp->name, error);
3344 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3347 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3353 sx_xunlock(&sc->sc_lock);
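/*
 * Class destroy_geom method: try a soft destroy of the device behind the
 * given geom.
 */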
3359 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3362 struct g_raid3_softc *sc;
3365 g_topology_unlock();
3367 sx_xlock(&sc->sc_lock);
3369 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3371 sx_xunlock(&sc->sc_lock);
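/*
 * Export device and component status as XML for the GEOM configuration
 * dump (consumed by userland, e.g. graid3(8), via the confxml sysctl).
 */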
3377 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3378 struct g_consumer *cp, struct g_provider *pp)
3380 struct g_raid3_softc *sc;
3382 g_topology_assert();
3387 /* Skip synchronization geom. */
3388 if (gp == sc->sc_sync.ds_geom)
3392 } else if (cp != NULL) {
3393 struct g_raid3_disk *disk;
3398 g_topology_unlock();
3399 sx_xlock(&sc->sc_lock);
3400 sbuf_printf(sb, "%s<Type>", indent);
3401 if (disk->d_no == sc->sc_ndisks - 1)
3402 sbuf_printf(sb, "PARITY");
3404 sbuf_printf(sb, "DATA");
3405 sbuf_printf(sb, "</Type>\n");
3406 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3408 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3409 sbuf_printf(sb, "%s<Synchronized>", indent);
3410 if (disk->d_sync.ds_offset == 0)
3411 sbuf_printf(sb, "0%%");
3413 sbuf_printf(sb, "%u%%",
3414 (u_int)((disk->d_sync.ds_offset * 100) /
3415 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3417 sbuf_printf(sb, "</Synchronized>\n");
3419 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3420 disk->d_sync.ds_syncid);
3421 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3422 sbuf_printf(sb, "%s<Flags>", indent);
3423 if (disk->d_flags == 0)
3424 sbuf_printf(sb, "NONE");
3428 #define ADD_FLAG(flag, name) do { \
3429 if ((disk->d_flags & (flag)) != 0) { \
3431 sbuf_printf(sb, ", "); \
3434 sbuf_printf(sb, name); \
3437 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3438 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3439 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3441 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3442 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3445 sbuf_printf(sb, "</Flags>\n");
3446 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3447 g_raid3_disk_state2str(disk->d_state));
3448 sx_xunlock(&sc->sc_lock);
3451 g_topology_unlock();
3452 sx_xlock(&sc->sc_lock);
3453 if (!g_raid3_use_malloc) {
3455 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3456 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3458 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3459 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3461 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3462 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3464 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3465 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3467 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3468 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3470 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3471 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3473 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3474 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3475 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3476 sbuf_printf(sb, "%s<Flags>", indent);
3477 if (sc->sc_flags == 0)
3478 sbuf_printf(sb, "NONE");
3482 #define ADD_FLAG(flag, name) do { \
3483 if ((sc->sc_flags & (flag)) != 0) { \
3485 sbuf_printf(sb, ", "); \
3488 sbuf_printf(sb, name); \
3491 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3492 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3493 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3495 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3498 sbuf_printf(sb, "</Flags>\n");
3499 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3501 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3502 g_raid3_device_state2str(sc->sc_state));
3503 sx_xunlock(&sc->sc_lock);
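/*
 * shutdown_pre_sync event handler: request a delayed destroy of every
 * raid3 device so components can be closed cleanly before the system
 * goes down.
 */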
3509 g_raid3_shutdown_pre_sync(void *arg, int howto)
3512 struct g_geom *gp, *gp2;
3513 struct g_raid3_softc *sc;
3519 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3520 if ((sc = gp->softc) == NULL)
3522 /* Skip synchronization geom. */
3523 if (gp == sc->sc_sync.ds_geom)
3525 g_topology_unlock();
3526 sx_xlock(&sc->sc_lock);
3528 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3530 sx_xunlock(&sc->sc_lock);
3533 g_topology_unlock();
3538 g_raid3_init(struct g_class *mp)
3541 g_raid3_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
3542 g_raid3_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
3543 if (g_raid3_pre_sync == NULL)
3544 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3548 g_raid3_fini(struct g_class *mp)
3551 if (g_raid3_pre_sync != NULL)
3552 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid3_pre_sync);
3555 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);