/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid/g_raid.h>
#include "g_raid_md_if.h"
#include "g_raid_tr_if.h"

static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
u_int g_raid_aggressive_spare = 0;
TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
    &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
u_int g_raid_debug = 0;
TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
    "Debug level");
u_int g_raid_read_err_thresh = 10;
TUNABLE_INT("kern.geom.raid.read_err_thresh", &g_raid_read_err_thresh);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RW,
    &g_raid_read_err_thresh, 0,
    "Number of read errors equated to disk failure");
u_int g_raid_start_timeout = 30;
TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RW,
    &g_raid_start_timeout, 0,
    "Time to wait for all array components");
static u_int g_raid_clean_time = 5;
TUNABLE_INT("kern.geom.raid.clean_time", &g_raid_clean_time);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RW,
    &g_raid_clean_time, 0, "Mark volume as clean when idling");
static u_int g_raid_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.raid.disconnect_on_failure",
    &g_raid_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid_name_format = 0;
TUNABLE_INT("kern.geom.raid.name_format", &g_raid_name_format);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RW,
    &g_raid_name_format, 0, "Provider name format.");
static u_int g_raid_idle_threshold = 1000000;
TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW,
    &g_raid_idle_threshold, 1000000,
    "Time in microseconds to consider a volume idle.");

#define	MSLEEP(rv, ident, mtx, priority, wmesg, timeout)	do {	\
	G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));		\
	rv = msleep((ident), (mtx), (priority), (wmesg), (timeout));	\
	G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident));		\
} while (0)

LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
    LIST_HEAD_INITIALIZER(g_raid_md_classes);

LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
    LIST_HEAD_INITIALIZER(g_raid_tr_classes);

LIST_HEAD(, g_raid_volume) g_raid_volumes =
    LIST_HEAD_INITIALIZER(g_raid_volumes);

static eventhandler_tag g_raid_pre_sync = NULL;
static int g_raid_started = 0;

static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid_taste;
static void g_raid_init(struct g_class *mp);
static void g_raid_fini(struct g_class *mp);

struct g_class g_raid_class = {
	.name = G_RAID_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid_ctl,
	.taste = g_raid_taste,
	.destroy_geom = g_raid_destroy_geom,
	.init = g_raid_init,
	.fini = g_raid_fini
};

static void g_raid_destroy_provider(struct g_raid_volume *vol);
static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid_start(struct bio *bp);
static void g_raid_start_request(struct bio *bp);
static void g_raid_disk_done(struct bio *bp);
static void g_raid_poll(struct g_raid_softc *sc);

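/*
 * Helpers translating numeric node/disk/subdisk/volume states and events
 * into human-readable names for logging.
 */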
static const char *
g_raid_node_event2str(int event)
{

	switch (event) {
	case G_RAID_NODE_E_WAKE:
		return ("WAKE");
	case G_RAID_NODE_E_START:
		return ("START");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_disk_state2str(int state)
{

	switch (state) {
	case G_RAID_DISK_S_NONE:
		return ("NONE");
	case G_RAID_DISK_S_OFFLINE:
		return ("OFFLINE");
	case G_RAID_DISK_S_FAILED:
		return ("FAILED");
	case G_RAID_DISK_S_STALE_FAILED:
		return ("STALE_FAILED");
	case G_RAID_DISK_S_SPARE:
		return ("SPARE");
	case G_RAID_DISK_S_STALE:
		return ("STALE");
	case G_RAID_DISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_disk_event2str(int event)
{

	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_subdisk_state2str(int state)
{

	switch (state) {
	case G_RAID_SUBDISK_S_NONE:
		return ("NONE");
	case G_RAID_SUBDISK_S_FAILED:
		return ("FAILED");
	case G_RAID_SUBDISK_S_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_S_REBUILD:
		return ("REBUILD");
	case G_RAID_SUBDISK_S_UNINITIALIZED:
		return ("UNINITIALIZED");
	case G_RAID_SUBDISK_S_STALE:
		return ("STALE");
	case G_RAID_SUBDISK_S_RESYNC:
		return ("RESYNC");
	case G_RAID_SUBDISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_subdisk_event2str(int event)
{

	switch (event) {
	case G_RAID_SUBDISK_E_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_volume_state2str(int state)
{

	switch (state) {
	case G_RAID_VOLUME_S_STARTING:
		return ("STARTING");
	case G_RAID_VOLUME_S_BROKEN:
		return ("BROKEN");
	case G_RAID_VOLUME_S_DEGRADED:
		return ("DEGRADED");
	case G_RAID_VOLUME_S_SUBOPTIMAL:
		return ("SUBOPTIMAL");
	case G_RAID_VOLUME_S_OPTIMAL:
		return ("OPTIMAL");
	case G_RAID_VOLUME_S_UNSUPPORTED:
		return ("UNSUPPORTED");
	case G_RAID_VOLUME_S_STOPPED:
		return ("STOPPED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_volume_event2str(int event)
{

	switch (event) {
	case G_RAID_VOLUME_E_UP:
		return ("UP");
	case G_RAID_VOLUME_E_DOWN:
		return ("DOWN");
	case G_RAID_VOLUME_E_START:
		return ("START");
	case G_RAID_VOLUME_E_STARTMD:
		return ("STARTMD");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_volume_level2str(int level, int qual)
{

	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		return ("RAID0");
	case G_RAID_VOLUME_RL_RAID1:
		return ("RAID1");
	case G_RAID_VOLUME_RL_RAID3:
		return ("RAID3");
	case G_RAID_VOLUME_RL_RAID4:
		return ("RAID4");
	case G_RAID_VOLUME_RL_RAID5:
		return ("RAID5");
	case G_RAID_VOLUME_RL_RAID6:
		return ("RAID6");
	case G_RAID_VOLUME_RL_RAID1E:
		return ("RAID1E");
	case G_RAID_VOLUME_RL_SINGLE:
		return ("SINGLE");
	case G_RAID_VOLUME_RL_CONCAT:
		return ("CONCAT");
	case G_RAID_VOLUME_RL_RAID5E:
		return ("RAID5E");
	case G_RAID_VOLUME_RL_RAID5EE:
		return ("RAID5EE");
	default:
		return ("UNKNOWN");
	}
}

int
g_raid_volume_str2level(const char *str, int *level, int *qual)
{

	*level = G_RAID_VOLUME_RL_UNKNOWN;
	*qual = G_RAID_VOLUME_RLQ_NONE;
	if (strcasecmp(str, "RAID0") == 0)
		*level = G_RAID_VOLUME_RL_RAID0;
	else if (strcasecmp(str, "RAID1") == 0)
		*level = G_RAID_VOLUME_RL_RAID1;
	else if (strcasecmp(str, "RAID3") == 0)
		*level = G_RAID_VOLUME_RL_RAID3;
	else if (strcasecmp(str, "RAID4") == 0)
		*level = G_RAID_VOLUME_RL_RAID4;
	else if (strcasecmp(str, "RAID5") == 0)
		*level = G_RAID_VOLUME_RL_RAID5;
	else if (strcasecmp(str, "RAID6") == 0)
		*level = G_RAID_VOLUME_RL_RAID6;
	else if (strcasecmp(str, "RAID10") == 0 ||
	    strcasecmp(str, "RAID1E") == 0)
		*level = G_RAID_VOLUME_RL_RAID1E;
	else if (strcasecmp(str, "SINGLE") == 0)
		*level = G_RAID_VOLUME_RL_SINGLE;
	else if (strcasecmp(str, "CONCAT") == 0)
		*level = G_RAID_VOLUME_RL_CONCAT;
	else if (strcasecmp(str, "RAID5E") == 0)
		*level = G_RAID_VOLUME_RL_RAID5E;
	else if (strcasecmp(str, "RAID5EE") == 0)
		*level = G_RAID_VOLUME_RL_RAID5EE;
	else
		return (-1);
	return (0);
}

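/*
 * Example (sketch): a caller mapping a user-supplied level name might do
 * something like the following (error handling is hypothetical):
 *
 *	int level, qual;
 *
 *	if (g_raid_volume_str2level("RAID10", &level, &qual) != 0)
 *		return (EINVAL);
 *	// level == G_RAID_VOLUME_RL_RAID1E here.
 */
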
const char *
g_raid_get_diskname(struct g_raid_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_consumer->provider->name);
}

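/*
 * Push the aggregated state of the disk down to the consumer below via the
 * GEOM::setstate attribute, so that lower levels (e.g. enclosure drivers)
 * may reflect it.
 */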
void
g_raid_report_disk_state(struct g_raid_disk *disk)
{
	struct g_raid_subdisk *sd;
	int len, state;
	uint32_t s;

	if (disk->d_consumer == NULL)
		return;
	if (disk->d_state == G_RAID_DISK_S_FAILED ||
	    disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
		s = G_STATE_FAILED;
	} else {
		state = G_RAID_SUBDISK_S_ACTIVE;
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			if (sd->sd_state < state)
				state = sd->sd_state;
		}
		if (state == G_RAID_SUBDISK_S_FAILED)
			s = G_STATE_FAILED;
		else if (state == G_RAID_SUBDISK_S_NEW ||
		    state == G_RAID_SUBDISK_S_REBUILD)
			s = G_STATE_REBUILD;
		else if (state == G_RAID_SUBDISK_S_STALE ||
		    state == G_RAID_SUBDISK_S_RESYNC)
			s = G_STATE_RESYNC;
		else
			s = G_STATE_ACTIVE;
	}
	len = sizeof(s);
	g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
	G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
	    g_raid_get_diskname(disk), s);
}

void
g_raid_change_disk_state(struct g_raid_disk *disk, int state)
{

	G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
	    g_raid_get_diskname(disk),
	    g_raid_disk_state2str(disk->d_state),
	    g_raid_disk_state2str(state));
	disk->d_state = state;
	g_raid_report_disk_state(disk);
}

void
g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
{

	G_RAID_DEBUG1(0, sd->sd_softc,
	    "Subdisk %s:%d-%s state changed from %s to %s.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
	    g_raid_subdisk_state2str(sd->sd_state),
	    g_raid_subdisk_state2str(state));
	sd->sd_state = state;
	if (sd->sd_disk)
		g_raid_report_disk_state(sd->sd_disk);
}

void
g_raid_change_volume_state(struct g_raid_volume *vol, int state)
{

	G_RAID_DEBUG1(0, vol->v_softc,
	    "Volume %s state changed from %s to %s.",
	    vol->v_name,
	    g_raid_volume_state2str(vol->v_state),
	    g_raid_volume_state2str(state));
	vol->v_state = state;
}

/*
 * --- Events handling functions ---
 * Events in geom_raid are used to update subdisk and volume status from a
 * single thread, which simplifies locking.
 */
static void
g_raid_event_free(struct g_raid_event *ep)
{

	free(ep, M_RAID);
}

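/*
 * Queue an event for the worker thread.  With G_RAID_EVENT_WAIT the caller
 * sleeps until the event has been processed and receives its error code,
 * e.g.:
 *
 *	error = g_raid_event_send(vol, G_RAID_VOLUME_E_START,
 *	    G_RAID_EVENT_VOLUME | G_RAID_EVENT_WAIT);
 */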
int
g_raid_event_send(void *arg, int event, int flags)
{
	struct g_raid_softc *sc;
	struct g_raid_event *ep;
	int error;

	if ((flags & G_RAID_EVENT_VOLUME) != 0) {
		sc = ((struct g_raid_volume *)arg)->v_softc;
	} else if ((flags & G_RAID_EVENT_DISK) != 0) {
		sc = ((struct g_raid_disk *)arg)->d_softc;
	} else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
		sc = ((struct g_raid_subdisk *)arg)->sd_softc;
	} else
		sc = arg;
	ep = malloc(sizeof(*ep), M_RAID,
	    sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->e_tgt = arg;
	ep->e_event = event;
	ep->e_flags = flags;
	ep->e_error = 0;
	G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);

	if ((flags & G_RAID_EVENT_WAIT) == 0)
		return (0);

	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_queue_mtx);
		MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static void
g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep, *tmpep;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if (ep->e_tgt != tgt)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
			g_raid_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_queue_mtx);
}

static int
g_raid_event_check(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep;
	int res = 0;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
		if (ep->e_tgt != tgt)
			continue;
		res = 1;
		break;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (res);
}

/*
 * Return the number of disks in given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid_ndisks(struct g_raid_softc *sc, int state)
{
	struct g_raid_disk *disk;
	u_int n;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == state || state == -1)
			n++;
	}
	return (n);
}

/*
 * Return the number of subdisks in given state.
 * If state is equal to -1, count all connected subdisks.
 */
u_int
g_raid_nsubdisks(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *subdisk;
	struct g_raid_softc *sc;
	u_int i, n;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		subdisk = &vol->v_subdisks[i];
		if ((state == -1 &&
		    subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    subdisk->sd_state == state)
			n++;
	}
	return (n);
}

/*
 * Return the first subdisk in given state.
 * If state is equal to -1, return the first connected subdisk.
 */
struct g_raid_subdisk *
g_raid_get_subdisk(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *sd;
	struct g_raid_softc *sc;
	u_int i;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if ((state == -1 &&
		    sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    sd->sd_state == state)
			return (sd);
	}
	return (NULL);
}

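/*
 * Attach to the named provider and open it for exclusive (r1w1e1) use.
 * Returns NULL if the provider does not exist or cannot be opened.
 */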
struct g_consumer *
g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	g_topology_assert();

	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	pp = g_provider_by_name(name);
	if (pp == NULL)
		return (NULL);
	cp = g_new_consumer(sc->sc_geom);
	if (g_attach(cp, pp) != 0) {
		g_destroy_consumer(cp);
		return (NULL);
	}
	if (g_access(cp, 1, 1, 1) != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		return (NULL);
	}
	return (cp);
}

static u_int
g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

u_int
g_raid_nopens(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	u_int opens;

	opens = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_provider_open != 0)
			opens++;
	}
	return (opens);
}

static int
g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid_nrequests(sc, cp) > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;

	g_topology_assert_not();
	g_topology_lock();

	if (g_raid_consumer_is_busy(sc, cp)) {
		g_topology_unlock();
		return;
	}
	pp = cp->provider;
	if ((pp->geom->flags & G_GEOM_WITHER) == 0) {
		if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
			g_access(cp, -cp->acr, -cp->acw, -cp->ace);
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how retaste events are ignored when consumers
		 * opened for write are closed: the consumer is detached and
		 * destroyed only after the retaste event has been sent.
		 */
		g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
	} else {
		G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	g_topology_unlock();
}

static void
g_raid_orphan(struct g_consumer *cp)
{
	struct g_raid_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
	    G_RAID_EVENT_DISK);
}

static int
g_raid_clean(struct g_raid_volume *vol, int acw)
{
	struct g_raid_softc *sc;
	int timeout;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return (0);
	if (!vol->v_dirty)
		return (0);
	if (vol->v_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 &&
	    vol->v_provider != NULL && vol->v_provider->acw > 0)) {
		timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
		if (timeout > 0)
			return (timeout);
	}
	vol->v_dirty = 0;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
	return (0);
}

static void
g_raid_dirty(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return;
	vol->v_dirty = 1;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
}

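/*
 * Common BIO_FLUSH implementation for transformation modules: clone the
 * flush request to every subdisk that is present and not failed.
 */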
void
g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;
	sc = vol->v_softc;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
		    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_kerneldump_common_done(struct bio *bp)
{

	bp->bio_flags |= BIO_DONE;
}

int
g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct bio bp;

	vol = tr->tro_volume;
	sc = vol->v_softc;

	bzero(&bp, sizeof(bp));
	bp.bio_cmd = BIO_WRITE;
	bp.bio_done = g_raid_tr_kerneldump_common_done;
	bp.bio_attribute = NULL;
	bp.bio_offset = offset;
	bp.bio_length = length;
	bp.bio_data = virtual;
	bp.bio_to = vol->v_provider;

	g_raid_start(&bp);
	while (!(bp.bio_flags & BIO_DONE)) {
		G_RAID_DEBUG1(4, sc, "Poll...");
		g_raid_poll(sc);
		DELAY(10);
	}

	return (bp.bio_error != 0 ? EIO : 0);
}

static int
g_raid_dump(void *arg,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	int error;

	vol = (struct g_raid_volume *)arg;
	G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
	    (long long unsigned)offset, (long long unsigned)length);

	error = G_RAID_TR_KERNELDUMP(vol->v_tr,
	    virtual, physical, offset, length);
	return (error);
}

static void
g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
{
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	struct g_raid_volume *vol;

	gkd = (struct g_kerneldump*)bp->bio_data;
	pp = bp->bio_to;
	vol = pp->private;
	g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
	    pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	gkd->di.dumper = g_raid_dump;
	gkd->di.priv = vol;
	gkd->di.blocksize = vol->v_sectorsize;
	gkd->di.maxiosize = DFLTPHYS;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > vol->v_mediasize)
		gkd->length = vol->v_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}

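/*
 * g_raid_start() is the GEOM entry point for requests to our providers.
 * Anything that cannot be answered immediately is queued to sc_queue and
 * picked up by the per-node worker thread.
 */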
static void
g_raid_start(struct bio *bp)
{
	struct g_raid_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid_start() should not be called at all.
	 */
//	KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
//	    ("Provider's error should be set (error=%d)(mirror=%s).",
//	    bp->bio_to->error, bp->bio_to->name));
	G_RAID_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_raid_kerneldump(sc, bp);
		else
			g_io_deliver(bp, EOPNOTSUPP);
		return;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
	wakeup(sc);
}

static int
g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
{
	/*
	 * 5 cases:
	 * (1) bp entirely below			NO
	 * (2) bp entirely above			NO
	 * (3) bp start below, but end in range		YES
	 * (4) bp entirely within			YES
	 * (5) bp starts within, ends above		YES
	 *
	 * lock range 10-19 (offset 10 length 10)
	 * (1) 1-5: first if kicks it out
	 * (2) 30-35: second if kicks it out
	 * (3) 5-15: passes both ifs
	 * (4) 12-14: passes both ifs
	 * (5) 19-20: passes both
	 */
	off_t lend = lstart + len - 1;
	off_t bstart = bp->bio_offset;
	off_t bend = bp->bio_offset + bp->bio_length - 1;

	if (bend < lstart)
		return (0);
	if (bstart > lend)
		return (0);
	return (1);
}

static int
g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
{
	struct g_raid_lock *lp;

	sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);

	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
			return (1);
	}
	return (0);
}

static void
g_raid_start_request(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = bp->bio_to->geom->softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);
	vol = bp->bio_to->private;

	/*
	 * Check to see if this item is in a locked range.  If so,
	 * queue it to our locked queue and return.  We'll requeue
	 * it when the range is unlocked.  Internal I/O for the
	 * rebuild/rescan/recovery process is excluded from this
	 * check so we can actually do the recovery.
	 */
	if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
	    g_raid_is_in_locked_range(vol, bp)) {
		G_RAID_LOGREQ(3, bp, "Defer request.");
		bioq_insert_tail(&vol->v_locked, bp);
		return;
	}

	/*
	 * If we're actually going to do the write/delete, then
	 * update the idle stats for the volume.
	 */
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
		if (!vol->v_dirty)
			g_raid_dirty(vol);
		vol->v_writes++;
	}

	/*
	 * Put request onto inflight queue, so we can check if new
	 * synchronization requests don't collide with it.  Then tell
	 * the transformation layer to start the I/O.
	 */
	bioq_insert_tail(&vol->v_inflight, bp);
	G_RAID_LOGREQ(4, bp, "Request started");
	G_RAID_TR_IOSTART(vol->v_tr, bp);
}

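/*
 * Range locks: a transformation module locks a region (for rebuild or
 * synchronization), regular I/O into a locked region is deferred to
 * v_locked, and the functions below re-check pending locks as in-flight
 * requests complete.
 */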
static void
g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
{
	off_t off, len;
	struct g_raid_lock *lp;
	struct bio *nbp;

	vol->v_pending_lock = 0;
	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (lp->l_pending) {
			off = lp->l_offset;
			len = lp->l_length;
			lp->l_pending = 0;
			TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
				if (g_raid_bio_overlaps(nbp, off, len))
					lp->l_pending++;
			}
			if (lp->l_pending) {
				vol->v_pending_lock = 1;
				G_RAID_DEBUG1(4, vol->v_softc,
				    "Deferred lock(%jd, %jd) has %d pending",
				    (intmax_t)off, (intmax_t)(off + len),
				    lp->l_pending);
				continue;
			}
			G_RAID_DEBUG1(4, vol->v_softc,
			    "Deferred lock of %jd to %jd completed",
			    (intmax_t)off, (intmax_t)(off + len));
			G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
		}
	}
}

void
g_raid_iodone(struct bio *bp, int error)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = bp->bio_to->geom->softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);
	vol = bp->bio_to->private;
	G_RAID_LOGREQ(3, bp, "Request done: %d.", error);

	/* Update stats if we did a write or delete. */
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
		vol->v_writes--;
		vol->v_last_write = time_uptime;
	}

	bioq_remove(&vol->v_inflight, bp);
	if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
		g_raid_finish_with_locked_ranges(vol, bp);
	getmicrouptime(&vol->v_last_done);
	g_io_deliver(bp, error);
}

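/*
 * Acquire a range lock.  Returns 0 if the lock was granted immediately, or
 * EBUSY if in-flight requests overlap the range; in the latter case the
 * G_RAID_TR_LOCKED() callback fires once the last overlapping request
 * completes (see g_raid_finish_with_locked_ranges() above).
 */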
int
g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
    struct bio *ignore, void *argp)
{
	struct g_raid_softc *sc;
	struct g_raid_lock *lp;
	struct bio *bp;

	sc = vol->v_softc;
	lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
	LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
	lp->l_offset = off;
	lp->l_length = len;
	lp->l_callback_arg = argp;

	lp->l_pending = 0;
	TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
		if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
			lp->l_pending++;
	}

	/*
	 * If there are any writes that are pending, we return EBUSY.  All
	 * callers will have to wait until all pending writes clear.
	 */
	if (lp->l_pending > 0) {
		vol->v_pending_lock = 1;
		G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
		    (intmax_t)off, (intmax_t)(off + len), lp->l_pending);
		return (EBUSY);
	}
	G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
	    (intmax_t)off, (intmax_t)(off + len));
	G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
	return (0);
}

int
g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
{
	struct g_raid_lock *lp;
	struct g_raid_softc *sc;
	struct bio *bp;

	sc = vol->v_softc;
	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (lp->l_offset == off && lp->l_length == len) {
			LIST_REMOVE(lp, l_next);
			/*
			 * Right now we just put them all back on the queue
			 * and hope for the best.  That is safe because any
			 * still-locked ranges will go right back on this
			 * list when the worker thread runs.
			 */
			G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
			    (intmax_t)lp->l_offset,
			    (intmax_t)(lp->l_offset + lp->l_length));
			mtx_lock(&sc->sc_queue_mtx);
			while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
				bioq_disksort(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			free(lp, M_RAID);
			return (0);
		}
	}
	return (EINVAL);
}

void
g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
{
	struct g_consumer *cp;
	struct g_raid_disk *disk, *tdisk;

	bp->bio_caller1 = sd;

	/*
	 * Make sure that the disk is present.  Generally it is a task of
	 * transformation layers to not send requests to absent disks, but
	 * it is better to be safe than sorry and report the situation here.
	 */
	if (sd->sd_disk == NULL) {
		G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");
nodisk:
		bp->bio_from = NULL;
		bp->bio_to = NULL;
		bp->bio_error = ENXIO;
		g_raid_disk_done(bp);
		return;
	}
	disk = sd->sd_disk;
	if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
	    disk->d_state != G_RAID_DISK_S_FAILED) {
		G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
		goto nodisk;
	}

	cp = disk->d_consumer;
	bp->bio_from = cp;
	bp->bio_to = cp->provider;
	cp->index++;

	/* Update average disks load. */
	TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
		if (tdisk->d_consumer == NULL)
			tdisk->d_load = 0;
		else
			tdisk->d_load = (tdisk->d_consumer->index *
			    G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
	}

	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	if (dumping) {
		G_RAID_LOGREQ(3, bp, "Sending dumping request.");
		if (bp->bio_cmd == BIO_WRITE) {
			bp->bio_error = g_raid_subdisk_kerneldump(sd,
			    bp->bio_data, 0, bp->bio_offset, bp->bio_length);
		} else
			bp->bio_error = EOPNOTSUPP;
		g_raid_disk_done(bp);
	} else {
		bp->bio_done = g_raid_disk_done;
		bp->bio_offset += sd->sd_offset;
		G_RAID_LOGREQ(3, bp, "Sending request.");
		g_io_request(bp, cp);
	}
}

int
g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{

	if (sd->sd_disk == NULL)
		return (ENXIO);
	if (sd->sd_disk->d_kd.di.dumper == NULL)
		return (EOPNOTSUPP);
	return (dump_write(&sd->sd_disk->d_kd.di,
	    virtual, physical,
	    sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset,
	    length));
}

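/*
 * Completion callback for requests sent to subdisk consumers.  It runs in
 * GEOM's g_up context, so the bio is only requeued here; the real
 * processing happens in the worker thread (g_raid_disk_done_request()).
 */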
static void
g_raid_disk_done(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;

	sd = bp->bio_caller1;
	sc = sd->sd_softc;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_raid_disk_done_request(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;

	g_topology_assert_not();

	G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
	sd = bp->bio_caller1;
	sc = sd->sd_softc;
	vol = sd->sd_volume;
	if (bp->bio_from != NULL) {
		bp->bio_from->index--;
		disk = bp->bio_from->private;
		if (disk == NULL)
			g_raid_kill_consumer(sc, bp->bio_from);
	}
	bp->bio_offset -= sd->sd_offset;

	G_RAID_TR_IODONE(vol->v_tr, sd, bp);
}

static void
g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
{

	if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
		ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
	else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
		ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
	else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
		ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
	else
		ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
	if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
		KASSERT(ep->e_error == 0,
		    ("Error cannot be handled."));
		g_raid_event_free(ep);
	} else {
		ep->e_flags |= G_RAID_EVENT_DONE;
		G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
		mtx_lock(&sc->sc_queue_mtx);
		wakeup(ep);
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

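/*
 * Main per-node worker loop: it handles queued events first, then queued
 * I/O, and uses the idle timeout to mark volumes clean and deliver
 * G_RAID_TR_IDLE() notifications.
 */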
static void
g_raid_worker(void *arg)
{
	struct g_raid_softc *sc;
	struct g_raid_event *ep;
	struct g_raid_volume *vol;
	struct bio *bp;
	struct timeval now, t;
	int timeout, rv;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		bp = NULL;
		rv = 0;
		ep = TAILQ_FIRST(&sc->sc_events);
		if (ep != NULL)
			TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
			;
		else {
			getmicrouptime(&now);
			t = now;
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (bioq_first(&vol->v_inflight) == NULL &&
				    vol->v_tr &&
				    timevalcmp(&vol->v_last_done, &t, < ))
					t = vol->v_last_done;
			}
			timevalsub(&t, &now);
			timeout = g_raid_idle_threshold +
			    t.tv_sec * 1000000 + t.tv_usec;
			if (timeout > 0) {
				/*
				 * Two steps to avoid overflows at HZ=1000
				 * and idle timeouts > 2.1s.  Some rounding
				 * errors can occur, but they are < 1 tick,
				 * which is deemed to be close enough for
				 * this purpose.
				 */
				int micpertic = 1000000 / hz;
				timeout = (timeout + micpertic - 1) / micpertic;
				sx_xunlock(&sc->sc_lock);
				MSLEEP(rv, sc, &sc->sc_queue_mtx,
				    PRIBIO | PDROP, "-", timeout);
				sx_xlock(&sc->sc_lock);
				goto process;
			}
			rv = EWOULDBLOCK;
		}
		mtx_unlock(&sc->sc_queue_mtx);
process:
		if (ep != NULL) {
			g_raid_handle_event(sc, ep);
		} else if (bp != NULL) {
			if (bp->bio_to != NULL &&
			    bp->bio_to->geom == sc->sc_geom)
				g_raid_start_request(bp);
			else
				g_raid_disk_done_request(bp);
		} else if (rv == EWOULDBLOCK) {
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (vol->v_writes == 0 && vol->v_dirty)
					g_raid_clean(vol, -1);
				if (bioq_first(&vol->v_inflight) == NULL &&
				    vol->v_tr) {
					t.tv_sec = g_raid_idle_threshold / 1000000;
					t.tv_usec = g_raid_idle_threshold % 1000000;
					timevaladd(&t, &vol->v_last_done);
					getmicrouptime(&now);
					if (timevalcmp(&t, &now, <= )) {
						G_RAID_TR_IDLE(vol->v_tr);
						vol->v_last_done = now;
					}
				}
			}
		}
		if (sc->sc_stopping == G_RAID_DESTROY_HARD)
			g_raid_destroy_node(sc, 1);	/* May not return. */
	}
}

static void
g_raid_poll(struct g_raid_softc *sc)
{
	struct g_raid_event *ep;
	struct bio *bp;

	sx_xlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	/*
	 * First take a look at events.
	 * This is important to handle events before any I/O requests.
	 */
	ep = TAILQ_FIRST(&sc->sc_events);
	if (ep != NULL) {
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		mtx_unlock(&sc->sc_queue_mtx);
		g_raid_handle_event(sc, ep);
		goto out;
	}
	bp = bioq_takefirst(&sc->sc_queue);
	if (bp != NULL) {
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_from == NULL ||
		    bp->bio_from->geom != sc->sc_geom)
			g_raid_start_request(bp);
		else
			g_raid_disk_done_request(bp);
	} else
		mtx_unlock(&sc->sc_queue_mtx);
out:
	sx_xunlock(&sc->sc_lock);
}

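/*
 * Create and announce the GEOM provider for a volume, propagating media
 * and stripe geometry from the components where that makes sense.
 */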
static void
g_raid_launch_provider(struct g_raid_volume *vol)
{
	struct g_raid_disk *disk;
	struct g_raid_softc *sc;
	struct g_provider *pp;
	char name[G_RAID_MAX_VOLUMENAME];
	off_t off;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	/* Try to name provider with volume name. */
	snprintf(name, sizeof(name), "raid/%s", vol->v_name);
	if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
	    g_provider_by_name(name) != NULL) {
		/* Otherwise use sequential volume number. */
		snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
	}
	pp = g_new_providerf(sc->sc_geom, "%s", name);
	pp->private = vol;
	pp->mediasize = vol->v_mediasize;
	pp->sectorsize = vol->v_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
		if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
		    disk->d_consumer != NULL &&
		    disk->d_consumer->provider != NULL) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			off = disk->d_consumer->provider->stripeoffset;
			pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
			if (pp->stripesize > 0)
				pp->stripeoffset %= pp->stripesize;
		}
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
			pp->stripesize *= (vol->v_disks_count - 1);
			pp->stripeoffset *= (vol->v_disks_count - 1);
		}
	} else
		pp->stripesize = vol->v_strip_size;
	vol->v_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
	    pp->name, vol->v_name);
}

static void
g_raid_destroy_provider(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_provider *pp;
	struct bio *bp, *tmp;

	g_topology_assert_not();
	sc = vol->v_softc;
	pp = vol->v_provider;
	KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));

	g_topology_lock();
	g_error_provider(pp, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
		if (bp->bio_to != pp)
			continue;
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
	    pp->name, vol->v_name);
	g_wither_provider(pp, ENXIO);
	g_topology_unlock();
	vol->v_provider = NULL;
}

/*
 * Update volume state.
 */
static int
g_raid_update_volume(struct g_raid_volume *vol, u_int event)
{
	struct g_raid_softc *sc;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
	    g_raid_volume_event2str(event),
	    vol->v_name);
	switch (event) {
	case G_RAID_VOLUME_E_DOWN:
		if (vol->v_provider != NULL)
			g_raid_destroy_provider(vol);
		break;
	case G_RAID_VOLUME_E_UP:
		if (vol->v_provider == NULL)
			g_raid_launch_provider(vol);
		break;
	case G_RAID_VOLUME_E_START:
		if (vol->v_tr)
			G_RAID_TR_START(vol->v_tr);
		break;
	default:
		if (sc->sc_md)
			G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
		break;
	}

	/* Manage root mount release. */
	if (vol->v_starting) {
		vol->v_starting = 0;
		G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
		root_mount_rel(vol->v_rootmount);
		vol->v_rootmount = NULL;
	}
	if (vol->v_stopping && vol->v_provider_open == 0)
		g_raid_destroy_volume(vol);
	return (0);
}

/*
 * Update subdisk state.
 */
static int
g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = sd->sd_softc;
	vol = sd->sd_volume;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
	    g_raid_subdisk_event2str(event),
	    vol->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	if (vol->v_tr)
		G_RAID_TR_EVENT(vol->v_tr, sd, event);

	return (0);
}

/*
 * Update disk state.
 */
static int
g_raid_update_disk(struct g_raid_disk *disk, u_int event)
{
	struct g_raid_softc *sc;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
	    g_raid_disk_event2str(event),
	    g_raid_get_diskname(disk));

	if (sc->sc_md)
		G_RAID_MD_EVENT(sc->sc_md, disk, event);
	return (0);
}

static int
g_raid_update_node(struct g_raid_softc *sc, u_int event)
{

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for the array.",
	    g_raid_node_event2str(event));

	if (event == G_RAID_NODE_E_WAKE)
		return (0);

	if (sc->sc_md)
		G_RAID_MD_EVENT(sc->sc_md, NULL, event);
	return (0);
}

static int
g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	int dcw, opens, error = 0;

	g_topology_assert();
	sc = pp->geom->softc;
	vol = pp->private;
	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
	KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));

	G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);
	dcw = pp->acw + acw;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	/* Deny new opens while dying. */
	if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
		error = ENXIO;
		goto out;
	}
	if (dcw == 0 && vol->v_dirty)
		g_raid_clean(vol, dcw);
	vol->v_provider_open += acr + acw + ace;
	/* Handle delayed node destruction. */
	if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
	    vol->v_provider_open == 0) {
		/* Count open volumes. */
		opens = g_raid_nopens(sc);
		if (opens == 0) {
			sc->sc_stopping = G_RAID_DESTROY_HARD;
			/* Wake up worker to make it selfdestruct. */
			g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
		}
	}
	/* Handle open volume destruction. */
	if (vol->v_stopping && vol->v_provider_open == 0)
		g_raid_destroy_volume(vol);
out:
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

struct g_raid_softc *
g_raid_create_node(struct g_class *mp,
    const char *name, struct g_raid_md_object *md)
{
	struct g_raid_softc *sc;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	G_RAID_DEBUG(1, "Creating array %s.", name);

	gp = g_new_geomf(mp, "%s", name);
	sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
	gp->start = g_raid_start;
	gp->orphan = g_raid_orphan;
	gp->access = g_raid_access;
	gp->dumpconf = g_raid_dumpconf;
	gp->softc = sc;
	sc->sc_md = md;
	sc->sc_geom = gp;
	sc->sc_stopping = 0;
	TAILQ_INIT(&sc->sc_volumes);
	TAILQ_INIT(&sc->sc_disks);
	sx_init(&sc->sc_lock, "graid:lock");
	mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
	TAILQ_INIT(&sc->sc_events);
	bioq_init(&sc->sc_queue);
	error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
	    "g_raid %s", name);
	if (error != 0) {
		G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_destroy(&sc->sc_lock);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_RAID);
		return (NULL);
	}

	G_RAID_DEBUG1(0, sc, "Array %s created.", name);
	return (sc);
}

struct g_raid_volume *
g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
{
	struct g_raid_volume *vol, *vol1;
	int i;

	G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
	vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
	vol->v_softc = sc;
	strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
	vol->v_state = G_RAID_VOLUME_S_STARTING;
	vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
	bioq_init(&vol->v_inflight);
	bioq_init(&vol->v_locked);
	LIST_INIT(&vol->v_locks);
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		vol->v_subdisks[i].sd_softc = sc;
		vol->v_subdisks[i].sd_volume = vol;
		vol->v_subdisks[i].sd_pos = i;
		vol->v_subdisks[i].sd_state = G_RAID_SUBDISK_S_NONE;
	}

	/* Find free ID for this volume. */
	g_topology_lock();
	vol1 = vol;
	if (id >= 0) {
		LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
			if (vol1->v_global_id == id)
				break;
		}
	}
	if (vol1 != NULL) {
		for (id = 0; ; id++) {
			LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
				if (vol1->v_global_id == id)
					break;
			}
			if (vol1 == NULL)
				break;
		}
	}
	vol->v_global_id = id;
	LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
	g_topology_unlock();

	/* Delay root mounting. */
	vol->v_rootmount = root_mount_hold("GRAID");
	G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
	vol->v_starting = 1;
	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
	return (vol);
}

struct g_raid_disk *
g_raid_create_disk(struct g_raid_softc *sc)
{
	struct g_raid_disk *disk;

	G_RAID_DEBUG1(1, sc, "Creating disk.");
	disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
	disk->d_softc = sc;
	disk->d_state = G_RAID_DISK_S_NONE;
	TAILQ_INIT(&disk->d_subdisks);
	TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
	return (disk);
}

int
g_raid_start_volume(struct g_raid_volume *vol)
{
	struct g_raid_tr_class *class;
	struct g_raid_tr_object *obj;
	int status;

	G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
	LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
		G_RAID_DEBUG1(2, vol->v_softc,
		    "Tasting volume %s for %s transformation.",
		    vol->v_name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->tro_class = class;
		obj->tro_volume = vol;
		status = G_RAID_TR_TASTE(obj, vol);
		if (status != G_RAID_TR_TASTE_FAIL)
			break;
		kobj_delete((kobj_t)obj, M_RAID);
	}
	if (class == NULL) {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "No transformation module found for %s.",
		    vol->v_name);
		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
		g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		return (-1);
	}
	G_RAID_DEBUG1(2, vol->v_softc,
	    "Transformation module %s chosen for %s.",
	    class->name, vol->v_name);
	vol->v_tr = obj;
	return (0);
}

static void
g_raid_destroy_node(struct g_raid_softc *sc, int worker)
{
	struct g_raid_volume *vol, *tmpv;
	struct g_raid_disk *disk, *tmpd;

	sc->sc_stopping = G_RAID_DESTROY_HARD;
	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
		if (g_raid_destroy_volume(vol))
			return;
	}
	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
		if (g_raid_destroy_disk(disk))
			return;
	}
	if (sc->sc_md) {
		G_RAID_MD_FREE(sc->sc_md);
		kobj_delete((kobj_t)sc->sc_md, M_RAID);
		sc->sc_md = NULL;
	}
	if (sc->sc_geom != NULL) {
		G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
		g_topology_lock();
		sc->sc_geom->softc = NULL;
		g_wither_geom(sc->sc_geom, ENXIO);
		g_topology_unlock();
		sc->sc_geom = NULL;
	} else
		G_RAID_DEBUG(1, "Array destroyed.");
	if (worker) {
		g_raid_event_cancel(sc, sc);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_xunlock(&sc->sc_lock);
		sx_destroy(&sc->sc_lock);
		wakeup(&sc->sc_stopping);
		free(sc, M_RAID);
		curthread->td_pflags &= ~TDP_GEOM;
		G_RAID_DEBUG(1, "Thread exiting.");
		kproc_exit(0);
	} else {
		/* Wake up worker to make it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
}

int
g_raid_destroy_volume(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	int i;

	sc = vol->v_softc;
	G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
	vol->v_stopping = 1;
	if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
		if (vol->v_tr) {
			G_RAID_TR_STOP(vol->v_tr);
			return (EBUSY);
		} else
			vol->v_state = G_RAID_VOLUME_S_STOPPED;
	}
	if (g_raid_event_check(sc, vol) != 0)
		return (EBUSY);
	if (vol->v_provider != NULL)
		return (EBUSY);
	if (vol->v_provider_open != 0)
		return (EBUSY);
	if (vol->v_tr) {
		G_RAID_TR_FREE(vol->v_tr);
		kobj_delete((kobj_t)vol->v_tr, M_RAID);
		vol->v_tr = NULL;
	}
	if (vol->v_rootmount)
		root_mount_rel(vol->v_rootmount);
	g_topology_lock();
	LIST_REMOVE(vol, v_global_next);
	g_topology_unlock();
	TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		g_raid_event_cancel(sc, &vol->v_subdisks[i]);
		disk = vol->v_subdisks[i].sd_disk;
		if (disk == NULL)
			continue;
		TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
	}
	G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
	if (sc->sc_md)
		G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
	g_raid_event_cancel(sc, vol);
	free(vol, M_RAID);
	if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
		/* Wake up worker to let it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
	return (0);
}

int
g_raid_destroy_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd, *tmp;

	sc = disk->d_softc;
	G_RAID_DEBUG1(2, sc, "Destroying disk.");
	if (disk->d_consumer) {
		g_raid_kill_consumer(sc, disk->d_consumer);
		disk->d_consumer = NULL;
	}
	TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
		    G_RAID_EVENT_SUBDISK);
		TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
		sd->sd_disk = NULL;
	}
	TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
	if (sc->sc_md)
		G_RAID_MD_FREE_DISK(sc->sc_md, disk);
	g_raid_event_cancel(sc, disk);
	free(disk, M_RAID);
	return (0);
}

int
g_raid_destroy(struct g_raid_softc *sc, int how)
{
	int opens;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	/* Count open volumes. */
	opens = g_raid_nopens(sc);

	/* React on some opened volumes. */
	if (opens > 0) {
		switch (how) {
		case G_RAID_DESTROY_SOFT:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
			return (EBUSY);
		case G_RAID_DESTROY_DELAYED:
			G_RAID_DEBUG1(1, sc,
			    "Array will be destroyed on last close.");
			sc->sc_stopping = G_RAID_DESTROY_DELAYED;
			return (EBUSY);
		case G_RAID_DESTROY_HARD:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
		}
	}

	/* Mark node for destruction. */
	sc->sc_stopping = G_RAID_DESTROY_HARD;
	/* Wake up worker to let it selfdestruct. */
	g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	/* Sleep until node destroyed. */
	sx_sleep(&sc->sc_stopping, &sc->sc_lock,
	    PRIBIO | PDROP, "r:destroy", 0);
	return (0);
}

static void
g_raid_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp, *geom;
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);

	gp = g_new_geomf(mp, "raid:taste");
	/*
	 * This orphan function should never be called.
	 */
	gp->orphan = g_raid_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);

	geom = NULL;
	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
		    pp->name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->mdo_class = class;
		status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
		if (status != G_RAID_MD_TASTE_NEW)
			kobj_delete((kobj_t)obj, M_RAID);
		if (status != G_RAID_MD_TASTE_FAIL)
			break;
	}

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
	return (geom);
}

int
g_raid_create_node_format(const char *format, struct g_geom **gp)
{
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		if (strcasecmp(class->name, format) == 0)
			break;
	}
	if (class == NULL) {
		G_RAID_DEBUG(1, "No support for %s metadata.", format);
		return (G_RAID_MD_TASTE_FAIL);
	}
	obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
	    M_WAITOK);
	obj->mdo_class = class;
	status = G_RAID_MD_CREATE(obj, &g_raid_class, gp);
	if (status != G_RAID_MD_TASTE_NEW)
		kobj_delete((kobj_t)obj, M_RAID);
	return (status);
}

static int
g_raid_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_raid_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return;
	if (sc->sc_md)
		G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
}

void
g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	if (disk == NULL)
		disk = sd->sd_disk;
	if (disk == NULL) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
		return;
	}
	if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a "
		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
		return;
	}
	if (sc->sc_md)
		G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
}

static void
g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	int i, s;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		vol = pp->private;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
		    vol->v_name);
		sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
		    g_raid_volume_level2str(vol->v_raid_level,
		    vol->v_raid_level_qualifier));
		sbuf_printf(sb,
		    "%s<Transformation>%s</Transformation>\n", indent,
		    vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    vol->v_disks_count);
		sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
		    vol->v_strip_size);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_raid_volume_state2str(vol->v_state));
		sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
		    vol->v_dirty ? "Yes" : "No");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_disk != NULL &&
			    sd->sd_disk->d_consumer != NULL) {
				sbuf_printf(sb, "%s ",
				    g_raid_get_diskname(sd->sd_disk));
			} else
				sbuf_printf(sb, "NONE ");
			sbuf_printf(sb, "(%s",
			    g_raid_subdisk_state2str(sd->sd_state));
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
			    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
				sbuf_printf(sb, " %d%%",
				    (int)(sd->sd_rebuild_pos * 100 /
				    sd->sd_size));
			}
			sbuf_printf(sb, ")");
			if (i + 1 < vol->v_disks_count)
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else if (cp != NULL) {
		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<State>%s", indent,
		    g_raid_disk_state2str(disk->d_state));
		if (!TAILQ_EMPTY(&disk->d_subdisks)) {
			sbuf_printf(sb, " (");
			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
				sbuf_printf(sb, "%s",
				    g_raid_subdisk_state2str(sd->sd_state));
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
					sbuf_printf(sb, " %d%%",
					    (int)(sd->sd_rebuild_pos * 100 /
					    sd->sd_size));
				}
				if (TAILQ_NEXT(sd, sd_next))
					sbuf_printf(sb, ", ");
			}
			sbuf_printf(sb, ")");
		}
		sbuf_printf(sb, "</State>\n");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			sbuf_printf(sb, "r%d(%s):%d@%ju",
			    sd->sd_volume->v_global_id,
			    sd->sd_volume->v_name,
			    sd->sd_pos, (uintmax_t)sd->sd_offset);
			if (TAILQ_NEXT(sd, sd_next))
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
		    disk->d_read_errs);
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		if (sc->sc_md) {
			sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
			    sc->sc_md->mdo_class->name);
		}
		if (!TAILQ_EMPTY(&sc->sc_volumes)) {
			s = 0xff;
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (vol->v_state < s)
					s = vol->v_state;
			}
			sbuf_printf(sb, "%s<State>%s</State>\n", indent,
			    g_raid_volume_state2str(s));
		}
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}

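/*
 * On shutdown, request delayed destruction of all arrays, so that metadata
 * gets written and volumes are marked clean on last close.
 */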
static void
g_raid_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_raid_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		error = g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

static void
g_raid_init(struct g_class *mp)
{

	g_raid_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_raid_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_raid_pre_sync == NULL)
		G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_raid_started = 1;
}

static void
g_raid_fini(struct g_class *mp)
{

	if (g_raid_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid_pre_sync);
	g_raid_started = 0;
}

int
g_raid_md_modevent(module_t mod, int type, void *arg)
{
	struct g_raid_md_class *class, *c, *nc;
	int error;

	error = 0;
	class = arg;
	switch (type) {
	case MOD_LOAD:
		c = LIST_FIRST(&g_raid_md_classes);
		if (c == NULL || c->mdc_priority > class->mdc_priority)
			LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
		else {
			while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
			    nc->mdc_priority < class->mdc_priority)
				c = nc;
			LIST_INSERT_AFTER(c, class, mdc_list);
		}
		if (g_raid_started)
			g_retaste(&g_raid_class);
		break;
	case MOD_UNLOAD:
		LIST_REMOVE(class, mdc_list);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

int
g_raid_tr_modevent(module_t mod, int type, void *arg)
{
	struct g_raid_tr_class *class, *c, *nc;
	int error;

	error = 0;
	class = arg;
	switch (type) {
	case MOD_LOAD:
		c = LIST_FIRST(&g_raid_tr_classes);
		if (c == NULL || c->trc_priority > class->trc_priority)
			LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
		else {
			while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
			    nc->trc_priority < class->trc_priority)
				c = nc;
			LIST_INSERT_AFTER(c, class, trc_list);
		}
		break;
	case MOD_UNLOAD:
		LIST_REMOVE(class, trc_list);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

/*
 * Use a local implementation of DECLARE_MODULE() instead of
 * DECLARE_GEOM_CLASS(g_raid_class, g_raid) to reduce module priority,
 * allowing submodules to register themselves first.
 */
static moduledata_t g_raid_mod = {
	"g_raid",
	g_modevent,
	&g_raid_class
};
DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
MODULE_VERSION(geom_raid, 0);