/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid/g_raid.h>
#include "g_raid_md_if.h"
#include "g_raid_tr_if.h"
static MALLOC_DEFINE(M_RAID, "raid_data", "GEOM_RAID Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, raid, CTLFLAG_RW, 0, "GEOM_RAID stuff");
int g_raid_enable = 1;
TUNABLE_INT("kern.geom.raid.enable", &g_raid_enable);
SYSCTL_INT(_kern_geom_raid, OID_AUTO, enable, CTLFLAG_RW,
    &g_raid_enable, 0, "Enable on-disk metadata taste");
u_int g_raid_aggressive_spare = 0;
TUNABLE_INT("kern.geom.raid.aggressive_spare", &g_raid_aggressive_spare);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, aggressive_spare, CTLFLAG_RW,
    &g_raid_aggressive_spare, 0, "Use disks without metadata as spare");
u_int g_raid_debug = 0;
TUNABLE_INT("kern.geom.raid.debug", &g_raid_debug);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, debug, CTLFLAG_RW, &g_raid_debug, 0,
    "Debug level");
int g_raid_read_err_thresh = 10;
TUNABLE_INT("kern.geom.raid.read_err_thresh", &g_raid_read_err_thresh);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, read_err_thresh, CTLFLAG_RW,
    &g_raid_read_err_thresh, 0,
    "Number of read errors equated to disk failure");
u_int g_raid_start_timeout = 30;
TUNABLE_INT("kern.geom.raid.start_timeout", &g_raid_start_timeout);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, start_timeout, CTLFLAG_RW,
    &g_raid_start_timeout, 0,
    "Time to wait for all array components");
static u_int g_raid_clean_time = 5;
TUNABLE_INT("kern.geom.raid.clean_time", &g_raid_clean_time);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, clean_time, CTLFLAG_RW,
    &g_raid_clean_time, 0, "Mark volume as clean when idling");
static u_int g_raid_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.raid.disconnect_on_failure",
    &g_raid_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_raid_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid_name_format = 0;
TUNABLE_INT("kern.geom.raid.name_format", &g_raid_name_format);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, name_format, CTLFLAG_RW,
    &g_raid_name_format, 0, "Providers name format.");
static u_int g_raid_idle_threshold = 1000000;
TUNABLE_INT("kern.geom.raid.idle_threshold", &g_raid_idle_threshold);
SYSCTL_UINT(_kern_geom_raid, OID_AUTO, idle_threshold, CTLFLAG_RW,
    &g_raid_idle_threshold, 1000000,
    "Time in microseconds to consider a volume idle.");
static u_int ar_legacy_aliases = 1;
SYSCTL_INT(_kern_geom_raid, OID_AUTO, legacy_aliases, CTLFLAG_RW,
    &ar_legacy_aliases, 0, "Create aliases named as the legacy ataraid style.");
TUNABLE_INT("kern.geom_raid.legacy_aliases", &ar_legacy_aliases);
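/*
 * Illustrative note (not from this file): the TUNABLE_INT() knobs above can
 * be preset from loader.conf(5) before the module initializes, e.g.:
 *
 *	kern.geom.raid.enable=1
 *	kern.geom.raid.debug=2
 *	kern.geom.raid.read_err_thresh=10
 *
 * The same values remain adjustable at runtime through sysctl(8) under
 * kern.geom.raid.* (the CTLFLAG_RW nodes declared above).
 */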
#define	MSLEEP(rv, ident, mtx, priority, wmesg, timeout)	do {	\
	G_RAID_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));		\
	rv = msleep((ident), (mtx), (priority), (wmesg), (timeout));	\
	G_RAID_DEBUG(4, "%s: Woken up %p.", __func__, (ident));		\
} while (0)
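/*
 * MSLEEP() is a thin wrapper around msleep(9) that logs the sleep and the
 * wakeup at debug level 4.  As an illustrative sketch, the call used by
 * g_raid_worker() below,
 *
 *	MSLEEP(rv, sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "-", timeout);
 *
 * expands to the plain msleep() on sc with the two G_RAID_DEBUG(4, ...)
 * lines around it.
 */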
LIST_HEAD(, g_raid_md_class) g_raid_md_classes =
    LIST_HEAD_INITIALIZER(g_raid_md_classes);

LIST_HEAD(, g_raid_tr_class) g_raid_tr_classes =
    LIST_HEAD_INITIALIZER(g_raid_tr_classes);

LIST_HEAD(, g_raid_volume) g_raid_volumes =
    LIST_HEAD_INITIALIZER(g_raid_volumes);

static eventhandler_tag g_raid_post_sync = NULL;
static int g_raid_started = 0;
static int g_raid_shutdown = 0;

static int g_raid_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid_taste;
static void g_raid_init(struct g_class *mp);
static void g_raid_fini(struct g_class *mp);
struct g_class g_raid_class = {
	.name = G_RAID_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid_ctl,
	.taste = g_raid_taste,
	.destroy_geom = g_raid_destroy_geom,
	.init = g_raid_init,
	.fini = g_raid_fini
};
static void g_raid_destroy_provider(struct g_raid_volume *vol);
static int g_raid_update_disk(struct g_raid_disk *disk, u_int event);
static int g_raid_update_subdisk(struct g_raid_subdisk *subdisk, u_int event);
static int g_raid_update_volume(struct g_raid_volume *vol, u_int event);
static int g_raid_update_node(struct g_raid_softc *sc, u_int event);
static void g_raid_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid_start(struct bio *bp);
static void g_raid_start_request(struct bio *bp);
static void g_raid_disk_done(struct bio *bp);
static void g_raid_poll(struct g_raid_softc *sc);
static const char *
g_raid_node_event2str(int event)
{

	switch (event) {
	case G_RAID_NODE_E_WAKE:
		return ("WAKE");
	case G_RAID_NODE_E_START:
		return ("START");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_disk_state2str(int state)
{

	switch (state) {
	case G_RAID_DISK_S_NONE:
		return ("NONE");
	case G_RAID_DISK_S_OFFLINE:
		return ("OFFLINE");
	case G_RAID_DISK_S_DISABLED:
		return ("DISABLED");
	case G_RAID_DISK_S_FAILED:
		return ("FAILED");
	case G_RAID_DISK_S_STALE_FAILED:
		return ("STALE_FAILED");
	case G_RAID_DISK_S_SPARE:
		return ("SPARE");
	case G_RAID_DISK_S_STALE:
		return ("STALE");
	case G_RAID_DISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_disk_event2str(int event)
{

	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_subdisk_state2str(int state)
{

	switch (state) {
	case G_RAID_SUBDISK_S_NONE:
		return ("NONE");
	case G_RAID_SUBDISK_S_FAILED:
		return ("FAILED");
	case G_RAID_SUBDISK_S_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_S_REBUILD:
		return ("REBUILD");
	case G_RAID_SUBDISK_S_UNINITIALIZED:
		return ("UNINITIALIZED");
	case G_RAID_SUBDISK_S_STALE:
		return ("STALE");
	case G_RAID_SUBDISK_S_RESYNC:
		return ("RESYNC");
	case G_RAID_SUBDISK_S_ACTIVE:
		return ("ACTIVE");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_subdisk_event2str(int event)
{

	switch (event) {
	case G_RAID_SUBDISK_E_NEW:
		return ("NEW");
	case G_RAID_SUBDISK_E_FAILED:
		return ("FAILED");
	case G_RAID_SUBDISK_E_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

const char *
g_raid_volume_state2str(int state)
{

	switch (state) {
	case G_RAID_VOLUME_S_STARTING:
		return ("STARTING");
	case G_RAID_VOLUME_S_BROKEN:
		return ("BROKEN");
	case G_RAID_VOLUME_S_DEGRADED:
		return ("DEGRADED");
	case G_RAID_VOLUME_S_SUBOPTIMAL:
		return ("SUBOPTIMAL");
	case G_RAID_VOLUME_S_OPTIMAL:
		return ("OPTIMAL");
	case G_RAID_VOLUME_S_UNSUPPORTED:
		return ("UNSUPPORTED");
	case G_RAID_VOLUME_S_STOPPED:
		return ("STOPPED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid_volume_event2str(int event)
{

	switch (event) {
	case G_RAID_VOLUME_E_UP:
		return ("UP");
	case G_RAID_VOLUME_E_DOWN:
		return ("DOWN");
	case G_RAID_VOLUME_E_START:
		return ("START");
	case G_RAID_VOLUME_E_STARTMD:
		return ("STARTMD");
	default:
		return ("INVALID");
	}
}
const char *
g_raid_volume_level2str(int level, int qual)
{

	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		return ("RAID0");
	case G_RAID_VOLUME_RL_RAID1:
		return ("RAID1");
	case G_RAID_VOLUME_RL_RAID3:
		if (qual == G_RAID_VOLUME_RLQ_R3P0)
			return ("RAID3-P0");
		if (qual == G_RAID_VOLUME_RLQ_R3PN)
			return ("RAID3-PN");
		return ("RAID3");
	case G_RAID_VOLUME_RL_RAID4:
		if (qual == G_RAID_VOLUME_RLQ_R4P0)
			return ("RAID4-P0");
		if (qual == G_RAID_VOLUME_RLQ_R4PN)
			return ("RAID4-PN");
		return ("RAID4");
	case G_RAID_VOLUME_RL_RAID5:
		if (qual == G_RAID_VOLUME_RLQ_R5RA)
			return ("RAID5-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5RS)
			return ("RAID5-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5LA)
			return ("RAID5-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5LS)
			return ("RAID5-LS");
		return ("RAID5");
	case G_RAID_VOLUME_RL_RAID6:
		if (qual == G_RAID_VOLUME_RLQ_R6RA)
			return ("RAID6-RA");
		if (qual == G_RAID_VOLUME_RLQ_R6RS)
			return ("RAID6-RS");
		if (qual == G_RAID_VOLUME_RLQ_R6LA)
			return ("RAID6-LA");
		if (qual == G_RAID_VOLUME_RLQ_R6LS)
			return ("RAID6-LS");
		return ("RAID6");
	case G_RAID_VOLUME_RL_RAIDMDF:
		if (qual == G_RAID_VOLUME_RLQ_RMDFRA)
			return ("RAIDMDF-RA");
		if (qual == G_RAID_VOLUME_RLQ_RMDFRS)
			return ("RAIDMDF-RS");
		if (qual == G_RAID_VOLUME_RLQ_RMDFLA)
			return ("RAIDMDF-LA");
		if (qual == G_RAID_VOLUME_RLQ_RMDFLS)
			return ("RAIDMDF-LS");
		return ("RAIDMDF");
	case G_RAID_VOLUME_RL_RAID1E:
		if (qual == G_RAID_VOLUME_RLQ_R1EA)
			return ("RAID1E-A");
		if (qual == G_RAID_VOLUME_RLQ_R1EO)
			return ("RAID1E-O");
		return ("RAID1E");
	case G_RAID_VOLUME_RL_SINGLE:
		return ("SINGLE");
	case G_RAID_VOLUME_RL_CONCAT:
		return ("CONCAT");
	case G_RAID_VOLUME_RL_RAID5E:
		if (qual == G_RAID_VOLUME_RLQ_R5ERA)
			return ("RAID5E-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5ERS)
			return ("RAID5E-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5ELA)
			return ("RAID5E-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5ELS)
			return ("RAID5E-LS");
		return ("RAID5E");
	case G_RAID_VOLUME_RL_RAID5EE:
		if (qual == G_RAID_VOLUME_RLQ_R5EERA)
			return ("RAID5EE-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5EERS)
			return ("RAID5EE-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5EELA)
			return ("RAID5EE-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5EELS)
			return ("RAID5EE-LS");
		return ("RAID5EE");
	case G_RAID_VOLUME_RL_RAID5R:
		if (qual == G_RAID_VOLUME_RLQ_R5RRA)
			return ("RAID5R-RA");
		if (qual == G_RAID_VOLUME_RLQ_R5RRS)
			return ("RAID5R-RS");
		if (qual == G_RAID_VOLUME_RLQ_R5RLA)
			return ("RAID5R-LA");
		if (qual == G_RAID_VOLUME_RLQ_R5RLS)
			return ("RAID5R-LS");
		return ("RAID5R");
	default:
		return ("UNKNOWN");
	}
}
int
g_raid_volume_str2level(const char *str, int *level, int *qual)
{

	*level = G_RAID_VOLUME_RL_UNKNOWN;
	*qual = G_RAID_VOLUME_RLQ_NONE;
	if (strcasecmp(str, "RAID0") == 0)
		*level = G_RAID_VOLUME_RL_RAID0;
	else if (strcasecmp(str, "RAID1") == 0)
		*level = G_RAID_VOLUME_RL_RAID1;
	else if (strcasecmp(str, "RAID3-P0") == 0) {
		*level = G_RAID_VOLUME_RL_RAID3;
		*qual = G_RAID_VOLUME_RLQ_R3P0;
	} else if (strcasecmp(str, "RAID3-PN") == 0 ||
	    strcasecmp(str, "RAID3") == 0) {
		*level = G_RAID_VOLUME_RL_RAID3;
		*qual = G_RAID_VOLUME_RLQ_R3PN;
	} else if (strcasecmp(str, "RAID4-P0") == 0) {
		*level = G_RAID_VOLUME_RL_RAID4;
		*qual = G_RAID_VOLUME_RLQ_R4P0;
	} else if (strcasecmp(str, "RAID4-PN") == 0 ||
	    strcasecmp(str, "RAID4") == 0) {
		*level = G_RAID_VOLUME_RL_RAID4;
		*qual = G_RAID_VOLUME_RLQ_R4PN;
	} else if (strcasecmp(str, "RAID5-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5RA;
	} else if (strcasecmp(str, "RAID5-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5RS;
	} else if (strcasecmp(str, "RAID5") == 0 ||
	    strcasecmp(str, "RAID5-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5LA;
	} else if (strcasecmp(str, "RAID5-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5;
		*qual = G_RAID_VOLUME_RLQ_R5LS;
	} else if (strcasecmp(str, "RAID6-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6RA;
	} else if (strcasecmp(str, "RAID6-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6RS;
	} else if (strcasecmp(str, "RAID6") == 0 ||
	    strcasecmp(str, "RAID6-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6LA;
	} else if (strcasecmp(str, "RAID6-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID6;
		*qual = G_RAID_VOLUME_RLQ_R6LS;
	} else if (strcasecmp(str, "RAIDMDF-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFRA;
	} else if (strcasecmp(str, "RAIDMDF-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFRS;
	} else if (strcasecmp(str, "RAIDMDF") == 0 ||
	    strcasecmp(str, "RAIDMDF-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFLA;
	} else if (strcasecmp(str, "RAIDMDF-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAIDMDF;
		*qual = G_RAID_VOLUME_RLQ_RMDFLS;
	} else if (strcasecmp(str, "RAID10") == 0 ||
	    strcasecmp(str, "RAID1E") == 0 ||
	    strcasecmp(str, "RAID1E-A") == 0) {
		*level = G_RAID_VOLUME_RL_RAID1E;
		*qual = G_RAID_VOLUME_RLQ_R1EA;
	} else if (strcasecmp(str, "RAID1E-O") == 0) {
		*level = G_RAID_VOLUME_RL_RAID1E;
		*qual = G_RAID_VOLUME_RLQ_R1EO;
	} else if (strcasecmp(str, "SINGLE") == 0)
		*level = G_RAID_VOLUME_RL_SINGLE;
	else if (strcasecmp(str, "CONCAT") == 0)
		*level = G_RAID_VOLUME_RL_CONCAT;
	else if (strcasecmp(str, "RAID5E-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ERA;
	} else if (strcasecmp(str, "RAID5E-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ERS;
	} else if (strcasecmp(str, "RAID5E") == 0 ||
	    strcasecmp(str, "RAID5E-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ELA;
	} else if (strcasecmp(str, "RAID5E-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5E;
		*qual = G_RAID_VOLUME_RLQ_R5ELS;
	} else if (strcasecmp(str, "RAID5EE-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EERA;
	} else if (strcasecmp(str, "RAID5EE-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EERS;
	} else if (strcasecmp(str, "RAID5EE") == 0 ||
	    strcasecmp(str, "RAID5EE-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EELA;
	} else if (strcasecmp(str, "RAID5EE-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5EE;
		*qual = G_RAID_VOLUME_RLQ_R5EELS;
	} else if (strcasecmp(str, "RAID5R-RA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RRA;
	} else if (strcasecmp(str, "RAID5R-RS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RRS;
	} else if (strcasecmp(str, "RAID5R") == 0 ||
	    strcasecmp(str, "RAID5R-LA") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RLA;
	} else if (strcasecmp(str, "RAID5R-LS") == 0) {
		*level = G_RAID_VOLUME_RL_RAID5R;
		*qual = G_RAID_VOLUME_RLQ_R5RLS;
	} else
		return (-1);
	return (0);
}
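/*
 * Illustrative round-trip (not part of the driver): parsing a level name
 * and formatting it back.  Bare names select a default qualifier, e.g.
 * "RAID5" maps to G_RAID_VOLUME_RLQ_R5LA, so:
 *
 *	int level, qual;
 *	if (g_raid_volume_str2level("RAID5", &level, &qual) == 0)
 *		printf("%s\n", g_raid_volume_level2str(level, qual));
 *
 * would print "RAID5-LA".
 */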
const char *
g_raid_get_diskname(struct g_raid_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_consumer->provider->name);
}
void
g_raid_get_disk_info(struct g_raid_disk *disk)
{
	struct g_consumer *cp = disk->d_consumer;
	int error, len;

	/* Read kernel dumping information. */
	disk->d_kd.offset = 0;
	disk->d_kd.length = OFF_MAX;
	len = sizeof(disk->d_kd);
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &disk->d_kd);
	if (error)
		disk->d_kd.di.dumper = NULL;
	if (disk->d_kd.di.dumper == NULL)
		G_RAID_DEBUG1(2, disk->d_softc,
		    "Dumping not supported by %s: %d.",
		    cp->provider->name, error);

	/* Read BIO_DELETE support. */
	error = g_getattr("GEOM::candelete", cp, &disk->d_candelete);
	if (error)
		disk->d_candelete = 0;
	if (!disk->d_candelete)
		G_RAID_DEBUG1(2, disk->d_softc,
		    "BIO_DELETE not supported by %s: %d.",
		    cp->provider->name, error);
}
void
g_raid_report_disk_state(struct g_raid_disk *disk)
{
	struct g_raid_subdisk *sd;
	int len, state;
	uint32_t s;

	if (disk->d_consumer == NULL)
		return;
	if (disk->d_state == G_RAID_DISK_S_DISABLED) {
		s = G_STATE_ACTIVE; /* XXX */
	} else if (disk->d_state == G_RAID_DISK_S_FAILED ||
	    disk->d_state == G_RAID_DISK_S_STALE_FAILED) {
		s = G_STATE_FAILED;
	} else {
		state = G_RAID_SUBDISK_S_ACTIVE;
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			if (sd->sd_state < state)
				state = sd->sd_state;
		}
		if (state == G_RAID_SUBDISK_S_FAILED)
			s = G_STATE_FAILED;
		else if (state == G_RAID_SUBDISK_S_NEW ||
		    state == G_RAID_SUBDISK_S_REBUILD)
			s = G_STATE_REBUILD;
		else if (state == G_RAID_SUBDISK_S_STALE ||
		    state == G_RAID_SUBDISK_S_RESYNC)
			s = G_STATE_RESYNC;
		else
			s = G_STATE_ACTIVE;
	}
	len = sizeof(s);
	g_io_getattr("GEOM::setstate", disk->d_consumer, &len, &s);
	G_RAID_DEBUG1(2, disk->d_softc, "Disk %s state reported as %d.",
	    g_raid_get_diskname(disk), s);
}

void
g_raid_change_disk_state(struct g_raid_disk *disk, int state)
{

	G_RAID_DEBUG1(0, disk->d_softc, "Disk %s state changed from %s to %s.",
	    g_raid_get_diskname(disk),
	    g_raid_disk_state2str(disk->d_state),
	    g_raid_disk_state2str(state));
	disk->d_state = state;
	g_raid_report_disk_state(disk);
}
void
g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state)
{

	G_RAID_DEBUG1(0, sd->sd_softc,
	    "Subdisk %s:%d-%s state changed from %s to %s.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]",
	    g_raid_subdisk_state2str(sd->sd_state),
	    g_raid_subdisk_state2str(state));
	sd->sd_state = state;
	if (sd->sd_disk)
		g_raid_report_disk_state(sd->sd_disk);
}

void
g_raid_change_volume_state(struct g_raid_volume *vol, int state)
{

	G_RAID_DEBUG1(0, vol->v_softc,
	    "Volume %s state changed from %s to %s.",
	    vol->v_name,
	    g_raid_volume_state2str(vol->v_state),
	    g_raid_volume_state2str(state));
	vol->v_state = state;
}
/*
 * --- Events handling functions ---
 * Events in geom_raid are used to maintain subdisk and volume status
 * from a single thread, which simplifies locking.
 */
static void
g_raid_event_free(struct g_raid_event *ep)
{

	free(ep, M_RAID);
}

int
g_raid_event_send(void *arg, int event, int flags)
{
	struct g_raid_softc *sc;
	struct g_raid_event *ep;
	int error;

	if ((flags & G_RAID_EVENT_VOLUME) != 0) {
		sc = ((struct g_raid_volume *)arg)->v_softc;
	} else if ((flags & G_RAID_EVENT_DISK) != 0) {
		sc = ((struct g_raid_disk *)arg)->d_softc;
	} else if ((flags & G_RAID_EVENT_SUBDISK) != 0) {
		sc = ((struct g_raid_subdisk *)arg)->sd_softc;
	} else {
		sc = arg;
	}
	ep = malloc(sizeof(*ep), M_RAID,
	    sx_xlocked(&sc->sc_lock) ? M_WAITOK : M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->e_tgt = arg;
	ep->e_event = event;
	ep->e_flags = flags;
	ep->e_error = 0;
	G_RAID_DEBUG1(4, sc, "Sending event %p. Waking up %p.", ep, sc);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);

	if ((flags & G_RAID_EVENT_WAIT) == 0)
		return (0);

	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID_DEBUG1(4, sc, "Sleeping on %p.", ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_queue_mtx);
		MSLEEP(error, ep, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}
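/*
 * Typical producer-side use, as seen in g_raid_orphan() below: a disk
 * disconnect is queued to the worker thread without waiting:
 *
 *	g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
 *	    G_RAID_EVENT_DISK);
 *
 * Passing G_RAID_EVENT_WAIT instead makes the caller sleep (with sc_lock
 * temporarily dropped) until the worker marks the event G_RAID_EVENT_DONE.
 */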
static void
g_raid_event_cancel(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep, *tmpep;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if (ep->e_tgt != tgt)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0)
			g_raid_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_queue_mtx);
}

static int
g_raid_event_check(struct g_raid_softc *sc, void *tgt)
{
	struct g_raid_event *ep;
	int res = 0;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(ep, &sc->sc_events, e_next) {
		if (ep->e_tgt != tgt)
			continue;
		res = 1;
		break;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (res);
}
/*
 * Return the number of disks in given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid_ndisks(struct g_raid_softc *sc, int state)
{
	struct g_raid_disk *disk;
	u_int n;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == state || state == -1)
			n++;
	}
	return (n);
}

/*
 * Return the number of subdisks in given state.
 * If state is equal to -1, count all connected subdisks.
 */
u_int
g_raid_nsubdisks(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *subdisk;
	struct g_raid_softc *sc;
	u_int i, n;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	n = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		subdisk = &vol->v_subdisks[i];
		if ((state == -1 &&
		    subdisk->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    subdisk->sd_state == state)
			n++;
	}
	return (n);
}

/*
 * Return the first subdisk in given state.
 * If state is equal to -1, return the first connected subdisk.
 */
struct g_raid_subdisk *
g_raid_get_subdisk(struct g_raid_volume *vol, int state)
{
	struct g_raid_subdisk *sd;
	struct g_raid_softc *sc;
	u_int i;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if ((state == -1 &&
		    sd->sd_state != G_RAID_SUBDISK_S_NONE) ||
		    sd->sd_state == state)
			return (sd);
	}
	return (NULL);
}
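/*
 * Illustrative use of the counting helpers (not from this file): a RAID1
 * transformation module might check whether a volume is fully populated
 * with
 *
 *	if (g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE) ==
 *	    vol->v_disks_count)
 *
 * and pick a rebuild source with
 * g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE).
 */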
struct g_consumer *
g_raid_open_consumer(struct g_raid_softc *sc, const char *name)
{
	struct g_consumer *cp;
	struct g_provider *pp;

	g_topology_assert();

	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	pp = g_provider_by_name(name);
	if (pp == NULL)
		return (NULL);
	cp = g_new_consumer(sc->sc_geom);
	if (g_attach(cp, pp) != 0) {
		g_destroy_consumer(cp);
		return (NULL);
	}
	if (g_access(cp, 1, 1, 1) != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		return (NULL);
	}
	return (cp);
}

u_int
g_raid_nrequests(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

u_int
g_raid_nopens(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	u_int opens;

	opens = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_provider_open != 0)
			opens++;
	}
	return (opens);
}

static int
g_raid_consumer_is_busy(struct g_raid_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid_nrequests(sc, cp) > 0) {
		G_RAID_DEBUG1(2, sc,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
void
g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert_not();

	g_topology_lock();
	cp->private = NULL;
	if (g_raid_consumer_is_busy(sc, cp))
		goto out;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we
		 * can send an event to detach and destroy the consumer.
		 * A class which has a consumer connected to the given
		 * provider will not receive a retaste event for it.
		 * This is the way retaste events are ignored when consumers
		 * opened for write are closed: the consumer is detached and
		 * destroyed after the retaste event is sent.
		 */
		g_post_event(g_raid_destroy_consumer, cp, M_WAITOK, NULL);
		goto out;
	}
	G_RAID_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
out:
	g_topology_unlock();
}

static void
g_raid_orphan(struct g_consumer *cp)
{
	struct g_raid_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	g_raid_event_send(disk, G_RAID_DISK_E_DISCONNECTED,
	    G_RAID_EVENT_DISK);
}
static void
g_raid_clean(struct g_raid_volume *vol, int acw)
{
	struct g_raid_softc *sc;
	int timeout;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return;
	if (!vol->v_dirty)
		return;
	if (vol->v_writes > 0)
		return;
	if (acw > 0 || (acw == -1 &&
	    vol->v_provider != NULL && vol->v_provider->acw > 0)) {
		timeout = g_raid_clean_time - (time_uptime - vol->v_last_write);
		if (!g_raid_shutdown && timeout > 0)
			return;
	}
	vol->v_dirty = 0;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as clean.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
}

static void
g_raid_dirty(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;

	sc = vol->v_softc;
	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

//	if ((sc->sc_flags & G_RAID_DEVICE_FLAG_NOFAILSYNC) != 0)
//		return;
	vol->v_dirty = 1;
	G_RAID_DEBUG1(1, sc, "Volume %s marked as dirty.",
	    vol->v_name);
	g_raid_write_metadata(sc, vol, NULL, NULL);
}
void
g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;
	sc = vol->v_softc;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
		    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	while ((cbp = bioq_takefirst(&queue)) != NULL)
		g_destroy_bio(cbp);
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_kerneldump_common_done(struct bio *bp)
{

	bp->bio_flags |= BIO_DONE;
}
int
g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct bio bp;

	vol = tr->tro_volume;
	sc = vol->v_softc;

	bzero(&bp, sizeof(bp));
	bp.bio_cmd = BIO_WRITE;
	bp.bio_done = g_raid_tr_kerneldump_common_done;
	bp.bio_attribute = NULL;
	bp.bio_offset = offset;
	bp.bio_length = length;
	bp.bio_data = virtual;
	bp.bio_to = vol->v_provider;

	g_raid_start(&bp);
	while (!(bp.bio_flags & BIO_DONE)) {
		G_RAID_DEBUG1(4, sc, "Poll...");
		g_raid_poll(sc);
		DELAY(10);
	}

	return (bp.bio_error != 0 ? EIO : 0);
}

static int
g_raid_dump(void *arg,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	int error;

	vol = (struct g_raid_volume *)arg;
	G_RAID_DEBUG1(3, vol->v_softc, "Dumping at off %llu len %llu.",
	    (long long unsigned)offset, (long long unsigned)length);

	error = G_RAID_TR_KERNELDUMP(vol->v_tr,
	    virtual, physical, offset, length);
	return (error);
}

static void
g_raid_kerneldump(struct g_raid_softc *sc, struct bio *bp)
{
	struct g_kerneldump *gkd;
	struct g_provider *pp;
	struct g_raid_volume *vol;

	gkd = (struct g_kerneldump*)bp->bio_data;
	pp = bp->bio_to;
	vol = pp->private;
	g_trace(G_T_TOPOLOGY, "g_raid_kerneldump(%s, %jd, %jd)",
	    pp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length);
	gkd->di.dumper = g_raid_dump;
	gkd->di.priv = vol;
	gkd->di.blocksize = vol->v_sectorsize;
	gkd->di.maxiosize = DFLTPHYS;
	gkd->di.mediaoffset = gkd->offset;
	if ((gkd->offset + gkd->length) > vol->v_mediasize)
		gkd->length = vol->v_mediasize - gkd->offset;
	gkd->di.mediasize = gkd->length;
	g_io_deliver(bp, 0);
}
static void
g_raid_candelete(struct g_raid_softc *sc, struct bio *bp)
{
	struct g_provider *pp;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int *val;
	int i;

	val = (int *)bp->bio_data;
	pp = bp->bio_to;
	vol = pp->private;
	*val = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
			continue;
		if (sd->sd_disk->d_candelete) {
			*val = 1;
			break;
		}
	}
	g_io_deliver(bp, 0);
}

static void
g_raid_start(struct bio *bp)
{
	struct g_raid_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid_start() should not be called at all.
	 */
//	KASSERT(sc != NULL && sc->sc_state == G_RAID_VOLUME_S_RUNNING,
//	    ("Provider's error should be set (error=%d)(mirror=%s).",
//	    bp->bio_to->error, bp->bio_to->name));
	G_RAID_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete"))
			g_raid_candelete(sc, bp);
		else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump"))
			g_raid_kerneldump(sc, bp);
		else
			g_io_deliver(bp, EOPNOTSUPP);
		return;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	if (!dumping) {
		G_RAID_DEBUG1(4, sc, "Waking up %p.", sc);
		wakeup(sc);
	}
}
static int
g_raid_bio_overlaps(const struct bio *bp, off_t lstart, off_t len)
{
	/*
	 * 5 cases:
	 * (1) bp entirely below			NO
	 * (2) bp entirely above			NO
	 * (3) bp start below, but end in range		YES
	 * (4) bp entirely within			YES
	 * (5) bp starts within, ends above		YES
	 *
	 * lock range 10-19 (offset 10 length 10)
	 * (1) 1-5: first if kicks it out
	 * (2) 30-35: second if kicks it out
	 * (3) 5-15: passes both ifs
	 * (4) 12-14: passes both ifs
	 * (5) 19-20: passes both
	 */
	off_t lend = lstart + len - 1;
	off_t bstart = bp->bio_offset;
	off_t bend = bp->bio_offset + bp->bio_length - 1;

	if (bend < lstart)
		return (0);
	if (bstart > lend)
		return (0);
	return (1);
}

static int
g_raid_is_in_locked_range(struct g_raid_volume *vol, const struct bio *bp)
{
	struct g_raid_lock *lp;

	sx_assert(&vol->v_softc->sc_lock, SX_LOCKED);

	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (g_raid_bio_overlaps(bp, lp->l_offset, lp->l_length))
			return (1);
	}
	return (0);
}
static void
g_raid_start_request(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = bp->bio_to->geom->softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);
	vol = bp->bio_to->private;

	/*
	 * Check to see if this item is in a locked range.  If so,
	 * queue it to our locked queue and return.  We'll requeue
	 * it when the range is unlocked.  Internal I/O for the
	 * rebuild/rescan/recovery process is excluded from this
	 * check so we can actually do the recovery.
	 */
	if (!(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL) &&
	    g_raid_is_in_locked_range(vol, bp)) {
		G_RAID_LOGREQ(3, bp, "Defer request.");
		bioq_insert_tail(&vol->v_locked, bp);
		return;
	}

	/*
	 * If we're actually going to do the write/delete, then
	 * update the idle stats for the volume.
	 */
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
		if (!vol->v_dirty)
			g_raid_dirty(vol);
		vol->v_writes++;
	}

	/*
	 * Put request onto inflight queue, so we can check if new
	 * synchronization requests don't collide with it.  Then tell
	 * the transformation layer to start the I/O.
	 */
	bioq_insert_tail(&vol->v_inflight, bp);
	G_RAID_LOGREQ(4, bp, "Request started");
	G_RAID_TR_IOSTART(vol->v_tr, bp);
}
static void
g_raid_finish_with_locked_ranges(struct g_raid_volume *vol, struct bio *bp)
{
	off_t off, len;
	struct bio *nbp;
	struct g_raid_lock *lp;

	vol->v_pending_lock = 0;
	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (lp->l_pending) {
			off = lp->l_offset;
			len = lp->l_length;
			lp->l_pending = 0;
			TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
				if (g_raid_bio_overlaps(nbp, off, len))
					lp->l_pending++;
			}
			if (lp->l_pending) {
				vol->v_pending_lock = 1;
				G_RAID_DEBUG1(4, vol->v_softc,
				    "Deferred lock(%jd, %jd) has %d pending",
				    (intmax_t)off, (intmax_t)(off + len),
				    lp->l_pending);
				continue;
			}
			G_RAID_DEBUG1(4, vol->v_softc,
			    "Deferred lock of %jd to %jd completed",
			    (intmax_t)off, (intmax_t)(off + len));
			G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
		}
	}
}

void
g_raid_iodone(struct bio *bp, int error)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = bp->bio_to->geom->softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);
	vol = bp->bio_to->private;
	G_RAID_LOGREQ(3, bp, "Request done: %d.", error);

	/* Update stats if we are done with a write/delete. */
	if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) {
		vol->v_writes--;
		vol->v_last_write = time_uptime;
	}

	bioq_remove(&vol->v_inflight, bp);
	if (vol->v_pending_lock && g_raid_is_in_locked_range(vol, bp))
		g_raid_finish_with_locked_ranges(vol, bp);
	getmicrouptime(&vol->v_last_done);
	g_io_deliver(bp, error);
}
int
g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
    struct bio *ignore, void *argp)
{
	struct g_raid_softc *sc;
	struct g_raid_lock *lp;
	struct bio *bp;

	sc = vol->v_softc;
	lp = malloc(sizeof(*lp), M_RAID, M_WAITOK | M_ZERO);
	LIST_INSERT_HEAD(&vol->v_locks, lp, l_next);
	lp->l_offset = off;
	lp->l_length = len;
	lp->l_callback_arg = argp;

	lp->l_pending = 0;
	TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
		if (bp != ignore && g_raid_bio_overlaps(bp, off, len))
			lp->l_pending++;
	}

	/*
	 * If there are any writes that are pending, we return EBUSY.  All
	 * callers will have to wait until all pending writes clear.
	 */
	if (lp->l_pending > 0) {
		vol->v_pending_lock = 1;
		G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd deferred %d pend",
		    (intmax_t)off, (intmax_t)(off + len), lp->l_pending);
		return (EBUSY);
	}
	G_RAID_DEBUG1(4, sc, "Locking range %jd to %jd",
	    (intmax_t)off, (intmax_t)(off + len));
	G_RAID_TR_LOCKED(vol->v_tr, lp->l_callback_arg);
	return (0);
}
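/*
 * Illustrative locking protocol for transformation modules: rebuild code
 * might quiesce the region it is about to copy with
 *
 *	if (g_raid_lock_range(vol, offset, length, NULL, arg) == 0)
 *		;	/* region is quiet; G_RAID_TR_LOCKED() already ran */
 *
 * On EBUSY the lock stays pending; once overlapping in-flight requests
 * drain, g_raid_finish_with_locked_ranges() fires
 * G_RAID_TR_LOCKED(vol->v_tr, arg), and the module later releases the
 * region with g_raid_unlock_range(vol, offset, length).
 */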
int
g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len)
{
	struct g_raid_lock *lp;
	struct g_raid_softc *sc;
	struct bio *bp;

	sc = vol->v_softc;
	LIST_FOREACH(lp, &vol->v_locks, l_next) {
		if (lp->l_offset == off && lp->l_length == len) {
			LIST_REMOVE(lp, l_next);
			/* XXX
			 * Right now we just put them all back on the queue
			 * and hope for the best.  We hope this because any
			 * locked ranges will go right back on this list
			 * when the worker thread runs.
			 * XXX
			 */
			G_RAID_DEBUG1(4, sc, "Unlocked %jd to %jd",
			    (intmax_t)lp->l_offset,
			    (intmax_t)(lp->l_offset + lp->l_length));
			mtx_lock(&sc->sc_queue_mtx);
			while ((bp = bioq_takefirst(&vol->v_locked)) != NULL)
				bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			free(lp, M_RAID);
			return (0);
		}
	}
	return (EINVAL);
}
void
g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp)
{
	struct g_consumer *cp;
	struct g_raid_disk *disk, *tdisk;

	bp->bio_caller1 = sd;

	/*
	 * Make sure that the disk is present.  Generally it is a task of
	 * transformation layers to not send requests to absent disks, but
	 * it is better to be safe and report the situation than be sorry.
	 */
	if (sd->sd_disk == NULL) {
		G_RAID_LOGREQ(0, bp, "Warning! I/O request to an absent disk!");

		bp->bio_from = NULL;
		bp->bio_to = NULL;
		bp->bio_error = ENXIO;
		g_raid_disk_done(bp);
		return;
	}
	disk = sd->sd_disk;
	if (disk->d_state != G_RAID_DISK_S_ACTIVE &&
	    disk->d_state != G_RAID_DISK_S_FAILED) {
		G_RAID_LOGREQ(0, bp, "Warning! I/O request to a disk in a "
		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
	}

	cp = disk->d_consumer;
	bp->bio_from = cp;
	bp->bio_to = cp->provider;
	cp->index++;

	/* Update average disks load. */
	TAILQ_FOREACH(tdisk, &sd->sd_softc->sc_disks, d_next) {
		if (tdisk->d_consumer == NULL)
			tdisk->d_load = 0;
		else
			tdisk->d_load = (tdisk->d_consumer->index *
			    G_RAID_SUBDISK_LOAD_SCALE + tdisk->d_load * 7) / 8;
	}

	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	if (dumping) {
		G_RAID_LOGREQ(3, bp, "Sending dumping request.");
		if (bp->bio_cmd == BIO_WRITE) {
			bp->bio_error = g_raid_subdisk_kerneldump(sd,
			    bp->bio_data, 0, bp->bio_offset, bp->bio_length);
		} else
			bp->bio_error = EOPNOTSUPP;
		g_raid_disk_done(bp);
	} else {
		bp->bio_done = g_raid_disk_done;
		bp->bio_offset += sd->sd_offset;
		G_RAID_LOGREQ(3, bp, "Sending request.");
		g_io_request(bp, cp);
	}
}
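/*
 * The d_load update above is an exponential moving average with weight
 * 1/8: new_load = (outstanding * SCALE + old_load * 7) / 8, so a disk's
 * load estimate decays toward its current outstanding-request count.
 * Hypothetical numbers: starting from d_load = 0 with one request always
 * outstanding, eight consecutive updates bring d_load to roughly 66% of
 * G_RAID_SUBDISK_LOAD_SCALE (1 - (7/8)^8).
 */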
int
g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{

	if (sd->sd_disk == NULL)
		return (ENXIO);
	if (sd->sd_disk->d_kd.di.dumper == NULL)
		return (EOPNOTSUPP);
	return (dump_write(&sd->sd_disk->d_kd.di,
	    virtual, physical,
	    sd->sd_disk->d_kd.di.mediaoffset + sd->sd_offset + offset,
	    length));
}

static void
g_raid_disk_done(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;

	sd = bp->bio_caller1;
	sc = sd->sd_softc;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	if (!dumping)
		wakeup(sc);
}
static void
g_raid_disk_done_request(struct bio *bp)
{
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;

	g_topology_assert_not();

	G_RAID_LOGREQ(3, bp, "Disk request done: %d.", bp->bio_error);
	sd = bp->bio_caller1;
	sc = sd->sd_softc;
	vol = sd->sd_volume;
	if (bp->bio_from != NULL) {
		bp->bio_from->index--;
		disk = bp->bio_from->private;
		if (disk == NULL)
			g_raid_kill_consumer(sc, bp->bio_from);
	}
	bp->bio_offset -= sd->sd_offset;

	G_RAID_TR_IODONE(vol->v_tr, sd, bp);
}

static void
g_raid_handle_event(struct g_raid_softc *sc, struct g_raid_event *ep)
{

	if ((ep->e_flags & G_RAID_EVENT_VOLUME) != 0)
		ep->e_error = g_raid_update_volume(ep->e_tgt, ep->e_event);
	else if ((ep->e_flags & G_RAID_EVENT_DISK) != 0)
		ep->e_error = g_raid_update_disk(ep->e_tgt, ep->e_event);
	else if ((ep->e_flags & G_RAID_EVENT_SUBDISK) != 0)
		ep->e_error = g_raid_update_subdisk(ep->e_tgt, ep->e_event);
	else
		ep->e_error = g_raid_update_node(ep->e_tgt, ep->e_event);
	if ((ep->e_flags & G_RAID_EVENT_WAIT) == 0) {
		KASSERT(ep->e_error == 0,
		    ("Error cannot be handled."));
		g_raid_event_free(ep);
	} else {
		ep->e_flags |= G_RAID_EVENT_DONE;
		G_RAID_DEBUG1(4, sc, "Waking up %p.", ep);
		mtx_lock(&sc->sc_queue_mtx);
		wakeup(ep);
		mtx_unlock(&sc->sc_queue_mtx);
	}
}
static void
g_raid_worker(void *arg)
{
	struct g_raid_softc *sc;
	struct g_raid_event *ep;
	struct g_raid_volume *vol;
	struct bio *bp;
	struct timeval now, t;
	int timeout, rv;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		mtx_lock(&sc->sc_queue_mtx);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		bp = NULL;
		vol = NULL;
		rv = 0;
		ep = TAILQ_FIRST(&sc->sc_events);
		if (ep != NULL)
			TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		else if ((bp = bioq_takefirst(&sc->sc_queue)) != NULL)
			;
		else {
			getmicrouptime(&now);
			t = now;
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (bioq_first(&vol->v_inflight) == NULL &&
				    vol->v_tr &&
				    timevalcmp(&vol->v_last_done, &t, < ))
					t = vol->v_last_done;
			}
			timevalsub(&t, &now);
			timeout = g_raid_idle_threshold +
			    t.tv_sec * 1000000 + t.tv_usec;
			if (timeout > 0) {
				/*
				 * Two steps to avoid overflows at HZ=1000
				 * and idle timeouts > 2.1s.  Some rounding
				 * errors can occur, but they are < 1tick,
				 * which is deemed to be close enough for
				 * this purpose.
				 */
				int micpertic = 1000000 / hz;
				timeout = (timeout + micpertic - 1) / micpertic;
				sx_xunlock(&sc->sc_lock);
				MSLEEP(rv, sc, &sc->sc_queue_mtx,
				    PRIBIO | PDROP, "-", timeout);
				sx_xlock(&sc->sc_lock);
				goto process;
			} else
				rv = EWOULDBLOCK;
		}
		mtx_unlock(&sc->sc_queue_mtx);
process:
		if (ep != NULL) {
			g_raid_handle_event(sc, ep);
		} else if (bp != NULL) {
			if (bp->bio_to != NULL &&
			    bp->bio_to->geom == sc->sc_geom)
				g_raid_start_request(bp);
			else
				g_raid_disk_done_request(bp);
		} else if (rv == EWOULDBLOCK) {
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				g_raid_clean(vol, -1);
				if (bioq_first(&vol->v_inflight) == NULL &&
				    vol->v_tr) {
					t.tv_sec = g_raid_idle_threshold / 1000000;
					t.tv_usec = g_raid_idle_threshold % 1000000;
					timevaladd(&t, &vol->v_last_done);
					getmicrouptime(&now);
					if (timevalcmp(&t, &now, <= )) {
						G_RAID_TR_IDLE(vol->v_tr);
						vol->v_last_done = now;
					}
				}
			}
		}
		if (sc->sc_stopping == G_RAID_DESTROY_HARD)
			g_raid_destroy_node(sc, 1);	/* May not return. */
	}
}
static void
g_raid_poll(struct g_raid_softc *sc)
{
	struct g_raid_event *ep;
	struct bio *bp;

	sx_xlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	/*
	 * First take a look at events.
	 * This is important to handle events before any I/O requests.
	 */
	ep = TAILQ_FIRST(&sc->sc_events);
	if (ep != NULL) {
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		mtx_unlock(&sc->sc_queue_mtx);
		g_raid_handle_event(sc, ep);
		goto out;
	}
	bp = bioq_takefirst(&sc->sc_queue);
	if (bp != NULL) {
		mtx_unlock(&sc->sc_queue_mtx);
		if (bp->bio_from == NULL ||
		    bp->bio_from->geom != sc->sc_geom)
			g_raid_start_request(bp);
		else
			g_raid_disk_done_request(bp);
	}
out:
	sx_xunlock(&sc->sc_lock);
}
static void
g_raid_launch_provider(struct g_raid_volume *vol)
{
	struct g_raid_disk *disk;
	struct g_raid_subdisk *sd;
	struct g_raid_softc *sc;
	struct g_provider *pp;
	char name[G_RAID_MAX_VOLUMENAME];
	char announce_buf[80], buf1[32];
	off_t off;
	int i;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	/* Try to name provider with volume name. */
	snprintf(name, sizeof(name), "raid/%s", vol->v_name);
	if (g_raid_name_format == 0 || vol->v_name[0] == 0 ||
	    g_provider_by_name(name) != NULL) {
		/* Otherwise use sequential volume number. */
		snprintf(name, sizeof(name), "raid/r%d", vol->v_global_id);
	}

	/*
	 * Create a /dev/ar%d that the old ataraid(4) stack once
	 * created as an alias for /dev/raid/r%d if requested.
	 * This helps going from stable/7 ataraid devices to newer
	 * FreeBSD releases. sbruno 07 MAY 2013
	 */

	if (ar_legacy_aliases) {
		snprintf(announce_buf, sizeof(announce_buf),
		    "kern.devalias.%s", name);
		snprintf(buf1, sizeof(buf1),
		    "ar%d", vol->v_global_id);
		setenv(announce_buf, buf1);
	}

	pp = g_new_providerf(sc->sc_geom, "%s", name);
	if (vol->v_tr->tro_class->trc_accept_unmapped) {
		pp->flags |= G_PF_ACCEPT_UNMAPPED;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE)
				continue;
			if ((sd->sd_disk->d_consumer->provider->flags &
			    G_PF_ACCEPT_UNMAPPED) == 0)
				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
		}
	}
	pp->private = vol;
	pp->mediasize = vol->v_mediasize;
	pp->sectorsize = vol->v_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE ||
	    vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT) {
		if ((disk = vol->v_subdisks[0].sd_disk) != NULL &&
		    disk->d_consumer != NULL &&
		    disk->d_consumer->provider != NULL) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			off = disk->d_consumer->provider->stripeoffset;
			pp->stripeoffset = off + vol->v_subdisks[0].sd_offset;
			if (off > 0)
				pp->stripeoffset %= off;
		}
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3) {
			pp->stripesize *= (vol->v_disks_count - 1);
			pp->stripeoffset *= (vol->v_disks_count - 1);
		}
	} else
		pp->stripesize = vol->v_strip_size;
	vol->v_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s created.",
	    pp->name, vol->v_name);
}
static void
g_raid_destroy_provider(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_provider *pp;
	struct bio *bp, *tmp;

	g_topology_assert_not();
	sc = vol->v_softc;
	pp = vol->v_provider;
	KASSERT(pp != NULL, ("NULL provider (volume=%s).", vol->v_name));

	g_topology_lock();
	g_error_provider(pp, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
		if (bp->bio_to != pp)
			continue;
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID_DEBUG1(0, sc, "Provider %s for volume %s destroyed.",
	    pp->name, vol->v_name);
	g_wither_provider(pp, ENXIO);
	g_topology_unlock();
	vol->v_provider = NULL;
}
/*
 * Update device state.
 */
static int
g_raid_update_volume(struct g_raid_volume *vol, u_int event)
{
	struct g_raid_softc *sc;

	sc = vol->v_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for volume %s.",
	    g_raid_volume_event2str(event),
	    vol->v_name);
	switch (event) {
	case G_RAID_VOLUME_E_DOWN:
		if (vol->v_provider != NULL)
			g_raid_destroy_provider(vol);
		break;
	case G_RAID_VOLUME_E_UP:
		if (vol->v_provider == NULL)
			g_raid_launch_provider(vol);
		break;
	case G_RAID_VOLUME_E_START:
		if (vol->v_tr)
			G_RAID_TR_START(vol->v_tr);
		return (0);
	default:
		if (sc->sc_md)
			G_RAID_MD_VOLUME_EVENT(sc->sc_md, vol, event);
		return (0);
	}

	/* Manage root mount release. */
	if (vol->v_starting) {
		vol->v_starting = 0;
		G_RAID_DEBUG1(1, sc, "root_mount_rel %p", vol->v_rootmount);
		root_mount_rel(vol->v_rootmount);
		vol->v_rootmount = NULL;
	}
	if (vol->v_stopping && vol->v_provider_open == 0)
		g_raid_destroy_volume(vol);
	return (0);
}

/*
 * Update subdisk state.
 */
static int
g_raid_update_subdisk(struct g_raid_subdisk *sd, u_int event)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	sc = sd->sd_softc;
	vol = sd->sd_volume;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for subdisk %s:%d-%s.",
	    g_raid_subdisk_event2str(event),
	    vol->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	if (vol->v_tr)
		G_RAID_TR_EVENT(vol->v_tr, sd, event);

	return (0);
}

/*
 * Update disk state.
 */
static int
g_raid_update_disk(struct g_raid_disk *disk, u_int event)
{
	struct g_raid_softc *sc;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for disk %s.",
	    g_raid_disk_event2str(event),
	    g_raid_get_diskname(disk));

	if (sc->sc_md)
		G_RAID_MD_EVENT(sc->sc_md, disk, event);
	return (0);
}

static int
g_raid_update_node(struct g_raid_softc *sc, u_int event)
{
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	G_RAID_DEBUG1(2, sc, "Event %s for the array.",
	    g_raid_node_event2str(event));

	if (event == G_RAID_NODE_E_WAKE)
		return (0);
	if (sc->sc_md)
		G_RAID_MD_EVENT(sc->sc_md, NULL, event);
	return (0);
}
static int
g_raid_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	int dcw, opens, error = 0;

	g_topology_assert();
	sc = pp->geom->softc;
	vol = pp->private;
	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
	KASSERT(vol != NULL, ("NULL volume (provider=%s).", pp->name));

	G_RAID_DEBUG1(2, sc, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);
	dcw = pp->acw + acw;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	/* Deny new opens while dying. */
	if (sc->sc_stopping != 0 && (acr > 0 || acw > 0 || ace > 0)) {
		error = ENXIO;
		goto out;
	}
	/* Deny write opens for read-only volumes. */
	if (vol->v_read_only && acw > 0) {
		error = EROFS;
		goto out;
	}
	if (dcw == 0)
		g_raid_clean(vol, dcw);
	vol->v_provider_open += acr + acw + ace;
	/* Handle delayed node destruction. */
	if (sc->sc_stopping == G_RAID_DESTROY_DELAYED &&
	    vol->v_provider_open == 0) {
		/* Count open volumes. */
		opens = g_raid_nopens(sc);
		if (opens == 0) {
			sc->sc_stopping = G_RAID_DESTROY_HARD;
			/* Wake up worker to make it selfdestruct. */
			g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
		}
	}
	/* Handle open volume destruction. */
	if (vol->v_stopping && vol->v_provider_open == 0)
		g_raid_destroy_volume(vol);
out:
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}
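/*
 * Reference-counting sketch (illustrative): GEOM calls this method as,
 * e.g., g_raid_access(pp, 1, 0, 0) on a read-only open and with negated
 * deltas on close, so vol->v_provider_open tracks the sum of all open
 * references; reaching zero is what arms the delayed destruction handled
 * above.
 */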
struct g_raid_softc *
g_raid_create_node(struct g_class *mp,
    const char *name, struct g_raid_md_object *md)
{
	struct g_raid_softc *sc;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	G_RAID_DEBUG(1, "Creating array %s.", name);

	gp = g_new_geomf(mp, "%s", name);
	sc = malloc(sizeof(*sc), M_RAID, M_WAITOK | M_ZERO);
	gp->start = g_raid_start;
	gp->orphan = g_raid_orphan;
	gp->access = g_raid_access;
	gp->dumpconf = g_raid_dumpconf;

	sc->sc_md = md;
	sc->sc_geom = gp;
	sc->sc_flags = 0;
	TAILQ_INIT(&sc->sc_volumes);
	TAILQ_INIT(&sc->sc_disks);
	sx_init(&sc->sc_lock, "graid:lock");
	mtx_init(&sc->sc_queue_mtx, "graid:queue", NULL, MTX_DEF);
	TAILQ_INIT(&sc->sc_events);
	bioq_init(&sc->sc_queue);
	gp->softc = sc;
	error = kproc_create(g_raid_worker, sc, &sc->sc_worker, 0, 0,
	    "g_raid %s", name);
	if (error != 0) {
		G_RAID_DEBUG(0, "Cannot create kernel thread for %s.", name);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_destroy(&sc->sc_lock);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_RAID);
		return (NULL);
	}

	G_RAID_DEBUG1(0, sc, "Array %s created.", name);
	return (sc);
}
struct g_raid_volume *
g_raid_create_volume(struct g_raid_softc *sc, const char *name, int id)
{
	struct g_raid_volume *vol, *vol1;
	int i;

	G_RAID_DEBUG1(1, sc, "Creating volume %s.", name);
	vol = malloc(sizeof(*vol), M_RAID, M_WAITOK | M_ZERO);
	vol->v_softc = sc;
	strlcpy(vol->v_name, name, G_RAID_MAX_VOLUMENAME);
	vol->v_state = G_RAID_VOLUME_S_STARTING;
	vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_UNKNOWN;
	vol->v_rotate_parity = 1;
	bioq_init(&vol->v_inflight);
	bioq_init(&vol->v_locked);
	LIST_INIT(&vol->v_locks);
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		vol->v_subdisks[i].sd_softc = sc;
		vol->v_subdisks[i].sd_volume = vol;
		vol->v_subdisks[i].sd_pos = i;
		vol->v_subdisks[i].sd_state = G_RAID_DISK_S_NONE;
	}

	/* Find free ID for this volume. */
	g_topology_lock();
	vol1 = vol;
	if (id >= 0) {
		LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
			if (vol1->v_global_id == id)
				break;
		}
	}
	if (vol1 != NULL) {
		for (id = 0; ; id++) {
			LIST_FOREACH(vol1, &g_raid_volumes, v_global_next) {
				if (vol1->v_global_id == id)
					break;
			}
			if (vol1 == NULL)
				break;
		}
	}
	vol->v_global_id = id;
	LIST_INSERT_HEAD(&g_raid_volumes, vol, v_global_next);
	g_topology_unlock();

	/* Delay root mounting. */
	vol->v_rootmount = root_mount_hold("GRAID");
	G_RAID_DEBUG1(1, sc, "root_mount_hold %p", vol->v_rootmount);
	vol->v_starting = 1;
	TAILQ_INSERT_TAIL(&sc->sc_volumes, vol, v_next);
	return (vol);
}

struct g_raid_disk *
g_raid_create_disk(struct g_raid_softc *sc)
{
	struct g_raid_disk *disk;

	G_RAID_DEBUG1(1, sc, "Creating disk.");
	disk = malloc(sizeof(*disk), M_RAID, M_WAITOK | M_ZERO);
	disk->d_softc = sc;
	disk->d_state = G_RAID_DISK_S_NONE;
	TAILQ_INIT(&disk->d_subdisks);
	TAILQ_INSERT_TAIL(&sc->sc_disks, disk, d_next);
	return (disk);
}
int
g_raid_start_volume(struct g_raid_volume *vol)
{
	struct g_raid_tr_class *class;
	struct g_raid_tr_object *obj;
	int status;

	G_RAID_DEBUG1(2, vol->v_softc, "Starting volume %s.", vol->v_name);
	LIST_FOREACH(class, &g_raid_tr_classes, trc_list) {
		if (!class->trc_enable)
			continue;
		G_RAID_DEBUG1(2, vol->v_softc,
		    "Tasting volume %s for %s transformation.",
		    vol->v_name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->tro_class = class;
		obj->tro_volume = vol;
		status = G_RAID_TR_TASTE(obj, vol);
		if (status != G_RAID_TR_TASTE_FAIL)
			break;
		kobj_delete((kobj_t)obj, M_RAID);
	}
	if (class == NULL) {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "No transformation module found for %s.",
		    vol->v_name);
		vol->v_tr = NULL;
		g_raid_change_volume_state(vol, G_RAID_VOLUME_S_UNSUPPORTED);
		g_raid_event_send(vol, G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		return (-1);
	}
	G_RAID_DEBUG1(2, vol->v_softc,
	    "Transformation module %s chosen for %s.",
	    class->name, vol->v_name);
	vol->v_tr = obj;
	return (0);
}
int
g_raid_destroy_node(struct g_raid_softc *sc, int worker)
{
	struct g_raid_volume *vol, *tmpv;
	struct g_raid_disk *disk, *tmpd;
	int error = 0;

	sc->sc_stopping = G_RAID_DESTROY_HARD;
	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tmpv) {
		if (g_raid_destroy_volume(vol))
			error = EBUSY;
	}
	if (error)
		return (error);
	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tmpd) {
		if (g_raid_destroy_disk(disk))
			error = EBUSY;
	}
	if (error)
		return (error);
	if (sc->sc_md) {
		G_RAID_MD_FREE(sc->sc_md);
		kobj_delete((kobj_t)sc->sc_md, M_RAID);
		sc->sc_md = NULL;
	}
	if (sc->sc_geom != NULL) {
		G_RAID_DEBUG1(0, sc, "Array %s destroyed.", sc->sc_name);
		g_topology_lock();
		sc->sc_geom->softc = NULL;
		g_wither_geom(sc->sc_geom, ENXIO);
		g_topology_unlock();
		sc->sc_geom = NULL;
	} else
		G_RAID_DEBUG(1, "Array destroyed.");
	if (worker) {
		g_raid_event_cancel(sc, sc);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_xunlock(&sc->sc_lock);
		sx_destroy(&sc->sc_lock);
		wakeup(&sc->sc_stopping);
		free(sc, M_RAID);
		curthread->td_pflags &= ~TDP_GEOM;
		G_RAID_DEBUG(1, "Thread exiting.");
		kproc_exit(0);
	} else {
		/* Wake up worker to make it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
	return (0);
}
int
g_raid_destroy_volume(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	int i;

	sc = vol->v_softc;
	G_RAID_DEBUG1(2, sc, "Destroying volume %s.", vol->v_name);
	vol->v_stopping = 1;
	if (vol->v_state != G_RAID_VOLUME_S_STOPPED) {
		if (vol->v_tr) {
			G_RAID_TR_STOP(vol->v_tr);
			return (EBUSY);
		} else
			vol->v_state = G_RAID_VOLUME_S_STOPPED;
	}
	if (g_raid_event_check(sc, vol) != 0)
		return (EBUSY);
	if (vol->v_provider != NULL)
		return (EBUSY);
	if (vol->v_provider_open != 0)
		return (EBUSY);
	if (vol->v_tr) {
		G_RAID_TR_FREE(vol->v_tr);
		kobj_delete((kobj_t)vol->v_tr, M_RAID);
		vol->v_tr = NULL;
	}
	if (vol->v_rootmount)
		root_mount_rel(vol->v_rootmount);
	g_topology_lock();
	LIST_REMOVE(vol, v_global_next);
	g_topology_unlock();
	TAILQ_REMOVE(&sc->sc_volumes, vol, v_next);
	for (i = 0; i < G_RAID_MAX_SUBDISKS; i++) {
		g_raid_event_cancel(sc, &vol->v_subdisks[i]);
		disk = vol->v_subdisks[i].sd_disk;
		if (disk == NULL)
			continue;
		TAILQ_REMOVE(&disk->d_subdisks, &vol->v_subdisks[i], sd_next);
	}
	G_RAID_DEBUG1(2, sc, "Volume %s destroyed.", vol->v_name);
	if (sc->sc_md)
		G_RAID_MD_FREE_VOLUME(sc->sc_md, vol);
	g_raid_event_cancel(sc, vol);
	free(vol, M_RAID);
	if (sc->sc_stopping == G_RAID_DESTROY_HARD) {
		/* Wake up worker to let it selfdestruct. */
		g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	}
	return (0);
}

int
g_raid_destroy_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd, *tmp;

	sc = disk->d_softc;
	G_RAID_DEBUG1(2, sc, "Destroying disk.");
	if (disk->d_consumer) {
		g_raid_kill_consumer(sc, disk->d_consumer);
		disk->d_consumer = NULL;
	}
	TAILQ_FOREACH_SAFE(sd, &disk->d_subdisks, sd_next, tmp) {
		g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_NONE);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_DISCONNECTED,
		    G_RAID_EVENT_SUBDISK);
		TAILQ_REMOVE(&disk->d_subdisks, sd, sd_next);
		sd->sd_disk = NULL;
	}
	TAILQ_REMOVE(&sc->sc_disks, disk, d_next);
	if (sc->sc_md)
		G_RAID_MD_FREE_DISK(sc->sc_md, disk);
	g_raid_event_cancel(sc, disk);
	free(disk, M_RAID);
	return (0);
}
int
g_raid_destroy(struct g_raid_softc *sc, int how)
{
	int error, opens;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	/* Count open volumes. */
	opens = g_raid_nopens(sc);

	/* React on some opened volumes. */
	if (opens > 0) {
		switch (how) {
		case G_RAID_DESTROY_SOFT:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
			sx_xunlock(&sc->sc_lock);
			return (EBUSY);
		case G_RAID_DESTROY_DELAYED:
			G_RAID_DEBUG1(1, sc,
			    "Array will be destroyed on last close.");
			sc->sc_stopping = G_RAID_DESTROY_DELAYED;
			sx_xunlock(&sc->sc_lock);
			return (EBUSY);
		case G_RAID_DESTROY_HARD:
			G_RAID_DEBUG1(1, sc,
			    "%d volumes are still open.",
			    opens);
		}
	}

	/* Mark node for destruction. */
	sc->sc_stopping = G_RAID_DESTROY_HARD;
	/* Wake up worker to let it selfdestruct. */
	g_raid_event_send(sc, G_RAID_NODE_E_WAKE, 0);
	/* Sleep until node destroyed. */
	error = sx_sleep(&sc->sc_stopping, &sc->sc_lock,
	    PRIBIO | PDROP, "r:destroy", hz * 3);
	return (error == EWOULDBLOCK ? EBUSY : 0);
}
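/*
 * Illustrative summary of the three destruction modes used above:
 * G_RAID_DESTROY_SOFT fails with EBUSY if any volume is open,
 * G_RAID_DESTROY_DELAYED arms destruction on last close (handled in
 * g_raid_access()), and G_RAID_DESTROY_HARD proceeds regardless.  A
 * typical caller looks like g_raid_destroy_geom() below:
 *
 *	error = g_raid_destroy(sc, G_RAID_DESTROY_SOFT);
 */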
static void
g_raid_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

static struct g_geom *
g_raid_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp, *geom;
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	if (!g_raid_enable)
		return (NULL);
	G_RAID_DEBUG(2, "Tasting provider %s.", pp->name);

	geom = NULL;
	status = G_RAID_MD_TASTE_FAIL;
	gp = g_new_geomf(mp, "raid:taste");
	/*
	 * This orphan function should be never called.
	 */
	gp->orphan = g_raid_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	if (g_access(cp, 1, 0, 0) != 0)
		goto ofail;

	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		if (!class->mdc_enable)
			continue;
		G_RAID_DEBUG(2, "Tasting provider %s for %s metadata.",
		    pp->name, class->name);
		obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
		    M_WAITOK);
		obj->mdo_class = class;
		status = G_RAID_MD_TASTE(obj, mp, cp, &geom);
		if (status != G_RAID_MD_TASTE_NEW)
			kobj_delete((kobj_t)obj, M_RAID);
		if (status != G_RAID_MD_TASTE_FAIL)
			break;
	}

	if (status == G_RAID_MD_TASTE_FAIL)
		(void)g_access(cp, -1, 0, 0);
ofail:
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	G_RAID_DEBUG(2, "Tasting provider %s done.", pp->name);
	return (geom);
}
int
g_raid_create_node_format(const char *format, struct gctl_req *req,
    struct g_geom **gp)
{
	struct g_raid_md_class *class;
	struct g_raid_md_object *obj;
	int status;

	G_RAID_DEBUG(2, "Creating array for %s metadata.", format);
	LIST_FOREACH(class, &g_raid_md_classes, mdc_list) {
		if (strcasecmp(class->name, format) == 0)
			break;
	}
	if (class == NULL) {
		G_RAID_DEBUG(1, "No support for %s metadata.", format);
		return (G_RAID_MD_TASTE_FAIL);
	}
	obj = (void *)kobj_create((kobj_class_t)class, M_RAID,
	    M_WAITOK);
	obj->mdo_class = class;
	status = G_RAID_MD_CREATE_REQ(obj, &g_raid_class, req, gp);
	if (status != G_RAID_MD_TASTE_NEW)
		kobj_delete((kobj_t)obj, M_RAID);
	return (status);
}

static int
g_raid_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_raid_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	error = g_raid_destroy(gp->softc, G_RAID_DESTROY_SOFT);

	g_topology_lock();
	return (error);
}
void
g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return;
	if (sc->sc_md)
		G_RAID_MD_WRITE(sc->sc_md, vol, sd, disk);
}

void
g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk)
{

	if (disk == NULL)
		disk = sd->sd_disk;
	if (disk == NULL) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to an absent disk!");
		return;
	}
	if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
		G_RAID_DEBUG1(0, sc, "Warning! Fail request to a disk in a "
		    "wrong state (%s)!", g_raid_disk_state2str(disk->d_state));
		return;
	}
	if (sc->sc_md)
		G_RAID_MD_FAIL_DISK(sc->sc_md, sd, disk);
}
static void
g_raid_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	int i, s;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		vol = pp->private;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<descr>%s %s volume</descr>\n", indent,
		    sc->sc_md->mdo_class->name,
		    g_raid_volume_level2str(vol->v_raid_level,
		    vol->v_raid_level_qualifier));
		sbuf_printf(sb, "%s<Label>%s</Label>\n", indent,
		    vol->v_name);
		sbuf_printf(sb, "%s<RAIDLevel>%s</RAIDLevel>\n", indent,
		    g_raid_volume_level2str(vol->v_raid_level,
		    vol->v_raid_level_qualifier));
		sbuf_printf(sb,
		    "%s<Transformation>%s</Transformation>\n", indent,
		    vol->v_tr ? vol->v_tr->tro_class->name : "NONE");
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    vol->v_disks_count);
		sbuf_printf(sb, "%s<Strip>%u</Strip>\n", indent,
		    vol->v_strip_size);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_raid_volume_state2str(vol->v_state));
		sbuf_printf(sb, "%s<Dirty>%s</Dirty>\n", indent,
		    vol->v_dirty ? "Yes" : "No");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_disk != NULL &&
			    sd->sd_disk->d_consumer != NULL) {
				sbuf_printf(sb, "%s ",
				    g_raid_get_diskname(sd->sd_disk));
			} else {
				sbuf_printf(sb, "NONE ");
			}
			sbuf_printf(sb, "(%s",
			    g_raid_subdisk_state2str(sd->sd_state));
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
			    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
				sbuf_printf(sb, " %d%%",
				    (int)(sd->sd_rebuild_pos * 100 /
				    sd->sd_size));
			}
			sbuf_printf(sb, ")");
			if (i + 1 < vol->v_disks_count)
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else if (cp != NULL) {
		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<State>%s", indent,
		    g_raid_disk_state2str(disk->d_state));
		if (!TAILQ_EMPTY(&disk->d_subdisks)) {
			sbuf_printf(sb, " (");
			TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
				sbuf_printf(sb, "%s",
				    g_raid_subdisk_state2str(sd->sd_state));
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				    sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
					sbuf_printf(sb, " %d%%",
					    (int)(sd->sd_rebuild_pos * 100 /
					    sd->sd_size));
				}
				if (TAILQ_NEXT(sd, sd_next))
					sbuf_printf(sb, ", ");
			}
			sbuf_printf(sb, ")");
		}
		sbuf_printf(sb, "</State>\n");
		sbuf_printf(sb, "%s<Subdisks>", indent);
		TAILQ_FOREACH(sd, &disk->d_subdisks, sd_next) {
			sbuf_printf(sb, "r%d(%s):%d@%ju",
			    sd->sd_volume->v_global_id,
			    sd->sd_volume->v_name,
			    sd->sd_pos, sd->sd_offset);
			if (TAILQ_NEXT(sd, sd_next))
				sbuf_printf(sb, ", ");
		}
		sbuf_printf(sb, "</Subdisks>\n");
		sbuf_printf(sb, "%s<ReadErrors>%d</ReadErrors>\n", indent,
		    disk->d_read_errs);
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		if (sc->sc_md) {
			sbuf_printf(sb, "%s<Metadata>%s</Metadata>\n", indent,
			    sc->sc_md->mdo_class->name);
		}
		if (!TAILQ_EMPTY(&sc->sc_volumes)) {
			s = 0xff;
			TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
				if (vol->v_state < s)
					s = vol->v_state;
			}
			sbuf_printf(sb, "%s<State>%s</State>\n", indent,
			    g_raid_volume_state2str(s));
		}
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}
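/*
 * Hypothetical sketch of the per-provider XML fragment emitted above for
 * a two-disk mirror (field values invented for illustration):
 *
 *	<Label>data</Label>
 *	<RAIDLevel>RAID1</RAIDLevel>
 *	<Transformation>RAID1</Transformation>
 *	<Components>2</Components>
 *	<State>OPTIMAL</State>
 *	<Dirty>No</Dirty>
 *	<Subdisks>ada0 (ACTIVE), ada1 (ACTIVE)</Subdisks>
 */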
static void
g_raid_shutdown_post_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;

	mp = arg;
	g_topology_lock();
	g_raid_shutdown = 1;
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next)
			g_raid_clean(vol, -1);
		g_raid_destroy(sc, G_RAID_DESTROY_DELAYED);
		g_topology_lock();
	}
	g_topology_unlock();
}

static void
g_raid_init(struct g_class *mp)
{

	g_raid_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_raid_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_raid_post_sync == NULL)
		G_RAID_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_raid_started = 1;
}

static void
g_raid_fini(struct g_class *mp)
{

	if (g_raid_post_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid_post_sync);
	g_raid_started = 0;
}
int
g_raid_md_modevent(module_t mod, int type, void *arg)
{
	struct g_raid_md_class *class, *c, *nc;
	int error;

	error = 0;
	class = arg;
	switch (type) {
	case MOD_LOAD:
		c = LIST_FIRST(&g_raid_md_classes);
		if (c == NULL || c->mdc_priority > class->mdc_priority)
			LIST_INSERT_HEAD(&g_raid_md_classes, class, mdc_list);
		else {
			while ((nc = LIST_NEXT(c, mdc_list)) != NULL &&
			    nc->mdc_priority < class->mdc_priority)
				c = nc;
			LIST_INSERT_AFTER(c, class, mdc_list);
		}
		if (g_raid_started)
			g_retaste(&g_raid_class);
		break;
	case MOD_UNLOAD:
		LIST_REMOVE(class, mdc_list);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

int
g_raid_tr_modevent(module_t mod, int type, void *arg)
{
	struct g_raid_tr_class *class, *c, *nc;
	int error;

	error = 0;
	class = arg;
	switch (type) {
	case MOD_LOAD:
		c = LIST_FIRST(&g_raid_tr_classes);
		if (c == NULL || c->trc_priority > class->trc_priority)
			LIST_INSERT_HEAD(&g_raid_tr_classes, class, trc_list);
		else {
			while ((nc = LIST_NEXT(c, trc_list)) != NULL &&
			    nc->trc_priority < class->trc_priority)
				c = nc;
			LIST_INSERT_AFTER(c, class, trc_list);
		}
		break;
	case MOD_UNLOAD:
		LIST_REMOVE(class, trc_list);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
/*
 * Use local implementation of DECLARE_GEOM_CLASS(g_raid_class, g_raid)
 * to reduce module priority, allowing submodules to register them first.
 */
static moduledata_t g_raid_mod = {
	"g_raid",
	g_modevent,
	&g_raid_class
};
DECLARE_MODULE(g_raid, g_raid_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
MODULE_VERSION(geom_raid, 0);