/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2000 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_md_if.h"

static MALLOC_DEFINE(M_MD_PROMISE, "md_promise_data", "GEOM_RAID Promise metadata");

#define	PROMISE_MAX_DISKS	8
#define	PROMISE_MAX_SUBDISKS	2
#define	PROMISE_META_OFFSET	14
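/*
 * A single disk can carry up to PROMISE_MAX_SUBDISKS metadata records, one
 * per volume the disk participates in.  Record n is stored
 * 63 - n * PROMISE_META_OFFSET sectors before the end of the disk; see
 * promise_meta_read() and promise_meta_write() below.
 */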
struct promise_raid_disk {
	uint8_t		flags;			/* Subdisk status. */
#define PROMISE_F_VALID		0x01
#define PROMISE_F_ONLINE	0x02
#define PROMISE_F_ASSIGNED	0x04
#define PROMISE_F_SPARE		0x08
#define PROMISE_F_DUPLICATE	0x10
#define PROMISE_F_REDIR		0x20
#define PROMISE_F_DOWN		0x40
#define PROMISE_F_READY		0x80

	uint8_t		number;			/* Position in a volume. */
	uint8_t		channel;		/* ATA channel number. */
	uint8_t		device;			/* ATA device number. */
	uint64_t	id __packed;		/* Subdisk ID. */
} __packed;
struct promise_raid_conf {
	char		promise_id[24];
#define PROMISE_MAGIC		"Promise Technology, Inc."
#define FREEBSD_MAGIC		"FreeBSD ATA driver RAID "

	uint32_t	dummy_0;
#define PROMISE_MAGIC0(x)	(((uint64_t)(x.channel) << 48) | \
				((uint64_t)(x.device != 0) << 56))

	uint32_t	integrity;
#define PROMISE_I_VALID		0x00000080

	struct promise_raid_disk	disk;	/* This subdisk info. */
	uint32_t	disk_offset;		/* Subdisk offset. */
	uint32_t	disk_sectors;		/* Subdisk size. */
	uint32_t	disk_rebuild;		/* Rebuild position. */
	uint16_t	generation;		/* Generation number. */
	uint8_t		status;			/* Volume status. */
#define PROMISE_S_VALID		0x01
#define PROMISE_S_ONLINE	0x02
#define PROMISE_S_INITED	0x04
#define PROMISE_S_READY		0x08
#define PROMISE_S_DEGRADED	0x10
#define PROMISE_S_MARKED	0x20
#define PROMISE_S_MIGRATING	0x40
#define PROMISE_S_FUNCTIONAL	0x80

	uint8_t		type;			/* Volume type. */
#define PROMISE_T_RAID0		0x00
#define PROMISE_T_RAID1		0x01
#define PROMISE_T_RAID3		0x02
#define PROMISE_T_RAID5		0x04
#define PROMISE_T_SPAN		0x08
#define PROMISE_T_JBOD		0x10

	uint8_t		total_disks;		/* Disks in this volume. */
	uint8_t		stripe_shift;		/* Strip size. */
	uint8_t		array_width;		/* Number of RAID0 stripes. */
	uint8_t		array_number;		/* Global volume number. */
	uint32_t	total_sectors;		/* Volume size. */
	uint16_t	cylinders;		/* Volume geometry: C. */
	uint8_t		heads;			/* Volume geometry: H. */
	uint8_t		sectors;		/* Volume geometry: S. */
	uint64_t	volume_id __packed;	/* Volume ID. */
	struct promise_raid_disk	disks[PROMISE_MAX_DISKS];
						/* Subdisks in this volume. */
	char		name[32];		/* Volume label. */

	uint32_t	magic_3;		/* Something related to rebuild. */
	uint64_t	rebuild_lba64;		/* Per-volume rebuild position. */
	uint32_t	magic_4;
	uint32_t	magic_5;
	uint32_t	total_sectors_high;
	uint8_t		sector_size;
	uint32_t	magic_8[31];
	uint32_t	backup_time;
	uint32_t	disk_offset_high;
	uint32_t	disk_sectors_high;
	uint32_t	disk_rebuild_high;
	uint32_t	magic_11[3];
	uint32_t	filler3[284];
	uint32_t	checksum;
} __packed;
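/*
 * The on-disk record occupies 2048 bytes (four sectors with 512-byte
 * sectors).  The final 32-bit word holds the checksum: a plain sum of the
 * preceding 511 words, as verified in promise_meta_read() below.
 */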
struct g_raid_md_promise_perdisk {
	int		 pd_updated;
	int		 pd_subdisks;
	struct promise_raid_conf	*pd_meta[PROMISE_MAX_SUBDISKS];
};

struct g_raid_md_promise_pervolume {
	struct promise_raid_conf	*pv_meta;
	uint64_t	 pv_id;
	uint16_t	 pv_generation;
	int		 pv_disks_present;
	int		 pv_started;
	struct callout	 pv_start_co;	/* STARTING state timer. */
};

static g_raid_md_create_t g_raid_md_create_promise;
static g_raid_md_taste_t g_raid_md_taste_promise;
static g_raid_md_event_t g_raid_md_event_promise;
static g_raid_md_volume_event_t g_raid_md_volume_event_promise;
static g_raid_md_ctl_t g_raid_md_ctl_promise;
static g_raid_md_write_t g_raid_md_write_promise;
static g_raid_md_fail_disk_t g_raid_md_fail_disk_promise;
static g_raid_md_free_disk_t g_raid_md_free_disk_promise;
static g_raid_md_free_volume_t g_raid_md_free_volume_promise;
static g_raid_md_free_t g_raid_md_free_promise;

static kobj_method_t g_raid_md_promise_methods[] = {
	KOBJMETHOD(g_raid_md_create,	g_raid_md_create_promise),
	KOBJMETHOD(g_raid_md_taste,	g_raid_md_taste_promise),
	KOBJMETHOD(g_raid_md_event,	g_raid_md_event_promise),
	KOBJMETHOD(g_raid_md_volume_event,	g_raid_md_volume_event_promise),
	KOBJMETHOD(g_raid_md_ctl,	g_raid_md_ctl_promise),
	KOBJMETHOD(g_raid_md_write,	g_raid_md_write_promise),
	KOBJMETHOD(g_raid_md_fail_disk,	g_raid_md_fail_disk_promise),
	KOBJMETHOD(g_raid_md_free_disk,	g_raid_md_free_disk_promise),
	KOBJMETHOD(g_raid_md_free_volume,	g_raid_md_free_volume_promise),
	KOBJMETHOD(g_raid_md_free,	g_raid_md_free_promise),
	KOBJMETHOD_END
};

static struct g_raid_md_class g_raid_md_promise_class = {
	"Promise",
	g_raid_md_promise_methods,
	sizeof(struct g_raid_md_object),
};
static void
g_raid_md_promise_print(struct promise_raid_conf *meta)
{
	int i;

	if (g_raid_debug < 1)
		return;

	printf("********* ATA Promise Metadata *********\n");
	printf("promise_id <%.24s>\n", meta->promise_id);
	printf("disk %02x %02x %02x %02x %016jx\n",
	    meta->disk.flags, meta->disk.number, meta->disk.channel,
	    meta->disk.device, meta->disk.id);
	printf("disk_offset %u\n", meta->disk_offset);
	printf("disk_sectors %u\n", meta->disk_sectors);
	printf("disk_rebuild %u\n", meta->disk_rebuild);
	printf("generation %u\n", meta->generation);
	printf("status 0x%02x\n", meta->status);
	printf("type %u\n", meta->type);
	printf("total_disks %u\n", meta->total_disks);
	printf("stripe_shift %u\n", meta->stripe_shift);
	printf("array_width %u\n", meta->array_width);
	printf("array_number %u\n", meta->array_number);
	printf("total_sectors %u\n", meta->total_sectors);
	printf("cylinders %u\n", meta->cylinders);
	printf("heads %u\n", meta->heads);
	printf("sectors %u\n", meta->sectors);
	printf("volume_id 0x%016jx\n", meta->volume_id);
	for (i = 0; i < PROMISE_MAX_DISKS; i++) {
		printf(" %02x %02x %02x %02x %016jx\n",
		    meta->disks[i].flags, meta->disks[i].number,
		    meta->disks[i].channel, meta->disks[i].device,
		    meta->disks[i].id);
	}
	printf("name <%.32s>\n", meta->name);
	printf("magic_3 0x%08x\n", meta->magic_3);
	printf("rebuild_lba64 %ju\n", meta->rebuild_lba64);
	printf("magic_4 0x%08x\n", meta->magic_4);
	printf("magic_5 0x%08x\n", meta->magic_5);
	printf("total_sectors_high 0x%08x\n", meta->total_sectors_high);
	printf("sector_size %u\n", meta->sector_size);
	printf("backup_time %d\n", meta->backup_time);
	printf("disk_offset_high 0x%08x\n", meta->disk_offset_high);
	printf("disk_sectors_high 0x%08x\n", meta->disk_sectors_high);
	printf("disk_rebuild_high 0x%08x\n", meta->disk_rebuild_high);
	printf("=================================================\n");
}

static struct promise_raid_conf *
promise_meta_copy(struct promise_raid_conf *meta)
{
	struct promise_raid_conf *nmeta;

	nmeta = malloc(sizeof(*nmeta), M_MD_PROMISE, M_WAITOK);
	memcpy(nmeta, meta, sizeof(*nmeta));
	return (nmeta);
}

static int
promise_meta_find_disk(struct promise_raid_conf *meta, uint64_t id)
{
	int pos;

	for (pos = 0; pos < meta->total_disks; pos++) {
		if (meta->disks[pos].id == id)
			return (pos);
	}
	return (-1);
}
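/*
 * Scan the metadata records already present on the disk and locate the
 * largest range of sectors not claimed by any of them.  Returns non-zero
 * if a usable unused range was found.
 */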
static int
promise_meta_unused_range(struct promise_raid_conf **metaarr, int nsd,
    off_t sectors, off_t *off, off_t *size)
{
	off_t coff, csize, tmp;
	int i, j;

	for (j = 0; j < nsd; j++) {
		tmp = ((off_t)metaarr[j]->disk_offset_high << 32) +
		    metaarr[j]->disk_offset;
		csize = MIN(csize, tmp - coff);
	}

	coff = ((off_t)metaarr[i]->disk_offset_high << 32) +
	    metaarr[i]->disk_offset +
	    ((off_t)metaarr[i]->disk_sectors_high << 32) +
	    metaarr[i]->disk_sectors;
	csize = sectors - coff;

	return ((*size > 0) ? 1 : 0);
}

static int
promise_meta_translate_disk(struct g_raid_volume *vol, int md_disk_pos)
{
	int disk_pos, width;

	if (md_disk_pos >= 0 && vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
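		/*
		 * Metadata and g_raid enumerate RAID1E (RAID0+1) subdisks in
		 * different orders; converting between them amounts to
		 * transposing an index within a width x 2 grid.
		 */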
		width = vol->v_disks_count / 2;
		disk_pos = (md_disk_pos / width) +
		    (md_disk_pos % width) * width;
	} else
		disk_pos = md_disk_pos;
	return (disk_pos);
}

static void
promise_meta_get_name(struct promise_raid_conf *meta, char *buf)
{
	int i;

	strncpy(buf, meta->name, 32);
	buf[32] = 0;
	for (i = 31; i >= 0; i--) {
		if (buf[i] == ' ')
			buf[i] = 0;
		else
			break;
	}
}

static void
promise_meta_put_name(struct promise_raid_conf *meta, char *buf)
{

	memset(meta->name, 0x20, 32);
	memcpy(meta->name, buf, MIN(strlen(buf), 32));
}
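/*
 * The on-disk name is space-padded (0x20) rather than NUL-terminated,
 * which is why promise_meta_get_name() strips the trailing spaces.
 */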
static int
promise_meta_read(struct g_consumer *cp, struct promise_raid_conf **metaarr)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	int error, i, subdisks;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisks = 0;

	if (pp->sectorsize * 4 > MAXPHYS) {
		G_RAID_DEBUG(1, "%s: Blocksize is too big.", pp->name);
		return (subdisks);
	}
next:
	/* Read metadata block. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisks * PROMISE_META_OFFSET),
	    pp->sectorsize * 4, &error);
	if (buf == NULL) {
		G_RAID_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    pp->name, error);
		return (subdisks);
	}
	meta = (struct promise_raid_conf *)buf;

	/* Check if this is a Promise RAID struct. */
	if (strncmp(meta->promise_id, PROMISE_MAGIC, strlen(PROMISE_MAGIC)) &&
	    strncmp(meta->promise_id, FREEBSD_MAGIC, strlen(FREEBSD_MAGIC))) {
		G_RAID_DEBUG(1,
		    "Promise signature check failed on %s", pp->name);
		g_free(buf);
		return (subdisks);
	}
	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK);
	memcpy(meta, buf, MIN(sizeof(*meta), pp->sectorsize * 4));
	g_free(buf);

	/* Check metadata checksum. */
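	/*
	 * The checksum is the 32-bit sum of the first 511 32-bit words of
	 * the record; the 512th and final word stores the expected value.
	 */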
	for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
		checksum += *ptr++;
	if (checksum != meta->checksum) {
		G_RAID_DEBUG(1, "Promise checksum check failed on %s", pp->name);
		free(meta, M_MD_PROMISE);
		goto next;
	}

	if ((meta->integrity & PROMISE_I_VALID) == 0) {
		G_RAID_DEBUG(1, "Promise metadata is invalid on %s", pp->name);
		free(meta, M_MD_PROMISE);
		goto next;
	}

	if (meta->total_disks > PROMISE_MAX_DISKS) {
		G_RAID_DEBUG(1, "Wrong number of disks on %s (%d)",
		    pp->name, meta->total_disks);
		free(meta, M_MD_PROMISE);
		goto next;
	}

	/* Remove filler garbage from fields used in newer metadata. */
	if (meta->disk_offset_high == 0x8b8c8d8e &&
	    meta->disk_sectors_high == 0x8788898a &&
	    meta->disk_rebuild_high == 0x83848586) {
		meta->disk_offset_high = 0;
		meta->disk_sectors_high = 0;
		if (meta->disk_rebuild == UINT32_MAX)
			meta->disk_rebuild_high = UINT32_MAX;
		else
			meta->disk_rebuild_high = 0;
		if (meta->total_sectors_high == 0x15161718) {
			meta->total_sectors_high = 0;
			meta->backup_time = 0;
			if (meta->rebuild_lba64 == 0x2122232425262728)
				meta->rebuild_lba64 = UINT64_MAX;
		}
	}
	if (meta->sector_size < 1 || meta->sector_size > 8)
		meta->sector_size = 1;
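	/*
	 * sector_size is in 512-byte units (vol->v_sectorsize is later set
	 * to 512 * meta->sector_size), so the sane range of 1..8 covers
	 * 512 to 4096 byte sectors.
	 */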
	/* Save this part and look for next. */
	*metaarr = meta;
	metaarr++;
	subdisks++;
	if (subdisks < PROMISE_MAX_SUBDISKS)
		goto next;

	return (subdisks);
}

static int
promise_meta_write(struct g_consumer *cp,
    struct promise_raid_conf **metaarr, int nsd)
{
	struct g_provider *pp;
	struct promise_raid_conf *meta;
	char *buf;
	off_t off, size;
	int error, i, subdisk, fake;
	uint32_t checksum, *ptr;

	pp = cp->provider;
	subdisk = 0;
	fake = 0;
	error = 0;
	buf = malloc(pp->sectorsize * 4, M_MD_PROMISE, M_WAITOK | M_ZERO);
next:
	meta = NULL;
	if (subdisk < nsd) {
		meta = metaarr[subdisk];
	} else if (!fake && promise_meta_unused_range(metaarr, nsd,
	    cp->provider->mediasize / cp->provider->sectorsize,
	    &off, &size)) {
		/* Optionally add record for unused space. */
		meta = (struct promise_raid_conf *)buf;
		memcpy(&meta->promise_id[0], PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;
		meta->disk.flags = PROMISE_F_ONLINE | PROMISE_F_VALID;
		meta->disk.number = 0xff;
		arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
		meta->disk_offset_high = off >> 32;
		meta->disk_offset = (uint32_t)off;
		meta->disk_sectors_high = size >> 32;
		meta->disk_sectors = (uint32_t)size;
		meta->disk_rebuild_high = UINT32_MAX;
		meta->disk_rebuild = UINT32_MAX;
		fake = 1;
	}
	if (meta != NULL) {
		/* Recalculate checksum in case the metadata was changed. */
		for (checksum = 0, ptr = (uint32_t *)meta, i = 0; i < 511; i++)
			checksum += *ptr++;
		meta->checksum = checksum;
		memcpy(buf, meta, MIN(pp->sectorsize * 4, sizeof(*meta)));
	}
	error = g_write_data(cp, pp->mediasize - pp->sectorsize *
	    (63 - subdisk * PROMISE_META_OFFSET),
	    buf, pp->sectorsize * 4);
	if (error != 0) {
		G_RAID_DEBUG(1, "Cannot write metadata to %s (error=%d).",
		    pp->name, error);
		free(buf, M_MD_PROMISE);
		return (error);
	}
	subdisk++;
	if (subdisk < PROMISE_MAX_SUBDISKS)
		goto next;

	free(buf, M_MD_PROMISE);
	return (error);
}
static int
promise_meta_erase(struct g_consumer *cp)
{
	struct g_provider *pp;
	char *buf;
	int error, subdisk;

	pp = cp->provider;
	buf = malloc(4 * pp->sectorsize, M_MD_PROMISE, M_WAITOK | M_ZERO);
	for (subdisk = 0; subdisk < PROMISE_MAX_SUBDISKS; subdisk++) {
		error = g_write_data(cp, pp->mediasize - pp->sectorsize *
		    (63 - subdisk * PROMISE_META_OFFSET),
		    buf, 4 * pp->sectorsize);
		if (error != 0) {
			G_RAID_DEBUG(1, "Cannot erase metadata on %s (error=%d).",
			    pp->name, error);
		}
	}
	free(buf, M_MD_PROMISE);
	return (error);
}

static int
promise_meta_write_spare(struct g_consumer *cp)
{
	struct promise_raid_conf *meta;
	off_t tmp;
	int error;

	meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
	memcpy(&meta->promise_id[0], PROMISE_MAGIC, sizeof(PROMISE_MAGIC) - 1);
	meta->dummy_0 = 0x00020000;
	meta->integrity = PROMISE_I_VALID;
	meta->disk.flags = PROMISE_F_SPARE | PROMISE_F_ONLINE | PROMISE_F_VALID;
	meta->disk.number = 0xff;
	arc4rand(&meta->disk.id, sizeof(meta->disk.id), 0);
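	/*
	 * Reserve the trailing 131072 sectors (64MB with 512-byte sectors)
	 * for metadata; the "label" path below makes the same reservation.
	 */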
	tmp = cp->provider->mediasize / cp->provider->sectorsize - 131072;
	meta->disk_sectors_high = tmp >> 32;
	meta->disk_sectors = (uint32_t)tmp;
	meta->disk_rebuild_high = UINT32_MAX;
	meta->disk_rebuild = UINT32_MAX;
	error = promise_meta_write(cp, &meta, 1);
	free(meta, M_MD_PROMISE);
	return (error);
}

static struct g_raid_volume *
g_raid_md_promise_get_volume(struct g_raid_softc *sc, uint64_t id)
{
	struct g_raid_volume *vol;
	struct g_raid_md_promise_pervolume *pv;

	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (pv->pv_id == id)
			break;
	}
	return (vol);
}

static void
g_raid_md_promise_purge_volumes(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol, *tvol;
	struct g_raid_md_promise_pervolume *pv;
	int i;

	TAILQ_FOREACH_SAFE(vol, &sc->sc_volumes, v_next, tvol) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;
		for (i = 0; i < vol->v_disks_count; i++) {
			if (vol->v_subdisks[i].sd_state != G_RAID_SUBDISK_S_NONE)
				break;
		}
		if (i >= vol->v_disks_count) {
			g_raid_destroy_volume(vol);
		}
	}
}

static void
g_raid_md_promise_purge_disks(struct g_raid_softc *sc)
{
	struct g_raid_disk *disk, *tdisk;
	struct g_raid_volume *vol;
	struct g_raid_md_promise_perdisk *pd;
	int i, j;

	TAILQ_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
		if (disk->d_state == G_RAID_DISK_S_SPARE)
			continue;
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

		/* Scan for deleted volumes. */
		for (i = 0; i < pd->pd_subdisks; ) {
			vol = g_raid_md_promise_get_volume(sc,
			    pd->pd_meta[i]->volume_id);
			if (vol != NULL && !vol->v_stopping) {
				i++;
				continue;
			}
			free(pd->pd_meta[i], M_MD_PROMISE);
			for (j = i; j < pd->pd_subdisks - 1; j++)
				pd->pd_meta[j] = pd->pd_meta[j + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
			pd->pd_updated = 1;
		}

		/* If there is no metadata left - erase and delete disk. */
		if (pd->pd_subdisks == 0) {
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
		}
	}
}
static int
g_raid_md_promise_supported(int level, int qual, int disks, int force)
{

	if (disks > PROMISE_MAX_DISKS)
		return (0);
	switch (level) {
	case G_RAID_VOLUME_RL_RAID0:
		if (disks < 1)
			return (0);
		if (!force && disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1:
		if (disks < 1)
			return (0);
		if (!force && (disks != 2))
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID1E:
		if (disks < 2)
			return (0);
		if (!force && (disks != 4))
			return (0);
		break;
	case G_RAID_VOLUME_RL_SINGLE:
		if (disks != 1)
			return (0);
		break;
	case G_RAID_VOLUME_RL_CONCAT:
		if (disks < 2)
			return (0);
		break;
	case G_RAID_VOLUME_RL_RAID5:
		if (disks < 3)
			return (0);
		if (qual != G_RAID_VOLUME_RLQ_R5LA)
			return (0);
		break;
	default:
		return (0);
	}
	if (level != G_RAID_VOLUME_RL_RAID5 && qual != G_RAID_VOLUME_RLQ_NONE)
		return (0);
	return (1);
}
static int
g_raid_md_promise_start_disk(struct g_raid_disk *disk, int sdn,
    struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t eoff, esize, size;
	int disk_pos, md_disk_pos, i, resurrection = 0;

	sc = disk->d_softc;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
	pv = vol->v_md_data;
	meta = pv->pv_meta;

	if (sdn >= 0) {
		/* Find disk position in metadata by its ID. */
		md_disk_pos = promise_meta_find_disk(meta, pd->pd_meta[sdn]->disk.id);
		/* For RAID0+1 we need to translate order. */
		disk_pos = promise_meta_translate_disk(vol, md_disk_pos);
	} else {
		md_disk_pos = -1;
		disk_pos = -1;
	}
	if (disk_pos < 0) {
		G_RAID_DEBUG1(1, sc, "Disk %s is not part of the volume %s",
		    g_raid_get_diskname(disk), vol->v_name);
		/* Failed stale disk is useless for us. */
		if (sdn >= 0 &&
		    pd->pd_meta[sdn]->disk.flags & PROMISE_F_DOWN) {
			g_raid_change_disk_state(disk, G_RAID_DISK_S_STALE_FAILED);
			return (0);
		}
		/* If we were given specific metadata subdisk - erase it. */
		if (sdn >= 0) {
			free(pd->pd_meta[sdn], M_MD_PROMISE);
			for (i = sdn; i < pd->pd_subdisks - 1; i++)
				pd->pd_meta[i] = pd->pd_meta[i + 1];
			pd->pd_meta[pd->pd_subdisks - 1] = NULL;
			pd->pd_subdisks--;
		}
		/* If we are in the start process, that's all for now. */
		if (!pv->pv_started)
			goto nofit;
		/*
		 * If we have already started - try to get use of the disk.
		 * Try to replace OFFLINE disks first, then FAILED.
		 */
		promise_meta_unused_range(pd->pd_meta, pd->pd_subdisks,
		    disk->d_consumer->provider->mediasize /
		    disk->d_consumer->provider->sectorsize,
		    &eoff, &esize);
		if (esize == 0) {
			G_RAID_DEBUG1(1, sc, "No free space on disk %s",
			    g_raid_get_diskname(disk));
			goto nofit;
		}
		size = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state != G_RAID_SUBDISK_S_NONE)
				size = sd->sd_size;
			if (sd->sd_state <= G_RAID_SUBDISK_S_FAILED &&
			    (disk_pos < 0 ||
			     vol->v_subdisks[i].sd_state < sd->sd_state))
				disk_pos = i;
		}
		if (disk_pos >= 0 &&
		    vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
		    (off_t)esize * 512 < size) {
			G_RAID_DEBUG1(1, sc, "Disk %s free space "
			    "is too small (%ju < %ju)",
			    g_raid_get_diskname(disk),
			    (off_t)esize * 512, size);
			disk_pos = -1;
		}
		if (disk_pos >= 0) {
			if (vol->v_raid_level != G_RAID_VOLUME_RL_CONCAT)
				esize = size / 512;
			/* For RAID0+1 we need to translate order. */
			md_disk_pos = promise_meta_translate_disk(vol, disk_pos);
		} else {
nofit:
			if (pd->pd_subdisks == 0) {
				g_raid_change_disk_state(disk,
				    G_RAID_DISK_S_SPARE);
			}
			return (0);
		}
		G_RAID_DEBUG1(1, sc, "Disk %s takes pos %d in the volume %s",
		    g_raid_get_diskname(disk), disk_pos, vol->v_name);
		resurrection = 1;
	}

	sd = &vol->v_subdisks[disk_pos];

	if (resurrection && sd->sd_disk != NULL) {
		g_raid_change_disk_state(sd->sd_disk,
		    G_RAID_DISK_S_STALE_FAILED);
		TAILQ_REMOVE(&sd->sd_disk->d_subdisks,
		    sd, sd_next);
	}
	vol->v_subdisks[disk_pos].sd_disk = disk;
	TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);

	/* Welcome the new disk. */
	if (resurrection)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);
	else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN)
		g_raid_change_disk_state(disk, G_RAID_DISK_S_FAILED);
	else
		g_raid_change_disk_state(disk, G_RAID_DISK_S_ACTIVE);

	if (resurrection) {
		sd->sd_offset = (off_t)eoff * 512;
		sd->sd_size = (off_t)esize * 512;
	} else {
		sd->sd_offset = (((off_t)pd->pd_meta[sdn]->disk_offset_high
		    << 32) + pd->pd_meta[sdn]->disk_offset) * 512;
		sd->sd_size = (((off_t)pd->pd_meta[sdn]->disk_sectors_high
		    << 32) + pd->pd_meta[sdn]->disk_sectors) * 512;
	}

	if (resurrection) {
		/* Stale disk, almost same as new. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_DOWN) {
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
	} else if (meta->disks[md_disk_pos].flags & PROMISE_F_REDIR) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_REBUILD);
		if (pd->pd_meta[sdn]->generation != meta->generation)
			sd->sd_rebuild_pos = 0;
		else {
			sd->sd_rebuild_pos =
			    (((off_t)pd->pd_meta[sdn]->disk_rebuild_high << 32) +
			     pd->pd_meta[sdn]->disk_rebuild) * 512;
		}
	} else if (!(meta->disks[md_disk_pos].flags & PROMISE_F_ONLINE)) {
		/* Rebuilding disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_NEW);
	} else if (pd->pd_meta[sdn]->generation != meta->generation ||
	    (meta->status & PROMISE_S_MARKED)) {
		/* Stale disk or dirty volume (unclean shutdown). */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_STALE);
	} else {
		/* Up to date disk. */
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_ACTIVE);
	}
	g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
	    G_RAID_EVENT_SUBDISK);

	return (resurrection);
}
static void
g_raid_md_promise_refill(struct g_raid_softc *sc)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	int update, updated, i, bad;

	md = sc->sc_md;
restart:
	updated = 0;
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		pv = vol->v_md_data;
		if (!pv->pv_started || vol->v_stopping)
			continue;

		/* Search for subdisk that needs replacement. */
		bad = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE ||
			    sd->sd_state == G_RAID_SUBDISK_S_FAILED)
				bad = 1;
		}
		if (!bad)
			continue;

		G_RAID_DEBUG1(1, sc, "Volume %s is not complete, "
		    "trying to refill.", vol->v_name);

		TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_state < G_RAID_DISK_S_SPARE)
				continue;
			/* Skip already used by this volume. */
			for (i = 0; i < vol->v_disks_count; i++) {
				sd = &vol->v_subdisks[i];
				if (sd->sd_disk == disk)
					break;
			}
			if (i < vol->v_disks_count)
				continue;

			/* Try to use disk if it has empty extents. */
			pd = disk->d_md_data;
			if (pd->pd_subdisks < PROMISE_MAX_SUBDISKS) {
				update =
				    g_raid_md_promise_start_disk(disk, -1, vol);
			} else
				update = 0;
			if (update) {
				updated = 1;
				g_raid_md_write_promise(md, vol, NULL, disk);
				break;
			}
		}
	}
	if (updated)
		goto restart;
}
static void
g_raid_md_promise_start(struct g_raid_volume *vol)
{
	struct g_raid_softc *sc;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_object *md;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	int i;

	sc = vol->v_softc;
	md = sc->sc_md;
	pv = vol->v_md_data;
	meta = pv->pv_meta;

	vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_NONE;
	if (meta->type == PROMISE_T_RAID0)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID0;
	else if (meta->type == PROMISE_T_RAID1) {
		if (meta->array_width == 1)
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1;
		else
			vol->v_raid_level = G_RAID_VOLUME_RL_RAID1E;
	} else if (meta->type == PROMISE_T_RAID3)
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID3;
	else if (meta->type == PROMISE_T_RAID5) {
		vol->v_raid_level = G_RAID_VOLUME_RL_RAID5;
		vol->v_raid_level_qualifier = G_RAID_VOLUME_RLQ_R5LA;
	} else if (meta->type == PROMISE_T_SPAN)
		vol->v_raid_level = G_RAID_VOLUME_RL_CONCAT;
	else if (meta->type == PROMISE_T_JBOD)
		vol->v_raid_level = G_RAID_VOLUME_RL_SINGLE;
	else
		vol->v_raid_level = G_RAID_VOLUME_RL_UNKNOWN;
	vol->v_strip_size = 512 << meta->stripe_shift; //ZZZ
	vol->v_disks_count = meta->total_disks;
	vol->v_mediasize = (off_t)meta->total_sectors * 512; //ZZZ
	if (meta->total_sectors_high < 256) /* If value looks sane. */
		vol->v_mediasize +=
		    ((off_t)meta->total_sectors_high << 32) * 512; //ZZZ
	vol->v_sectorsize = 512 * meta->sector_size;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		sd->sd_offset = (((off_t)meta->disk_offset_high << 32) +
		    meta->disk_offset) * 512;
		sd->sd_size = (((off_t)meta->disk_sectors_high << 32) +
		    meta->disk_sectors) * 512;
	}
	g_raid_start_volume(vol);

	/* Make all disks found so far take their places. */
	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = disk->d_md_data;
		for (i = 0; i < pd->pd_subdisks; i++) {
			if (pd->pd_meta[i]->volume_id == meta->volume_id)
				g_raid_md_promise_start_disk(disk, i, vol);
		}
	}

	pv->pv_started = 1;
	callout_stop(&pv->pv_start_co);
	G_RAID_DEBUG1(0, sc, "Volume started.");
	g_raid_md_write_promise(md, vol, NULL, NULL);

	/* Pick up any STALE/SPARE disks to refill array if needed. */
	g_raid_md_promise_refill(sc);

	g_raid_event_send(vol, G_RAID_VOLUME_E_START, G_RAID_EVENT_VOLUME);
}
static void
g_raid_promise_go(void *arg)
{
	struct g_raid_volume *vol;
	struct g_raid_softc *sc;
	struct g_raid_md_promise_pervolume *pv;

	vol = arg;
	pv = vol->v_md_data;
	sc = vol->v_softc;
	if (!pv->pv_started) {
		G_RAID_DEBUG1(0, sc, "Force volume start due to timeout.");
		g_raid_event_send(vol, G_RAID_VOLUME_E_STARTMD,
		    G_RAID_EVENT_VOLUME);
	}
}
static void
g_raid_md_promise_new_disk(struct g_raid_disk *disk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_object *md;
	struct promise_raid_conf *pdmeta;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_raid_volume *vol;
	int i;
	char buf[33];

	sc = disk->d_softc;
	md = sc->sc_md;
	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;

	if (pd->pd_subdisks == 0) {
		g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
		g_raid_md_promise_refill(sc);
		return;
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		if (vol == NULL) {
			promise_meta_get_name(pdmeta, buf);
			vol = g_raid_create_volume(sc, buf, pdmeta->array_number);
			pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
			pv->pv_id = pdmeta->volume_id;
			vol->v_md_data = pv;
			callout_init(&pv->pv_start_co, 1);
			callout_reset(&pv->pv_start_co,
			    g_raid_start_timeout * hz,
			    g_raid_promise_go, vol);
		} else
			pv = vol->v_md_data;

		/* If we haven't started yet - check metadata freshness. */
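		/*
		 * Generation numbers may wrap around; the signed 16-bit
		 * difference below keeps the "newer" comparison valid
		 * across a wrap.
		 */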
		if (pv->pv_meta == NULL || !pv->pv_started) {
			if (pv->pv_meta == NULL ||
			    ((int16_t)(pdmeta->generation - pv->pv_generation)) > 0) {
				G_RAID_DEBUG1(1, sc, "Newer disk");
				if (pv->pv_meta != NULL)
					free(pv->pv_meta, M_MD_PROMISE);
				pv->pv_meta = promise_meta_copy(pdmeta);
				pv->pv_generation = pv->pv_meta->generation;
				pv->pv_disks_present = 1;
			} else if (pdmeta->generation == pv->pv_generation) {
				pv->pv_disks_present++;
				G_RAID_DEBUG1(1, sc, "Matching disk (%d of %d up)",
				    pv->pv_disks_present,
				    pv->pv_meta->total_disks);
			} else
				G_RAID_DEBUG1(1, sc, "Older disk");
		}
	}

	for (i = 0; i < pd->pd_subdisks; i++) {
		pdmeta = pd->pd_meta[i];

		/* Look for volume with matching ID. */
		vol = g_raid_md_promise_get_volume(sc, pdmeta->volume_id);
		pv = vol->v_md_data;

		if (pv->pv_started) {
			if (g_raid_md_promise_start_disk(disk, i, vol))
				g_raid_md_write_promise(md, vol, NULL, NULL);
		} else {
			/* If we collected all needed disks - start array. */
			if (pv->pv_disks_present == pv->pv_meta->total_disks)
				g_raid_md_promise_start(vol);
		}
	}
}
static int
g_raid_md_create_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_geom **gp)
{
	struct g_geom *geom;
	struct g_raid_softc *sc;

	/* Search for existing node. */
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}
	if (geom != NULL) {
		*gp = geom;
		return (G_RAID_MD_TASTE_EXISTING);
	}

	/* Create new one if not found. */
	sc = g_raid_create_node(mp, "Promise", md);
	if (sc == NULL)
		return (G_RAID_MD_TASTE_FAIL);
	md->mdo_softc = sc;
	*gp = sc->sc_geom;
	return (G_RAID_MD_TASTE_NEW);
}
static int
g_raid_md_taste_promise(struct g_raid_md_object *md, struct g_class *mp,
    struct g_consumer *cp, struct g_geom **gp)
{
	struct g_consumer *rcp;
	struct g_provider *pp;
	struct g_raid_softc *sc;
	struct g_raid_disk *disk;
	struct promise_raid_conf *meta, *metaarr[4];
	struct g_raid_md_promise_perdisk *pd;
	struct g_geom *geom;
	int i, j, result, len, subdisks;
	char name[16];
	uint16_t vendor;

	G_RAID_DEBUG(1, "Tasting Promise on %s", cp->provider->name);
	pp = cp->provider;

	/* Read metadata from device. */
	g_topology_unlock();
	vendor = 0xffff;
	len = sizeof(vendor);
	if (pp->geom->rank == 1)
		g_io_getattr("GEOM::hba_vendor", cp, &len, &vendor);
	subdisks = promise_meta_read(cp, metaarr);
	g_topology_lock();
	if (subdisks == 0) {
		if (g_raid_aggressive_spare) {
			if (vendor == 0x105a || vendor == 0x1002) {
				G_RAID_DEBUG(1,
				    "No Promise metadata, forcing spare.");
				goto search;
			} else {
				G_RAID_DEBUG(1,
				    "Promise/ATI vendor mismatch "
				    "0x%04x != 0x105a/0x1002",
				    vendor);
			}
		}
		return (G_RAID_MD_TASTE_FAIL);
	}

	/* Metadata valid. Print it. */
	for (i = 0; i < subdisks; i++)
		g_raid_md_promise_print(metaarr[i]);

	/* Purge meaningless (empty/spare) records. */
	for (i = 0; i < subdisks; ) {
		if (metaarr[i]->disk.flags & PROMISE_F_ASSIGNED) {
			i++;
			continue;
		}
		free(metaarr[i], M_MD_PROMISE);
		for (j = i; j < subdisks - 1; j++)
			metaarr[j] = metaarr[j + 1];
		metaarr[subdisks - 1] = NULL;
		subdisks--;
	}

search:
	/* Search for matching node. */
	sc = NULL;
	LIST_FOREACH(geom, &mp->geom, geom) {
		sc = geom->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_stopping != 0)
			continue;
		if (sc->sc_md->mdo_class != md->mdo_class)
			continue;
		break;
	}

	/* Found matching node. */
	if (geom != NULL) {
		G_RAID_DEBUG(1, "Found matching array %s", sc->sc_name);
		result = G_RAID_MD_TASTE_EXISTING;
	} else { /* Not found matching node -- create one. */
		result = G_RAID_MD_TASTE_NEW;
		snprintf(name, sizeof(name), "Promise");
		sc = g_raid_create_node(mp, name, md);
		md->mdo_softc = sc;
		geom = sc->sc_geom;
	}

	/* There is no return after this point, so we close passed consumer. */
	g_access(cp, -1, 0, 0);

	rcp = g_new_consumer(geom);
	rcp->flags |= G_CF_DIRECT_RECEIVE;
	g_attach(rcp, pp);
	if (g_access(rcp, 1, 1, 1) != 0)
		; /* XXX */
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
	pd->pd_subdisks = subdisks;
	for (i = 0; i < subdisks; i++)
		pd->pd_meta[i] = metaarr[i];
	disk = g_raid_create_disk(sc);
	disk->d_md_data = (void *)pd;
	disk->d_consumer = rcp;
	rcp->private = disk;

	g_raid_get_disk_info(disk);

	g_raid_md_promise_new_disk(disk);

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	*gp = geom;
	return (result);
}
static int
g_raid_md_event_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk, u_int event)
{
	struct g_raid_softc *sc;

	sc = md->mdo_softc;
	if (disk == NULL)
		return (-1);
	switch (event) {
	case G_RAID_DISK_E_DISCONNECTED:
		g_raid_change_disk_state(disk, G_RAID_DISK_S_NONE);
		g_raid_destroy_disk(disk);
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to all disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (0);
	}
	return (-2);
}
static int
g_raid_md_volume_event_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol, u_int event)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	switch (event) {
	case G_RAID_VOLUME_E_STARTMD:
		if (!pv->pv_started)
			g_raid_md_promise_start(vol);
		return (0);
	}
	return (-2);
}
static int
g_raid_md_ctl_promise(struct g_raid_md_object *md,
    struct gctl_req *req)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol, *vol1;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk, *disks[PROMISE_MAX_DISKS];
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct g_consumer *cp;
	struct g_provider *pp;
	char arg[16];
	const char *nodename, *verb, *volname, *levelname, *diskname;
	char *tmp;
	int *nargs, *force;
	off_t esize, offs[PROMISE_MAX_DISKS], size, sectorsize, strip;
	intmax_t *sizearg, *striparg;
	int numdisks, i, len, level, qual;
	int error;

	sc = md->mdo_softc;
	verb = gctl_get_param(req, "verb", NULL);
	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	error = 0;
	if (strcmp(verb, "label") == 0) {
		if (*nargs < 4) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		volname = gctl_get_asciiparam(req, "arg1");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-2);
		}
		levelname = gctl_get_asciiparam(req, "arg2");
		if (levelname == NULL) {
			gctl_error(req, "No RAID level.");
			return (-3);
		}
		if (strcasecmp(levelname, "RAID5") == 0)
			levelname = "RAID5-LA";
		if (g_raid_volume_str2level(levelname, &level, &qual)) {
			gctl_error(req, "Unknown RAID level '%s'.", levelname);
			return (-4);
		}
		numdisks = *nargs - 3;
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (!g_raid_md_promise_supported(level, qual, numdisks,
		    force ? *force : 0)) {
			gctl_error(req, "Unsupported RAID level "
			    "(0x%02x/0x%02x), or number of disks (%d).",
			    level, qual, numdisks);
			return (-5);
		}

		/* Search for disks, connect them and probe. */
		size = INT64_MAX;
		sectorsize = 0;
		bzero(disks, sizeof(disks));
		bzero(offs, sizeof(offs));
		for (i = 0; i < numdisks; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i + 3);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -6;
				break;
			}
			if (strcmp(diskname, "NONE") == 0)
				continue;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				     diskname) == 0)
					break;
			}
			if (disk != NULL) {
				if (disk->d_state != G_RAID_DISK_S_ACTIVE) {
					gctl_error(req, "Disk '%s' is in a "
					    "wrong state (%s).", diskname,
					    g_raid_disk_state2str(disk->d_state));
					error = -7;
					break;
				}
				pd = disk->d_md_data;
				if (pd->pd_subdisks >= PROMISE_MAX_SUBDISKS) {
					gctl_error(req, "Disk '%s' already "
					    "used by %d volumes.",
					    diskname, pd->pd_subdisks);
					error = -7;
					break;
				}
				pp = disk->d_consumer->provider;
				disks[i] = disk;
				promise_meta_unused_range(pd->pd_meta,
				    pd->pd_subdisks,
				    pp->mediasize / pp->sectorsize,
				    &offs[i], &esize);
				size = MIN(size, (off_t)esize * pp->sectorsize);
				sectorsize = MAX(sectorsize, pp->sectorsize);
				continue;
			}

			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -8;
				break;
			}
			pp = cp->provider;
			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);
			disk = g_raid_create_disk(sc);
			disk->d_md_data = (void *)pd;
			disk->d_consumer = cp;
			disks[i] = disk;
			cp->private = disk;
			g_topology_unlock();

			g_raid_get_disk_info(disk);

			/* Reserve some space for metadata. */
			size = MIN(size, pp->mediasize - 131072llu * pp->sectorsize);
			sectorsize = MAX(sectorsize, pp->sectorsize);
		}
		if (error != 0) {
			for (i = 0; i < numdisks; i++) {
				if (disks[i] != NULL &&
				    disks[i]->d_state == G_RAID_DISK_S_NONE)
					g_raid_destroy_disk(disks[i]);
			}
			return (error);
		}

		if (sectorsize <= 0) {
			gctl_error(req, "Can't get sector size.");
			return (-8);
		}

		/* Handle size argument. */
		len = sizeof(*sizearg);
		sizearg = gctl_get_param(req, "size", &len);
		if (sizearg != NULL && len == sizeof(*sizearg) &&
		    *sizearg > 0) {
			if (*sizearg > size) {
				gctl_error(req, "Size too big %lld > %lld.",
				    (long long)*sizearg, (long long)size);
				return (-9);
			}
			size = *sizearg;
		}

		/* Handle strip argument. */
		strip = 131072;
		len = sizeof(*striparg);
		striparg = gctl_get_param(req, "strip", &len);
		if (striparg != NULL && len == sizeof(*striparg) &&
		    *striparg > 0) {
			if (*striparg < sectorsize) {
				gctl_error(req, "Strip size too small.");
				return (-10);
			}
			if (*striparg % sectorsize != 0) {
				gctl_error(req, "Incorrect strip size.");
				return (-11);
			}
			strip = *striparg;
		}

		/* Round size down to strip or sector. */
		if (level == G_RAID_VOLUME_RL_RAID1 ||
		    level == G_RAID_VOLUME_RL_SINGLE ||
		    level == G_RAID_VOLUME_RL_CONCAT)
			size -= (size % sectorsize);
		else if (level == G_RAID_VOLUME_RL_RAID1E &&
		    (numdisks & 1) != 0)
			size -= (size % (2 * strip));
		else
			size -= (size % strip);
		if (size <= 0) {
			gctl_error(req, "Size too small.");
			return (-13);
		}

		/* We have all we need, create things: volume, ... */
		pv = malloc(sizeof(*pv), M_MD_PROMISE, M_WAITOK | M_ZERO);
		arc4rand(&pv->pv_id, sizeof(pv->pv_id), 0);
		pv->pv_generation = 0;
		pv->pv_started = 1;
		vol = g_raid_create_volume(sc, volname, -1);
		vol->v_md_data = pv;
		vol->v_raid_level = level;
		vol->v_raid_level_qualifier = qual;
		vol->v_strip_size = strip;
		vol->v_disks_count = numdisks;
		if (level == G_RAID_VOLUME_RL_RAID0 ||
		    level == G_RAID_VOLUME_RL_CONCAT ||
		    level == G_RAID_VOLUME_RL_SINGLE)
			vol->v_mediasize = size * numdisks;
		else if (level == G_RAID_VOLUME_RL_RAID1)
			vol->v_mediasize = size;
		else if (level == G_RAID_VOLUME_RL_RAID3 ||
		    level == G_RAID_VOLUME_RL_RAID5)
			vol->v_mediasize = size * (numdisks - 1);
		else {
			vol->v_mediasize = ((size * numdisks) / strip / 2) *
			    strip;
		}
		vol->v_sectorsize = sectorsize;
		g_raid_start_volume(vol);

		/* , and subdisks. */
		for (i = 0; i < numdisks; i++) {
			disk = disks[i];
			sd = &vol->v_subdisks[i];
			sd->sd_disk = disk;
			sd->sd_offset = (off_t)offs[i] * 512;
			sd->sd_size = size;
			if (disk == NULL)
				continue;
			TAILQ_INSERT_TAIL(&disk->d_subdisks, sd, sd_next);
			g_raid_change_disk_state(disk,
			    G_RAID_DISK_S_ACTIVE);
			g_raid_change_subdisk_state(sd,
			    G_RAID_SUBDISK_S_ACTIVE);
			g_raid_event_send(sd, G_RAID_SUBDISK_E_NEW,
			    G_RAID_EVENT_SUBDISK);
		}

		/* Write metadata based on created entities. */
		G_RAID_DEBUG1(0, sc, "Array started.");
		g_raid_md_write_promise(md, vol, NULL, NULL);

		/* Pick up any STALE/SPARE disks to refill array if needed. */
		g_raid_md_promise_refill(sc);

		g_raid_event_send(vol, G_RAID_VOLUME_E_START,
		    G_RAID_EVENT_VOLUME);
		return (0);
	}
	if (strcmp(verb, "add") == 0) {
		gctl_error(req, "`add` command is not applicable, "
		    "use `label` instead.");
		return (-99);
	}
	if (strcmp(verb, "delete") == 0) {
		nodename = gctl_get_asciiparam(req, "arg0");
		if (nodename != NULL && strcasecmp(sc->sc_name, nodename) != 0)
			nodename = NULL;

		/* Full node destruction. */
		if (*nargs == 1 && nodename != NULL) {
			/* Check if some volume is still open. */
			force = gctl_get_paraml(req, "force", sizeof(*force));
			if (force != NULL && *force == 0 &&
			    g_raid_nopens(sc) != 0) {
				gctl_error(req, "Some volume is still open.");
				return (-4);
			}

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
			return (0);
		}

		/* Destroy specified volume. If it was last - all node. */
		if (*nargs > 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-2);
		}
		volname = gctl_get_asciiparam(req,
		    nodename != NULL ? "arg1" : "arg0");
		if (volname == NULL) {
			gctl_error(req, "No volume name.");
			return (-3);
		}

		/* Search for volume. */
		TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
			if (strcmp(vol->v_name, volname) == 0)
				break;
			pp = vol->v_provider;
			if (pp == NULL)
				continue;
			if (strcmp(pp->name, volname) == 0)
				break;
			if (strncmp(pp->name, "raid/", 5) == 0 &&
			    strcmp(pp->name + 5, volname) == 0)
				break;
		}
		if (vol == NULL) {
			i = strtol(volname, &tmp, 10);
			if (verb != volname && tmp[0] == 0) {
				TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
					if (vol->v_global_id == i)
						break;
				}
			}
		}
		if (vol == NULL) {
			gctl_error(req, "Volume '%s' not found.", volname);
			return (-3);
		}

		/* Check if volume is still open. */
		force = gctl_get_paraml(req, "force", sizeof(*force));
		if (force != NULL && *force == 0 &&
		    vol->v_provider_open != 0) {
			gctl_error(req, "Volume is still open.");
			return (-4);
		}

		/* Destroy volume and potentially node. */
		i = 0;
		TAILQ_FOREACH(vol1, &sc->sc_volumes, v_next)
			i++;
		if (i >= 2) {
			g_raid_destroy_volume(vol);
			g_raid_md_promise_purge_disks(sc);
			g_raid_md_write_promise(md, NULL, NULL, NULL);
		} else {
			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer)
					promise_meta_erase(disk->d_consumer);
			}
			g_raid_destroy_node(sc, 0);
		}
		return (0);
	}
	if (strcmp(verb, "remove") == 0 ||
	    strcmp(verb, "fail") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -2;
				break;
			}
			if (strncmp(diskname, "/dev/", 5) == 0)
				diskname += 5;

			TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_consumer != NULL &&
				    disk->d_consumer->provider != NULL &&
				    strcmp(disk->d_consumer->provider->name,
				     diskname) == 0)
					break;
			}
			if (disk == NULL) {
				gctl_error(req, "Disk '%s' not found.",
				    diskname);
				error = -3;
				break;
			}

			if (strcmp(verb, "fail") == 0) {
				g_raid_md_fail_disk_promise(md, NULL, disk);
				continue;
			}

			/* Erase metadata on deleting disk and destroy it. */
			promise_meta_erase(disk->d_consumer);
			g_raid_destroy_disk(disk);
		}
		g_raid_md_promise_purge_volumes(sc);

		/* Write updated metadata to remaining disks. */
		g_raid_md_write_promise(md, NULL, NULL, NULL);

		/* Check if anything left. */
		if (g_raid_ndisks(sc, -1) == 0)
			g_raid_destroy_node(sc, 0);
		else
			g_raid_md_promise_refill(sc);
		return (error);
	}
	if (strcmp(verb, "insert") == 0) {
		if (*nargs < 2) {
			gctl_error(req, "Invalid number of arguments.");
			return (-1);
		}
		for (i = 1; i < *nargs; i++) {
			/* Get disk name. */
			snprintf(arg, sizeof(arg), "arg%d", i);
			diskname = gctl_get_asciiparam(req, arg);
			if (diskname == NULL) {
				gctl_error(req, "No disk name (%s).", arg);
				error = -3;
				break;
			}

			/* Try to find provider with specified name. */
			g_topology_lock();
			cp = g_raid_open_consumer(sc, diskname);
			if (cp == NULL) {
				gctl_error(req, "Can't open disk '%s'.",
				    diskname);
				g_topology_unlock();
				error = -4;
				break;
			}
			pp = cp->provider;
			g_topology_unlock();

			pd = malloc(sizeof(*pd), M_MD_PROMISE, M_WAITOK | M_ZERO);

			disk = g_raid_create_disk(sc);
			disk->d_consumer = cp;
			disk->d_md_data = (void *)pd;
			cp->private = disk;

			g_raid_get_disk_info(disk);

			/* Welcome the "new" disk. */
			g_raid_change_disk_state(disk, G_RAID_DISK_S_SPARE);
			promise_meta_write_spare(cp);
			g_raid_md_promise_refill(sc);
		}
		return (error);
	}
	return (-100);
}
static int
g_raid_md_write_promise(struct g_raid_md_object *md, struct g_raid_volume *tvol,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct g_raid_disk *disk;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_md_promise_pervolume *pv;
	struct promise_raid_conf *meta;
	off_t rebuild_lba64;
	int i, j, pos, rebuild;

	sc = md->mdo_softc;

	if (sc->sc_stopping == G_RAID_DESTROY_HARD)
		return (0);

	/* Generate new per-volume metadata for affected volumes. */
	TAILQ_FOREACH(vol, &sc->sc_volumes, v_next) {
		if (vol->v_stopping)
			continue;

		/* Skip volumes not related to specified targets. */
		if (tvol != NULL && vol != tvol)
			continue;
		if (tsd != NULL && vol != tsd->sd_volume)
			continue;
		if (tdisk != NULL) {
			for (i = 0; i < vol->v_disks_count; i++) {
				if (vol->v_subdisks[i].sd_disk == tdisk)
					break;
			}
			if (i >= vol->v_disks_count)
				continue;
		}

		pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
		pv->pv_generation++;

		meta = malloc(sizeof(*meta), M_MD_PROMISE, M_WAITOK | M_ZERO);
		if (pv->pv_meta != NULL)
			memcpy(meta, pv->pv_meta, sizeof(*meta));
		memcpy(meta->promise_id, PROMISE_MAGIC,
		    sizeof(PROMISE_MAGIC) - 1);
		meta->dummy_0 = 0x00020000;
		meta->integrity = PROMISE_I_VALID;

		meta->generation = pv->pv_generation;
		meta->status = PROMISE_S_VALID | PROMISE_S_ONLINE |
		    PROMISE_S_INITED | PROMISE_S_READY;
		if (vol->v_state <= G_RAID_VOLUME_S_DEGRADED)
			meta->status |= PROMISE_S_DEGRADED;
		if (vol->v_dirty)
			meta->status |= PROMISE_S_MARKED; /* XXX: INVENTED! */
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID0 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_SINGLE)
			meta->type = PROMISE_T_RAID0;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->type = PROMISE_T_RAID1;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3)
			meta->type = PROMISE_T_RAID3;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID5)
			meta->type = PROMISE_T_RAID5;
		else if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT)
			meta->type = PROMISE_T_SPAN;
		else
			meta->type = PROMISE_T_JBOD;
		meta->total_disks = vol->v_disks_count;
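		/*
		 * stripe_shift encodes the strip size as 512 << shift (see
		 * g_raid_md_promise_start() above), so for a power-of-two
		 * strip ffs(v_strip_size / 1024) yields the right value.
		 */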
		meta->stripe_shift = ffs(vol->v_strip_size / 1024);
		meta->array_width = vol->v_disks_count;
		if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
		    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E)
			meta->array_width /= 2;
		meta->array_number = vol->v_global_id;
		meta->total_sectors = vol->v_mediasize / 512;
		meta->total_sectors_high = (vol->v_mediasize / 512) >> 32;
		meta->sector_size = vol->v_sectorsize / 512;
		meta->cylinders = meta->total_sectors / (255 * 63) - 1;
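		/*
		 * The CHS geometry above is synthesized from the sector
		 * count assuming 255 * 63 sectors per cylinder.
		 */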
		meta->volume_id = pv->pv_id;
		rebuild_lba64 = UINT64_MAX;
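		/* UINT64_MAX acts as a "no rebuild in progress" sentinel here. */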
		rebuild = 0;
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			meta->disks[pos].flags = PROMISE_F_VALID |
			    PROMISE_F_ASSIGNED;
			if (sd->sd_state == G_RAID_SUBDISK_S_NONE) {
				meta->disks[pos].flags |= 0;
			} else if (sd->sd_state == G_RAID_SUBDISK_S_FAILED) {
				meta->disks[pos].flags |=
				    PROMISE_F_DOWN | PROMISE_F_REDIR;
			} else if (sd->sd_state <= G_RAID_SUBDISK_S_REBUILD) {
				meta->disks[pos].flags |=
				    PROMISE_F_ONLINE | PROMISE_F_REDIR;
				if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
					rebuild_lba64 = MIN(rebuild_lba64,
					    sd->sd_rebuild_pos / 512);
				} else
					rebuild_lba64 = 0;
				rebuild = 1;
			} else {
				meta->disks[pos].flags |= PROMISE_F_ONLINE;
				if (sd->sd_state < G_RAID_SUBDISK_S_ACTIVE) {
					meta->status |= PROMISE_S_MARKED;
					if (sd->sd_state == G_RAID_SUBDISK_S_RESYNC) {
						rebuild_lba64 = MIN(rebuild_lba64,
						    sd->sd_rebuild_pos / 512);
					} else
						rebuild_lba64 = 0;
				}
			}
			if (pv->pv_meta != NULL) {
				meta->disks[pos].id = pv->pv_meta->disks[pos].id;
			} else {
				meta->disks[pos].number = i * 2;
				arc4rand(&meta->disks[pos].id,
				    sizeof(meta->disks[pos].id), 0);
			}
		}
		promise_meta_put_name(meta, vol->v_name);

		/* Try to mimic AMD BIOS rebuild/resync behavior. */
		if (rebuild_lba64 != UINT64_MAX) {
			if (rebuild)
				meta->magic_3 = 0x03040010UL; /* Rebuild? */
			else
				meta->magic_3 = 0x03040008UL; /* Resync? */
			/* Translate from per-disk to per-volume LBA. */
			if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID1 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID1E) {
				rebuild_lba64 *= meta->array_width;
			} else if (vol->v_raid_level == G_RAID_VOLUME_RL_RAID3 ||
			    vol->v_raid_level == G_RAID_VOLUME_RL_RAID5) {
				rebuild_lba64 *= meta->array_width - 1;
			}
		} else
			meta->magic_3 = 0x03000000UL;
		meta->rebuild_lba64 = rebuild_lba64;
		meta->magic_4 = 0x04010101UL;

		/* Replace per-volume metadata with new. */
		if (pv->pv_meta != NULL)
			free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = meta;

		/* Copy new metadata to the disks, adding or replacing old. */
		for (i = 0; i < vol->v_disks_count; i++) {
			sd = &vol->v_subdisks[i];
			disk = sd->sd_disk;
			if (disk == NULL)
				continue;
			/* For RAID0+1 we need to translate order. */
			pos = promise_meta_translate_disk(vol, i);
			pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
			for (j = 0; j < pd->pd_subdisks; j++) {
				if (pd->pd_meta[j]->volume_id == meta->volume_id)
					break;
			}
			if (j == pd->pd_subdisks)
				pd->pd_subdisks++;
			if (pd->pd_meta[j] != NULL)
				free(pd->pd_meta[j], M_MD_PROMISE);
			pd->pd_meta[j] = promise_meta_copy(meta);
			pd->pd_meta[j]->disk = meta->disks[pos];
			pd->pd_meta[j]->disk.number = pos;
			pd->pd_meta[j]->disk_offset_high =
			    (sd->sd_offset / 512) >> 32;
			pd->pd_meta[j]->disk_offset = sd->sd_offset / 512;
			pd->pd_meta[j]->disk_sectors_high =
			    (sd->sd_size / 512) >> 32;
			pd->pd_meta[j]->disk_sectors = sd->sd_size / 512;
			if (sd->sd_state == G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high =
				    (sd->sd_rebuild_pos / 512) >> 32;
				pd->pd_meta[j]->disk_rebuild =
				    sd->sd_rebuild_pos / 512;
			} else if (sd->sd_state < G_RAID_SUBDISK_S_REBUILD) {
				pd->pd_meta[j]->disk_rebuild_high = 0;
				pd->pd_meta[j]->disk_rebuild = 0;
			} else {
				pd->pd_meta[j]->disk_rebuild_high = UINT32_MAX;
				pd->pd_meta[j]->disk_rebuild = UINT32_MAX;
			}
			pd->pd_updated = 1;
		}
	}

	TAILQ_FOREACH(disk, &sc->sc_disks, d_next) {
		pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
		if (disk->d_state != G_RAID_DISK_S_ACTIVE)
			continue;
		if (!pd->pd_updated)
			continue;
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(disk));
		for (i = 0; i < pd->pd_subdisks; i++)
			g_raid_md_promise_print(pd->pd_meta[i]);
		promise_meta_write(disk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);
		pd->pd_updated = 0;
	}

	return (0);
}

static int
g_raid_md_fail_disk_promise(struct g_raid_md_object *md,
    struct g_raid_subdisk *tsd, struct g_raid_disk *tdisk)
{
	struct g_raid_softc *sc;
	struct g_raid_md_promise_perdisk *pd;
	struct g_raid_subdisk *sd;
	int i, pos;

	sc = md->mdo_softc;
	pd = (struct g_raid_md_promise_perdisk *)tdisk->d_md_data;

	/* We can't fail disk that is not a part of array now. */
	if (tdisk->d_state != G_RAID_DISK_S_ACTIVE)
		return (-1);

	/*
	 * Mark disk as failed in metadata and try to write that metadata
	 * to the disk itself to prevent its later resurrection as STALE.
	 */
	if (pd->pd_subdisks > 0 && tdisk->d_consumer != NULL)
		G_RAID_DEBUG(1, "Writing Promise metadata to %s",
		    g_raid_get_diskname(tdisk));
	for (i = 0; i < pd->pd_subdisks; i++) {
		pd->pd_meta[i]->disk.flags |=
		    PROMISE_F_DOWN | PROMISE_F_REDIR;
		pos = pd->pd_meta[i]->disk.number;
		if (pos >= 0 && pos < PROMISE_MAX_DISKS) {
			pd->pd_meta[i]->disks[pos].flags |=
			    PROMISE_F_DOWN | PROMISE_F_REDIR;
		}
		g_raid_md_promise_print(pd->pd_meta[i]);
	}
	if (tdisk->d_consumer != NULL)
		promise_meta_write(tdisk->d_consumer,
		    pd->pd_meta, pd->pd_subdisks);

	/* Change states. */
	g_raid_change_disk_state(tdisk, G_RAID_DISK_S_FAILED);
	TAILQ_FOREACH(sd, &tdisk->d_subdisks, sd_next) {
		g_raid_change_subdisk_state(sd,
		    G_RAID_SUBDISK_S_FAILED);
		g_raid_event_send(sd, G_RAID_SUBDISK_E_FAILED,
		    G_RAID_EVENT_SUBDISK);
	}

	/* Write updated metadata to remaining disks. */
	g_raid_md_write_promise(md, NULL, NULL, tdisk);

	g_raid_md_promise_refill(sc);
	return (0);
}

static int
g_raid_md_free_disk_promise(struct g_raid_md_object *md,
    struct g_raid_disk *disk)
{
	struct g_raid_md_promise_perdisk *pd;
	int i;

	pd = (struct g_raid_md_promise_perdisk *)disk->d_md_data;
	for (i = 0; i < pd->pd_subdisks; i++) {
		if (pd->pd_meta[i] != NULL) {
			free(pd->pd_meta[i], M_MD_PROMISE);
			pd->pd_meta[i] = NULL;
		}
	}
	free(pd, M_MD_PROMISE);
	disk->d_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_volume_promise(struct g_raid_md_object *md,
    struct g_raid_volume *vol)
{
	struct g_raid_md_promise_pervolume *pv;

	pv = (struct g_raid_md_promise_pervolume *)vol->v_md_data;
	if (pv && pv->pv_meta != NULL) {
		free(pv->pv_meta, M_MD_PROMISE);
		pv->pv_meta = NULL;
	}
	if (pv && !pv->pv_started) {
		pv->pv_started = 1;
		callout_stop(&pv->pv_start_co);
	}
	free(pv, M_MD_PROMISE);
	vol->v_md_data = NULL;
	return (0);
}

static int
g_raid_md_free_promise(struct g_raid_md_object *md)
{

	return (0);
}

G_RAID_MD_DECLARE(promise, "Promise");
2008 G_RAID_MD_DECLARE(promise, "Promise");