/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"
46 static MALLOC_DEFINE(M_TR_CONCAT, "tr_concat_data", "GEOM_RAID CONCAT data");
48 struct g_raid_tr_concat_object {
49 struct g_raid_tr_object trso_base;
54 static g_raid_tr_taste_t g_raid_tr_taste_concat;
55 static g_raid_tr_event_t g_raid_tr_event_concat;
56 static g_raid_tr_start_t g_raid_tr_start_concat;
57 static g_raid_tr_stop_t g_raid_tr_stop_concat;
58 static g_raid_tr_iostart_t g_raid_tr_iostart_concat;
59 static g_raid_tr_iodone_t g_raid_tr_iodone_concat;
60 static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_concat;
61 static g_raid_tr_free_t g_raid_tr_free_concat;
63 static kobj_method_t g_raid_tr_concat_methods[] = {
64 KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_concat),
65 KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_concat),
66 KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_concat),
67 KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_concat),
68 KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_concat),
69 KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_concat),
70 KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_concat),
71 KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_concat),
75 static struct g_raid_tr_class g_raid_tr_concat_class = {
77 g_raid_tr_concat_methods,
78 sizeof(struct g_raid_tr_concat_object),
81 .trc_accept_unmapped = 1
85 g_raid_tr_taste_concat(struct g_raid_tr_object *tr, struct g_raid_volume *volume)
87 struct g_raid_tr_concat_object *trs;
89 trs = (struct g_raid_tr_concat_object *)tr;
90 if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_SINGLE &&
91 tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
92 !(tr->tro_volume->v_disks_count == 1 &&
93 tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_UNKNOWN))
94 return (G_RAID_TR_TASTE_FAIL);
95 trs->trso_starting = 1;
96 return (G_RAID_TR_TASTE_SUCCEED);
100 g_raid_tr_update_state_concat(struct g_raid_volume *vol)
102 struct g_raid_tr_concat_object *trs;
103 struct g_raid_softc *sc;
109 trs = (struct g_raid_tr_concat_object *)vol->v_tr;
110 if (trs->trso_stopped)
111 s = G_RAID_VOLUME_S_STOPPED;
112 else if (trs->trso_starting)
113 s = G_RAID_VOLUME_S_STARTING;
115 n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
116 f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
117 if (n + f == vol->v_disks_count) {
119 s = G_RAID_VOLUME_S_OPTIMAL;
121 s = G_RAID_VOLUME_S_SUBOPTIMAL;
123 s = G_RAID_VOLUME_S_BROKEN;
125 if (s != vol->v_state) {
128 * Some metadata modules may not know CONCAT volume
129 * mediasize until all disks connected. Recalculate.
131 if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT &&
132 G_RAID_VOLUME_S_ALIVE(s) &&
133 !G_RAID_VOLUME_S_ALIVE(vol->v_state)) {
135 for (i = 0; i < vol->v_disks_count; i++) {
136 if (vol->v_subdisks[i].sd_state !=
137 G_RAID_SUBDISK_S_NONE)
138 size += vol->v_subdisks[i].sd_size;
140 vol->v_mediasize = size;
143 g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
144 G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
145 G_RAID_EVENT_VOLUME);
146 g_raid_change_volume_state(vol, s);
147 if (!trs->trso_starting && !trs->trso_stopped)
148 g_raid_write_metadata(sc, vol, NULL, NULL);
154 g_raid_tr_event_concat(struct g_raid_tr_object *tr,
155 struct g_raid_subdisk *sd, u_int event)
157 struct g_raid_tr_concat_object *trs;
158 struct g_raid_softc *sc;
159 struct g_raid_volume *vol;
162 trs = (struct g_raid_tr_concat_object *)tr;
163 vol = tr->tro_volume;
166 state = sd->sd_state;
167 if (state != G_RAID_SUBDISK_S_NONE &&
168 state != G_RAID_SUBDISK_S_FAILED &&
169 state != G_RAID_SUBDISK_S_ACTIVE) {
171 "Promote subdisk %s:%d from %s to ACTIVE.",
172 vol->v_name, sd->sd_pos,
173 g_raid_subdisk_state2str(sd->sd_state));
174 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
176 if (state != sd->sd_state &&
177 !trs->trso_starting && !trs->trso_stopped)
178 g_raid_write_metadata(sc, vol, sd, NULL);
179 g_raid_tr_update_state_concat(vol);
184 g_raid_tr_start_concat(struct g_raid_tr_object *tr)
186 struct g_raid_tr_concat_object *trs;
187 struct g_raid_volume *vol;
189 trs = (struct g_raid_tr_concat_object *)tr;
190 vol = tr->tro_volume;
191 trs->trso_starting = 0;
192 g_raid_tr_update_state_concat(vol);
197 g_raid_tr_stop_concat(struct g_raid_tr_object *tr)
199 struct g_raid_tr_concat_object *trs;
200 struct g_raid_volume *vol;
202 trs = (struct g_raid_tr_concat_object *)tr;
203 vol = tr->tro_volume;
204 trs->trso_starting = 0;
205 trs->trso_stopped = 1;
206 g_raid_tr_update_state_concat(vol);
211 g_raid_tr_iostart_concat(struct g_raid_tr_object *tr, struct bio *bp)
213 struct g_raid_volume *vol;
214 struct g_raid_subdisk *sd;
215 struct bio_queue_head queue;
218 off_t offset, length, remain;
221 vol = tr->tro_volume;
222 if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
223 vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL) {
224 g_raid_iodone(bp, EIO);
227 if (bp->bio_cmd == BIO_FLUSH) {
228 g_raid_tr_flush_common(tr, bp);
232 offset = bp->bio_offset;
233 remain = bp->bio_length;
234 if ((bp->bio_flags & BIO_UNMAPPED) != 0)
239 while (no < vol->v_disks_count &&
240 offset >= vol->v_subdisks[no].sd_size) {
241 offset -= vol->v_subdisks[no].sd_size;
244 KASSERT(no < vol->v_disks_count,
245 ("Request starts after volume end (%ju)", bp->bio_offset));
248 sd = &vol->v_subdisks[no];
249 length = MIN(sd->sd_size - offset, remain);
250 cbp = g_clone_bio(bp);
253 cbp->bio_offset = offset;
254 cbp->bio_length = length;
255 if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
256 bp->bio_cmd != BIO_DELETE) {
257 cbp->bio_ma_offset += (uintptr_t)addr;
258 cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE;
259 cbp->bio_ma_offset %= PAGE_SIZE;
260 cbp->bio_ma_n = round_page(cbp->bio_ma_offset +
261 cbp->bio_length) / PAGE_SIZE;
263 cbp->bio_data = addr;
264 cbp->bio_caller1 = sd;
265 bioq_insert_tail(&queue, cbp);
267 if (bp->bio_cmd != BIO_DELETE)
271 KASSERT(no < vol->v_disks_count || remain == 0,
272 ("Request ends after volume end (%ju, %ju)",
273 bp->bio_offset, bp->bio_length));
274 } while (remain > 0);
275 while ((cbp = bioq_takefirst(&queue)) != NULL) {
276 sd = cbp->bio_caller1;
277 cbp->bio_caller1 = NULL;
278 g_raid_subdisk_iostart(sd, cbp);
282 while ((cbp = bioq_takefirst(&queue)) != NULL)
284 if (bp->bio_error == 0)
285 bp->bio_error = ENOMEM;
286 g_raid_iodone(bp, bp->bio_error);
290 g_raid_tr_kerneldump_concat(struct g_raid_tr_object *tr,
291 void *virtual, vm_offset_t physical, off_t boffset, size_t blength)
293 struct g_raid_volume *vol;
294 struct g_raid_subdisk *sd;
296 off_t offset, length, remain;
299 vol = tr->tro_volume;
300 if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL)
307 while (no < vol->v_disks_count &&
308 offset >= vol->v_subdisks[no].sd_size) {
309 offset -= vol->v_subdisks[no].sd_size;
312 KASSERT(no < vol->v_disks_count,
313 ("Request starts after volume end (%ju)", boffset));
315 sd = &vol->v_subdisks[no];
316 length = MIN(sd->sd_size - offset, remain);
317 error = g_raid_subdisk_kerneldump(&vol->v_subdisks[no],
318 addr, 0, offset, length);
325 KASSERT(no < vol->v_disks_count || remain == 0,
326 ("Request ends after volume end (%ju, %zu)",
328 } while (remain > 0);
333 g_raid_tr_iodone_concat(struct g_raid_tr_object *tr,
334 struct g_raid_subdisk *sd,struct bio *bp)
338 pbp = bp->bio_parent;
339 if (pbp->bio_error == 0)
340 pbp->bio_error = bp->bio_error;
343 if (pbp->bio_children == pbp->bio_inbed) {
344 pbp->bio_completed = pbp->bio_length;
345 g_raid_iodone(pbp, pbp->bio_error);
/* Free method: this module keeps no extra allocations, nothing to do. */
static int
g_raid_tr_free_concat(struct g_raid_tr_object *tr)
{

	return (0);
}
356 G_RAID_TR_DECLARE(concat, "CONCAT");