/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/systm.h>
41 #include <geom/geom.h>
42 #include "geom/raid/g_raid.h"
43 #include "g_raid_tr_if.h"
45 static MALLOC_DEFINE(M_TR_CONCAT, "tr_concat_data", "GEOM_RAID CONCAT data");
47 struct g_raid_tr_concat_object {
48 struct g_raid_tr_object trso_base;
53 static g_raid_tr_taste_t g_raid_tr_taste_concat;
54 static g_raid_tr_event_t g_raid_tr_event_concat;
55 static g_raid_tr_start_t g_raid_tr_start_concat;
56 static g_raid_tr_stop_t g_raid_tr_stop_concat;
57 static g_raid_tr_iostart_t g_raid_tr_iostart_concat;
58 static g_raid_tr_iodone_t g_raid_tr_iodone_concat;
59 static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_concat;
60 static g_raid_tr_free_t g_raid_tr_free_concat;
62 static kobj_method_t g_raid_tr_concat_methods[] = {
63 KOBJMETHOD(g_raid_tr_taste, g_raid_tr_taste_concat),
64 KOBJMETHOD(g_raid_tr_event, g_raid_tr_event_concat),
65 KOBJMETHOD(g_raid_tr_start, g_raid_tr_start_concat),
66 KOBJMETHOD(g_raid_tr_stop, g_raid_tr_stop_concat),
67 KOBJMETHOD(g_raid_tr_iostart, g_raid_tr_iostart_concat),
68 KOBJMETHOD(g_raid_tr_iodone, g_raid_tr_iodone_concat),
69 KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_concat),
70 KOBJMETHOD(g_raid_tr_free, g_raid_tr_free_concat),
74 static struct g_raid_tr_class g_raid_tr_concat_class = {
76 g_raid_tr_concat_methods,
77 sizeof(struct g_raid_tr_concat_object),
80 .trc_accept_unmapped = 1
84 g_raid_tr_taste_concat(struct g_raid_tr_object *tr, struct g_raid_volume *volume)
86 struct g_raid_tr_concat_object *trs;
88 trs = (struct g_raid_tr_concat_object *)tr;
89 if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_SINGLE &&
90 tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_CONCAT &&
91 !(tr->tro_volume->v_disks_count == 1 &&
92 tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_UNKNOWN))
93 return (G_RAID_TR_TASTE_FAIL);
94 trs->trso_starting = 1;
95 return (G_RAID_TR_TASTE_SUCCEED);
99 g_raid_tr_update_state_concat(struct g_raid_volume *vol)
101 struct g_raid_tr_concat_object *trs;
102 struct g_raid_softc *sc;
108 trs = (struct g_raid_tr_concat_object *)vol->v_tr;
109 if (trs->trso_stopped)
110 s = G_RAID_VOLUME_S_STOPPED;
111 else if (trs->trso_starting)
112 s = G_RAID_VOLUME_S_STARTING;
114 n = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
115 f = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_FAILED);
116 if (n + f == vol->v_disks_count) {
118 s = G_RAID_VOLUME_S_OPTIMAL;
120 s = G_RAID_VOLUME_S_SUBOPTIMAL;
122 s = G_RAID_VOLUME_S_BROKEN;
124 if (s != vol->v_state) {
127 * Some metadata modules may not know CONCAT volume
128 * mediasize until all disks connected. Recalculate.
130 if (vol->v_raid_level == G_RAID_VOLUME_RL_CONCAT &&
131 G_RAID_VOLUME_S_ALIVE(s) &&
132 !G_RAID_VOLUME_S_ALIVE(vol->v_state)) {
134 for (i = 0; i < vol->v_disks_count; i++) {
135 if (vol->v_subdisks[i].sd_state !=
136 G_RAID_SUBDISK_S_NONE)
137 size += vol->v_subdisks[i].sd_size;
139 vol->v_mediasize = size;
142 g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
143 G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
144 G_RAID_EVENT_VOLUME);
145 g_raid_change_volume_state(vol, s);
146 if (!trs->trso_starting && !trs->trso_stopped)
147 g_raid_write_metadata(sc, vol, NULL, NULL);
153 g_raid_tr_event_concat(struct g_raid_tr_object *tr,
154 struct g_raid_subdisk *sd, u_int event)
156 struct g_raid_tr_concat_object *trs;
157 struct g_raid_softc *sc;
158 struct g_raid_volume *vol;
161 trs = (struct g_raid_tr_concat_object *)tr;
162 vol = tr->tro_volume;
165 state = sd->sd_state;
166 if (state != G_RAID_SUBDISK_S_NONE &&
167 state != G_RAID_SUBDISK_S_FAILED &&
168 state != G_RAID_SUBDISK_S_ACTIVE) {
170 "Promote subdisk %s:%d from %s to ACTIVE.",
171 vol->v_name, sd->sd_pos,
172 g_raid_subdisk_state2str(sd->sd_state));
173 g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
175 if (state != sd->sd_state &&
176 !trs->trso_starting && !trs->trso_stopped)
177 g_raid_write_metadata(sc, vol, sd, NULL);
178 g_raid_tr_update_state_concat(vol);
183 g_raid_tr_start_concat(struct g_raid_tr_object *tr)
185 struct g_raid_tr_concat_object *trs;
186 struct g_raid_volume *vol;
188 trs = (struct g_raid_tr_concat_object *)tr;
189 vol = tr->tro_volume;
190 trs->trso_starting = 0;
191 g_raid_tr_update_state_concat(vol);
196 g_raid_tr_stop_concat(struct g_raid_tr_object *tr)
198 struct g_raid_tr_concat_object *trs;
199 struct g_raid_volume *vol;
201 trs = (struct g_raid_tr_concat_object *)tr;
202 vol = tr->tro_volume;
203 trs->trso_starting = 0;
204 trs->trso_stopped = 1;
205 g_raid_tr_update_state_concat(vol);
210 g_raid_tr_iostart_concat(struct g_raid_tr_object *tr, struct bio *bp)
212 struct g_raid_volume *vol;
213 struct g_raid_subdisk *sd;
214 struct bio_queue_head queue;
217 off_t offset, length, remain;
220 vol = tr->tro_volume;
221 if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
222 vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL) {
223 g_raid_iodone(bp, EIO);
226 if (bp->bio_cmd == BIO_FLUSH) {
227 g_raid_tr_flush_common(tr, bp);
231 offset = bp->bio_offset;
232 remain = bp->bio_length;
233 if ((bp->bio_flags & BIO_UNMAPPED) != 0)
238 while (no < vol->v_disks_count &&
239 offset >= vol->v_subdisks[no].sd_size) {
240 offset -= vol->v_subdisks[no].sd_size;
243 KASSERT(no < vol->v_disks_count,
244 ("Request starts after volume end (%ju)", bp->bio_offset));
247 sd = &vol->v_subdisks[no];
248 length = MIN(sd->sd_size - offset, remain);
249 cbp = g_clone_bio(bp);
252 cbp->bio_offset = offset;
253 cbp->bio_length = length;
254 if ((bp->bio_flags & BIO_UNMAPPED) != 0 &&
255 bp->bio_cmd != BIO_DELETE) {
256 cbp->bio_ma_offset += (uintptr_t)addr;
257 cbp->bio_ma += cbp->bio_ma_offset / PAGE_SIZE;
258 cbp->bio_ma_offset %= PAGE_SIZE;
259 cbp->bio_ma_n = round_page(cbp->bio_ma_offset +
260 cbp->bio_length) / PAGE_SIZE;
262 cbp->bio_data = addr;
263 cbp->bio_caller1 = sd;
264 bioq_insert_tail(&queue, cbp);
266 if (bp->bio_cmd != BIO_DELETE)
270 KASSERT(no < vol->v_disks_count || remain == 0,
271 ("Request ends after volume end (%ju, %ju)",
272 bp->bio_offset, bp->bio_length));
273 } while (remain > 0);
274 while ((cbp = bioq_takefirst(&queue)) != NULL) {
275 sd = cbp->bio_caller1;
276 cbp->bio_caller1 = NULL;
277 g_raid_subdisk_iostart(sd, cbp);
281 while ((cbp = bioq_takefirst(&queue)) != NULL)
283 if (bp->bio_error == 0)
284 bp->bio_error = ENOMEM;
285 g_raid_iodone(bp, bp->bio_error);
289 g_raid_tr_kerneldump_concat(struct g_raid_tr_object *tr,
290 void *virtual, vm_offset_t physical, off_t boffset, size_t blength)
292 struct g_raid_volume *vol;
293 struct g_raid_subdisk *sd;
295 off_t offset, length, remain;
298 vol = tr->tro_volume;
299 if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL)
306 while (no < vol->v_disks_count &&
307 offset >= vol->v_subdisks[no].sd_size) {
308 offset -= vol->v_subdisks[no].sd_size;
311 KASSERT(no < vol->v_disks_count,
312 ("Request starts after volume end (%ju)", boffset));
314 sd = &vol->v_subdisks[no];
315 length = MIN(sd->sd_size - offset, remain);
316 error = g_raid_subdisk_kerneldump(&vol->v_subdisks[no],
317 addr, 0, offset, length);
324 KASSERT(no < vol->v_disks_count || remain == 0,
325 ("Request ends after volume end (%ju, %zu)",
327 } while (remain > 0);
332 g_raid_tr_iodone_concat(struct g_raid_tr_object *tr,
333 struct g_raid_subdisk *sd,struct bio *bp)
337 pbp = bp->bio_parent;
338 if (pbp->bio_error == 0)
339 pbp->bio_error = bp->bio_error;
342 if (pbp->bio_children == pbp->bio_inbed) {
343 pbp->bio_completed = pbp->bio_length;
344 g_raid_iodone(pbp, pbp->bio_error);
/*
 * Free method: CONCAT keeps no private allocations beyond the object
 * itself (freed by the generic code), so there is nothing to release.
 */
static int
g_raid_tr_free_concat(struct g_raid_tr_object *tr)
{

	return (0);
}
355 G_RAID_TR_DECLARE(concat, "CONCAT");