/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>
#include "g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB      (1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use for rebuild when the disk is busy.");

#define RAID1_REBUILD_CLUSTER_IDLE 100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE 1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "Number of rebuild slabs between metadata updates.");

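/*
 * All of the knobs above live under the kern.geom.raid.raid1 sysctl node
 * and, being CTLFLAG_RWTUN, may also be set as loader tunables.  For
 * example (the values here are arbitrary illustrations, not
 * recommendations):
 *
 *	sysctl kern.geom.raid.raid1.rebuild_slab_size=2097152
 *	echo 'kern.geom.raid.raid1.rebuild_fair_io=10' >> /boot/loader.conf
 */
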
static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE 0
#define TR_RAID1_REBUILD 1
#define TR_RAID1_RESYNC 2

#define TR_RAID1_F_DOING_SOME   0x1
#define TR_RAID1_F_LOCKED       0x2
#define TR_RAID1_F_ABORT        0x4

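/*
 * Rebuild bookkeeping flags: DOING_SOME means a rebuild transaction is in
 * flight (the volume cannot transition to STOPPED until it completes),
 * LOCKED means we hold a range lock on the slab being copied, and ABORT
 * asks the next completion to cancel the rebuild instead of scheduling
 * more work.
 */
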
struct g_raid_tr_raid1_object {
        struct g_raid_tr_object  trso_base;
        int                      trso_starting;
        int                      trso_stopping;
        int                      trso_type;
        int                      trso_recover_slabs; /* slabs before rest */
        int                      trso_fair_io;
        int                      trso_meta_update;
        int                      trso_flags;
        struct g_raid_subdisk   *trso_failed_sd; /* like per volume */
        void                    *trso_buffer;    /* Buffer space */
        struct bio               trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
        KOBJMETHOD(g_raid_tr_taste,     g_raid_tr_taste_raid1),
        KOBJMETHOD(g_raid_tr_event,     g_raid_tr_event_raid1),
        KOBJMETHOD(g_raid_tr_start,     g_raid_tr_start_raid1),
        KOBJMETHOD(g_raid_tr_stop,      g_raid_tr_stop_raid1),
        KOBJMETHOD(g_raid_tr_iostart,   g_raid_tr_iostart_raid1),
        KOBJMETHOD(g_raid_tr_iodone,    g_raid_tr_iodone_raid1),
        KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
        KOBJMETHOD(g_raid_tr_locked,    g_raid_tr_locked_raid1),
        KOBJMETHOD(g_raid_tr_idle,      g_raid_tr_idle_raid1),
        KOBJMETHOD(g_raid_tr_free,      g_raid_tr_free_raid1),
        { 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
        "RAID1",
        g_raid_tr_raid1_methods,
        sizeof(struct g_raid_tr_raid1_object),
        .trc_enable = 1,
        .trc_priority = 100,
        .trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
            (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
             tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
                return (G_RAID_TR_TASTE_FAIL);
        trs->trso_starting = 1;
        return (G_RAID_TR_TASTE_SUCCEED);
}

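/*
 * Recompute the volume state from the subdisk states.  If no subdisk is
 * ACTIVE, promote the best remaining candidate; then classify the volume
 * as OPTIMAL, SUBOPTIMAL, DEGRADED or BROKEN, and kick off a rebuild if
 * one is called for.
 */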
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_softc *sc;
        struct g_raid_subdisk *tsd, *bestsd;
        u_int s;
        int i, na, ns;

        sc = vol->v_softc;
        trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
        if (trs->trso_stopping &&
            (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
                s = G_RAID_VOLUME_S_STOPPED;
        else if (trs->trso_starting)
                s = G_RAID_VOLUME_S_STARTING;
        else {
                /* Make sure we have at least one ACTIVE disk. */
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                if (na == 0) {
                        /*
                         * Critical situation! We have no active disks!
                         * Choose the best disk we have to make it active.
                         */
                        bestsd = &vol->v_subdisks[0];
                        for (i = 1; i < vol->v_disks_count; i++) {
                                tsd = &vol->v_subdisks[i];
                                if (tsd->sd_state > bestsd->sd_state)
                                        bestsd = tsd;
                                else if (tsd->sd_state == bestsd->sd_state &&
                                    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
                                     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
                                    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
                                        bestsd = tsd;
                        }
                        if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
                                /* We found a reasonable candidate. */
                                G_RAID_DEBUG1(1, sc,
                                    "Promote subdisk %s:%d from %s to ACTIVE.",
                                    vol->v_name, bestsd->sd_pos,
                                    g_raid_subdisk_state2str(bestsd->sd_state));
                                g_raid_change_subdisk_state(bestsd,
                                    G_RAID_SUBDISK_S_ACTIVE);
                                g_raid_write_metadata(sc,
                                    vol, bestsd, bestsd->sd_disk);
                        }
                }
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                     g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
                if (na == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_OPTIMAL;
                else if (na + ns == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_SUBOPTIMAL;
                else if (na > 0)
                        s = G_RAID_VOLUME_S_DEGRADED;
                else
                        s = G_RAID_VOLUME_S_BROKEN;
                g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
        }
        if (s != vol->v_state) {
                g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
                    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
                    G_RAID_EVENT_VOLUME);
                g_raid_change_volume_state(vol, s);
                if (!trs->trso_starting && !trs->trso_stopping)
                        g_raid_write_metadata(sc, vol, NULL, NULL);
        }
        return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
        /*
         * We don't fail the last disk in the pack, since it still has decent
         * data on it and that's better than failing the disk if it holds the
         * root file system.
         *
         * XXX should this be controlled via a tunable?  It makes sense for
         * the volume that has / on it.  I can't think of a case where we'd
         * want the volume to go away on this kind of event.
         */
        if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
            g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
                return;
        g_raid_fail_disk(sc, sd, disk);
}

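/*
 * Schedule one rebuild transaction: read up to g_raid1_rebuild_slab bytes
 * from an ACTIVE subdisk into the preallocated buffer.  The range lock is
 * taken first; the lock callback then issues the read, and the iodone path
 * turns the completed read into a write to the failed subdisk.
 */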
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *good_sd;
        struct bio *bp;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
                return;
        sd = trs->trso_failed_sd;
        good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
        if (good_sd == NULL) {
                g_raid_tr_raid1_rebuild_abort(tr);
                return;
        }
        bp = &trs->trso_bio;
        memset(bp, 0, sizeof(*bp));
        bp->bio_offset = sd->sd_rebuild_pos;
        bp->bio_length = MIN(g_raid1_rebuild_slab,
            sd->sd_size - sd->sd_rebuild_pos);
        bp->bio_data = trs->trso_buffer;
        bp->bio_cmd = BIO_READ;
        bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
        bp->bio_caller1 = good_sd;
        trs->trso_flags |= TR_RAID1_F_DOING_SOME;
        trs->trso_flags |= TR_RAID1_F_LOCKED;
        g_raid_lock_range(sd->sd_volume,        /* Lock callback starts I/O */
           bp->bio_offset, bp->bio_length, NULL, bp);
}

static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;

        vol = trs->trso_base.tro_volume;
        sd = trs->trso_failed_sd;
        g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
        free(trs->trso_buffer, M_TR_RAID1);
        trs->trso_buffer = NULL;
        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
        trs->trso_type = TR_RAID1_NONE;
        trs->trso_recover_slabs = 0;
        trs->trso_failed_sd = NULL;
        g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;

        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
            "Subdisk %s:%d-%s rebuild completed.",
            sd->sd_volume->v_name, sd->sd_pos,
            sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
        g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
        sd->sd_rebuild_pos = 0;
        g_raid_tr_raid1_rebuild_done(trs);
}

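/*
 * Abort an in-progress rebuild.  If a rebuild I/O is still in flight we
 * only set TR_RAID1_F_ABORT and let its completion do the teardown;
 * otherwise we drop any range lock we hold and finish up immediately.
 */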
static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;
        struct g_raid_volume *vol;
        off_t len;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild is aborting.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags |= TR_RAID1_F_ABORT;
        } else {
                G_RAID_DEBUG1(0, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild aborted.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags &= ~TR_RAID1_F_ABORT;
                if (trs->trso_flags & TR_RAID1_F_LOCKED) {
                        trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                        len = MIN(g_raid1_rebuild_slab,
                            sd->sd_size - sd->sd_rebuild_pos);
                        g_raid_unlock_range(tr->tro_volume,
                            sd->sd_rebuild_pos, len);
                }
                g_raid_tr_raid1_rebuild_done(trs);
        }
}

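/*
 * Begin a rebuild.  The subdisk to reconstruct is chosen in priority
 * order: an in-progress RESYNC or REBUILD first, then a STALE disk
 * (restarted as a resync from position 0), then an UNINITIALIZED or NEW
 * disk (a full rebuild from position 0).
 */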
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *fsd;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_failed_sd) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Rebuild already in progress. pos %jd\n",
                    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
                return;
        }
        sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
        if (sd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No active disk to rebuild.  night night.");
                return;
        }
        fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
        if (fsd == NULL)
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
        if (fsd == NULL) {
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
                if (fsd != NULL) {
                        fsd->sd_rebuild_pos = 0;
                        g_raid_change_subdisk_state(fsd,
                            G_RAID_SUBDISK_S_RESYNC);
                        g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
                } else {
                        fsd = g_raid_get_subdisk(vol,
                            G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (fsd == NULL)
                                fsd = g_raid_get_subdisk(vol,
                                    G_RAID_SUBDISK_S_NEW);
                        if (fsd != NULL) {
                                fsd->sd_rebuild_pos = 0;
                                g_raid_change_subdisk_state(fsd,
                                    G_RAID_SUBDISK_S_REBUILD);
                                g_raid_write_metadata(vol->v_softc,
                                    vol, fsd, NULL);
                        }
                }
        }
        if (fsd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No failed disk to rebuild.  night night.");
                return;
        }
        trs->trso_failed_sd = fsd;
        G_RAID_DEBUG1(0, vol->v_softc,
            "Subdisk %s:%d-%s rebuild start at %jd.",
            fsd->sd_volume->v_name, fsd->sd_pos,
            fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
            (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
        trs->trso_type = TR_RAID1_REBUILD;
        trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
        trs->trso_meta_update = g_raid1_rebuild_meta_update;
        g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        int na, nr;

        /*
         * If we're stopping, don't do anything.  If we don't have at least one
         * good disk and one bad disk, we don't do anything.  And if there's a
         * 'good disk' stored in the trs, then we're in progress and we punt.
         * If we make it past all these checks, we need to rebuild.
         */
        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_stopping)
                return;
        na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
        switch (trs->trso_type) {
        case TR_RAID1_NONE:
                if (na == 0)
                        return;
                if (nr == 0) {
                        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (nr == 0)
                                return;
                }
                g_raid_tr_raid1_rebuild_start(tr);
                break;
        case TR_RAID1_REBUILD:
                if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
                        g_raid_tr_raid1_rebuild_abort(tr);
                break;
        case TR_RAID1_RESYNC:
                break;
        }
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

        g_raid_tr_update_state_raid1(tr->tro_volume, sd);
        return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        trs->trso_stopping = 1;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
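/*
 * Roughly, each eligible subdisk gets a priority of
 *
 *	prio = load
 *	     + (min(sd_recovery, 255) << 22)
 *	     + ((S_ACTIVE - sd_state) << 16)
 *
 * minus a bonus of two load-scale units if the disk head sits exactly at
 * the request offset, or one unit if it is within a track of it; the
 * candidate with the lowest priority wins.
 */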
#define ABS(x)          (((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
        struct g_raid_subdisk *sd, *best;
        int i, prio, bestprio;

        best = NULL;
        bestprio = INT_MAX;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
                    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
                      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
                     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
                        continue;
                if ((mask & (1 << i)) != 0)
                        continue;
                prio = G_RAID_SUBDISK_LOAD(sd);
                prio += min(sd->sd_recovery, 255) << 22;
                prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
                /* If disk head is precisely in position - highly prefer it. */
                if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
                        prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
                else
                /* If disk head is close to position - prefer it. */
                if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
                    G_RAID_SUBDISK_TRACK_SIZE)
                        prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
                if (prio < bestprio) {
                        best = sd;
                        bestprio = prio;
                }
        }
        return (best);
}

static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_subdisk *sd;
        struct bio *cbp;

        sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
        KASSERT(sd != NULL, ("No active disks in volume %s.",
                tr->tro_volume->v_name));

        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
                g_raid_iodone(bp, ENOMEM);
                return;
        }

        g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct bio_queue_head queue;
        struct bio *cbp;
        int i;

        vol = tr->tro_volume;

        /*
         * Allocate all bios before sending any request, so we can return
         * ENOMEM in a nice and clean way.
         */
        bioq_init(&queue);
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable; the rest will be written as part of
                         * that process.
                         */
                        if (bp->bio_offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * A resyncing disk still gets writes, on the theory
                         * that it is nearly in sync and keeping up with new
                         * writes will keep it that way.
                         */
                        break;
                default:
                        continue;
                }
                cbp = g_clone_bio(bp);
                if (cbp == NULL)
                        goto failure;
                cbp->bio_caller1 = sd;
                bioq_insert_tail(&queue, cbp);
        }
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
                sd = cbp->bio_caller1;
                cbp->bio_caller1 = NULL;
                g_raid_subdisk_iostart(sd, cbp);
        }
        return;
failure:
        while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
        if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
        g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
                g_raid_iodone(bp, EIO);
                return;
        }
        /*
         * If we're rebuilding, squeeze in rebuild activity every so often,
         * even when the disk is busy.  Be sure to only count real I/O
         * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
         * by this module.
         */
        if (trs->trso_failed_sd != NULL &&
            !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
                /* Make this new or already running round short. */
                trs->trso_recover_slabs = 0;
                if (--trs->trso_fair_io <= 0) {
                        trs->trso_fair_io = g_raid1_rebuild_fair_io;
                        g_raid_tr_raid1_rebuild_some(tr);
                }
        }
        switch (bp->bio_cmd) {
        case BIO_READ:
                g_raid_tr_iostart_raid1_read(tr, bp);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
                g_raid_tr_iostart_raid1_write(tr, bp);
                break;
        case BIO_SPEEDUP:
        case BIO_FLUSH:
                g_raid_tr_flush_common(tr, bp);
                break;
        default:
                KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
                    bp->bio_cmd, vol->v_name));
                break;
        }
}

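/*
 * Completion handling.  There are three cases: I/O issued by the rebuild
 * process itself (G_RAID_BIO_FLAG_SYNC), which advances the rebuild state
 * machine; failed reads, which are retried on another mirror and, when
 * recovered, written back to the failing drive to try to remap the bad
 * sector; and ordinary completions, which are accounted against the
 * parent bio until all children are in.
 */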
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
        struct bio *cbp;
        struct g_raid_subdisk *nsd;
        struct g_raid_volume *vol;
        struct bio *pbp;
        struct g_raid_tr_raid1_object *trs;
        uintptr_t *mask;
        int error, do_write;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
                /*
                 * This operation is part of a rebuild or resync operation.
                 * See what work just got done, then schedule the next bit of
                 * work, if any.  Rebuild/resync is done a little bit at a
                 * time, either when a timeout happens or after we get a
                 * bunch of I/Os to the disk (to make sure an active system
                 * will complete in a sane amount of time).
                 *
                 * We are set up to do differing amounts of work for each of
                 * these cases.  So long as the number of slabs per cycle is
                 * smallish (less than 50 or so, I'd guess, but that's just a
                 * WAG), we shouldn't have any bio starvation issues.  For
                 * active disks, we do 5MB of data; for inactive ones, we do
                 * 50MB.
                 */
                if (trs->trso_type == TR_RAID1_REBUILD) {
                        if (bp->bio_cmd == BIO_READ) {
                                /* Immediately abort rebuild, if requested. */
                                if (trs->trso_flags & TR_RAID1_F_ABORT) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                /* On read error, skip and cross fingers. */
                                if (bp->bio_error != 0) {
                                        G_RAID_LOGREQ(0, bp,
                                            "Read error during rebuild (%d), "
                                            "possible data loss!",
                                            bp->bio_error);
                                        goto rebuild_round_done;
                                }

                                /*
                                 * The read operation finished; queue the
                                 * write and get out.
                                 */
                                G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
                                    bp->bio_error);
                                bp->bio_cmd = BIO_WRITE;
                                bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
                                G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
                                g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
                        } else {
                                /*
                                 * The write operation just finished.  Do
                                 * another.  We keep cloning the master bio
                                 * since it has the right buffers allocated to
                                 * it.
                                 */
                                G_RAID_LOGREQ(4, bp,
                                    "rebuild write done. Error %d",
                                    bp->bio_error);
                                nsd = trs->trso_failed_sd;
                                if (bp->bio_error != 0 ||
                                    trs->trso_flags & TR_RAID1_F_ABORT) {
                                        if ((trs->trso_flags &
                                            TR_RAID1_F_ABORT) == 0) {
                                                g_raid_tr_raid1_fail_disk(sd->sd_softc,
                                                    nsd, nsd->sd_disk);
                                        }
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }
rebuild_round_done:
                                nsd = trs->trso_failed_sd;
                                trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                                g_raid_unlock_range(sd->sd_volume,
                                    bp->bio_offset, bp->bio_length);
                                nsd->sd_rebuild_pos += bp->bio_length;
                                if (nsd->sd_rebuild_pos >= nsd->sd_size) {
                                        g_raid_tr_raid1_rebuild_finish(tr);
                                        return;
                                }

                                /* Abort rebuild if we are stopping. */
                                if (trs->trso_stopping) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                if (--trs->trso_meta_update <= 0) {
                                        g_raid_write_metadata(vol->v_softc,
                                            vol, nsd, nsd->sd_disk);
                                        trs->trso_meta_update =
                                            g_raid1_rebuild_meta_update;
                                }
                                trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                if (--trs->trso_recover_slabs <= 0)
                                        return;
                                g_raid_tr_raid1_rebuild_some(tr);
                        }
                } else if (trs->trso_type == TR_RAID1_RESYNC) {
                        /*
                         * Read the good sd and the bad sd in parallel.  When
                         * both are done, compare the buffers and write the
                         * good data to the bad disk where they differ.  Then
                         * do the next bit of work.
                         */
                        panic("Somehow, we think we're doing a resync");
                }
                return;
        }
        pbp = bp->bio_parent;
        pbp->bio_inbed++;
        if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
                /*
                 * Read failed on first drive.  Retry the read error on
                 * another disk drive, if available, before erroring out the
                 * read.
                 */
                sd->sd_disk->d_read_errs++;
                G_RAID_LOGREQ(0, bp,
                    "Read error (%d), %d read errors total",
                    bp->bio_error, sd->sd_disk->d_read_errs);

                /*
                 * If there are too many read errors, we move to degraded.
                 * XXX Do we want to FAIL the drive (e.g., make the user redo
                 * everything to get it back in sync), or just degrade the
                 * drive, which kicks off a resync?
                 */
                do_write = 1;
                if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        if (pbp->bio_children == 1)
                                do_write = 0;
                }

                /*
                 * Find the other disk, and try to do the I/O to it.
                 */
                mask = (uintptr_t *)(&pbp->bio_driver2);
                if (pbp->bio_children == 1) {
                        /* Save original subdisk. */
                        pbp->bio_driver1 = do_write ? sd : NULL;
                        *mask = 0;
                }
                *mask |= 1 << sd->sd_pos;
                nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
                if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
                        g_destroy_bio(bp);
                        G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
                            nsd->sd_pos);
                        if (pbp->bio_children == 2 && do_write) {
                                sd->sd_recovery++;
                                cbp->bio_caller1 = nsd;
                                pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
                                /* Lock callback starts I/O */
                                g_raid_lock_range(sd->sd_volume,
                                    cbp->bio_offset, cbp->bio_length, pbp, cbp);
                        } else {
                                g_raid_subdisk_iostart(nsd, cbp);
                        }
                        return;
                }
                /*
                 * We can't retry.  Return the original error by falling
                 * through.  This will happen when there's only one good disk.
                 * We don't need to fail the raid, since its actual state is
                 * based on the state of the subdisks.
                 */
                G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
        }
        if (bp->bio_cmd == BIO_READ &&
            bp->bio_error == 0 &&
            pbp->bio_children > 1 &&
            pbp->bio_driver1 != NULL) {
                /*
                 * If it was a read, and bio_children is >1, then we just
                 * recovered the data from the second drive.  We should try to
                 * write that data to the first drive if sector remapping is
                 * enabled.  A write should put the data in a new place on the
                 * disk, remapping the bad sector.  Do we need to do that by
                 * queueing a request to the main worker thread?  It doesn't
                 * affect the return code of this current read, and can be
                 * done at our leisure.  However, to make the code simpler, it
                 * is done synchronously.
                 */
                G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
                cbp = g_clone_bio(pbp);
                if (cbp != NULL) {
                        g_destroy_bio(bp);
                        cbp->bio_cmd = BIO_WRITE;
                        cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
                        G_RAID_LOGREQ(2, cbp,
                            "Attempting bad sector remap on failing drive.");
                        g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
                        return;
                }
        }
        if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
                /*
                 * We're done with a recovery; mark the range as unlocked.
                 * For any write errors, we aggressively fail the disk since
                 * there was both a READ and a WRITE error at this location.
                 * Both types of errors generally indicate the drive is on
                 * the verge of total failure anyway.  Better to stop trusting
                 * it now.  However, we need to reset error to 0 in that case
                 * because we're not failing the original I/O which succeeded.
                 */
                if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
                        G_RAID_LOGREQ(0, bp, "Remap write failed: "
                            "failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        bp->bio_error = 0;
                }
                if (pbp->bio_driver1 != NULL) {
                        ((struct g_raid_subdisk *)pbp->bio_driver1)
                            ->sd_recovery--;
                }
                G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
                g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
                    bp->bio_length);
        }
        if (pbp->bio_cmd != BIO_READ) {
                if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
                        pbp->bio_error = bp->bio_error;
                if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
                        G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                }
                error = pbp->bio_error;
        } else
                error = bp->bio_error;
        g_destroy_bio(bp);
        if (pbp->bio_children == pbp->bio_inbed) {
                pbp->bio_completed = pbp->bio_length;
                g_raid_iodone(pbp, error);
        }
}

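/*
 * Kernel dump: mirror the dump write to every subdisk that is currently
 * writable, and report success if at least one copy made it to disk.
 */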
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        int error, i, ok;

        vol = tr->tro_volume;
        error = 0;
        ok = 0;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable; the rest will be written as part of
                         * that process.
                         */
                        if (offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * A resyncing disk still gets writes, on the theory
                         * that it is nearly in sync and keeping up with new
                         * writes will keep it that way.
                         */
                        break;
                default:
                        continue;
                }
                error = g_raid_subdisk_kerneldump(sd,
                    virtual, physical, offset, length);
                if (error == 0)
                        ok++;
        }
        return (ok > 0 ? 0 : error);
}

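/*
 * Called once a requested range lock has been granted.  The bio that was
 * waiting for the lock is passed in argp, with its target subdisk stashed
 * in bio_caller1; start it now.
 */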
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
        struct bio *bp;
        struct g_raid_subdisk *sd;

        bp = (struct bio *)argp;
        sd = (struct g_raid_subdisk *)bp->bio_caller1;
        g_raid_subdisk_iostart(sd, bp);

        return (0);
}

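/*
 * Idle callback: the volume has seen no regular I/O for a while, so reset
 * the fair-share counters and, if a rebuild is underway, schedule a full
 * cluster of rebuild transactions.
 */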
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        trs->trso_fair_io = g_raid1_rebuild_fair_io;
        trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
        if (trs->trso_type == TR_RAID1_REBUILD)
                g_raid_tr_raid1_rebuild_some(tr);
        return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;

        if (trs->trso_buffer != NULL) {
                free(trs->trso_buffer, M_TR_RAID1);
                trs->trso_buffer = NULL;
        }
        return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");