/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB      (1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RWTUN,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild in each read/write cycle.");

#define RAID1_REBUILD_FAIR_IO 20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RWTUN,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use for rebuild when the disk is busy.");

#define RAID1_REBUILD_CLUSTER_IDLE 100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RWTUN,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to rebuild each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE 1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RWTUN,
    &g_raid1_rebuild_meta_update, 0,
    "Number of rebuild slabs between metadata updates.");

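/*
 * Tuning sketch (not part of the original source): since these knobs are
 * CTLFLAG_RWTUN, they may be set at runtime or from loader.conf.  For
 * example, assuming the defaults above:
 *
 *   # sysctl kern.geom.raid.raid1.rebuild_slab_size=2097152
 *   # sysctl kern.geom.raid.raid1.rebuild_fair_io=10
 *
 * would double the per-cycle rebuild transfer to 2MB and let the rebuild
 * use roughly one tenth of the I/O bandwidth on a busy volume.
 */
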
static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE 0
#define TR_RAID1_REBUILD 1
#define TR_RAID1_RESYNC 2

#define TR_RAID1_F_DOING_SOME   0x1
#define TR_RAID1_F_LOCKED       0x2
#define TR_RAID1_F_ABORT        0x4
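
/*
 * Flag lifecycle (summary of the code below): TR_RAID1_F_DOING_SOME is set
 * while a rebuild I/O cycle is in flight, TR_RAID1_F_LOCKED while the
 * rebuild holds a range lock on the volume, and TR_RAID1_F_ABORT asks the
 * in-flight cycle to cancel itself at its next completion.
 */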

struct g_raid_tr_raid1_object {
        struct g_raid_tr_object  trso_base;
        int                      trso_starting;
        int                      trso_stopping;
        int                      trso_type;
        int                      trso_recover_slabs; /* slabs before rest */
        int                      trso_fair_io;
        int                      trso_meta_update;
        int                      trso_flags;
        struct g_raid_subdisk   *trso_failed_sd; /* Subdisk being rebuilt. */
        void                    *trso_buffer;    /* Buffer space */
        struct bio               trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
        KOBJMETHOD(g_raid_tr_taste,     g_raid_tr_taste_raid1),
        KOBJMETHOD(g_raid_tr_event,     g_raid_tr_event_raid1),
        KOBJMETHOD(g_raid_tr_start,     g_raid_tr_start_raid1),
        KOBJMETHOD(g_raid_tr_stop,      g_raid_tr_stop_raid1),
        KOBJMETHOD(g_raid_tr_iostart,   g_raid_tr_iostart_raid1),
        KOBJMETHOD(g_raid_tr_iodone,    g_raid_tr_iodone_raid1),
        KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
        KOBJMETHOD(g_raid_tr_locked,    g_raid_tr_locked_raid1),
        KOBJMETHOD(g_raid_tr_idle,      g_raid_tr_idle_raid1),
        KOBJMETHOD(g_raid_tr_free,      g_raid_tr_free_raid1),
        { 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
        "RAID1",
        g_raid_tr_raid1_methods,
        sizeof(struct g_raid_tr_raid1_object),
        .trc_enable = 1,
        .trc_priority = 100,
        .trc_accept_unmapped = 1
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
            (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
             tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
                return (G_RAID_TR_TASTE_FAIL);
        trs->trso_starting = 1;
        return (G_RAID_TR_TASTE_SUCCEED);
}

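/*
 * Derive the volume state from its subdisks (summary of the logic below):
 * OPTIMAL when every subdisk is ACTIVE, SUBOPTIMAL when the rest are only
 * STALE or RESYNC, DEGRADED while at least one subdisk is ACTIVE, and
 * BROKEN otherwise.  If nothing is ACTIVE, the best remaining subdisk is
 * promoted first so the volume can keep running.
 */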
static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_softc *sc;
        struct g_raid_subdisk *tsd, *bestsd;
        u_int s;
        int i, na, ns;

        sc = vol->v_softc;
        trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
        if (trs->trso_stopping &&
            (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
                s = G_RAID_VOLUME_S_STOPPED;
        else if (trs->trso_starting)
                s = G_RAID_VOLUME_S_STARTING;
        else {
                /* Make sure we have at least one ACTIVE disk. */
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                if (na == 0) {
                        /*
                         * Critical situation!  We have no active disks!
                         * Choose the best disk we have to make it active.
                         */
                        bestsd = &vol->v_subdisks[0];
                        for (i = 1; i < vol->v_disks_count; i++) {
                                tsd = &vol->v_subdisks[i];
                                if (tsd->sd_state > bestsd->sd_state)
                                        bestsd = tsd;
                                else if (tsd->sd_state == bestsd->sd_state &&
                                    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
                                     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
                                    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
                                        bestsd = tsd;
                        }
                        if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
                                /* We found a reasonable candidate. */
                                G_RAID_DEBUG1(1, sc,
                                    "Promote subdisk %s:%d from %s to ACTIVE.",
                                    vol->v_name, bestsd->sd_pos,
                                    g_raid_subdisk_state2str(bestsd->sd_state));
                                g_raid_change_subdisk_state(bestsd,
                                    G_RAID_SUBDISK_S_ACTIVE);
                                g_raid_write_metadata(sc,
                                    vol, bestsd, bestsd->sd_disk);
                        }
                }
                na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
                ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                     g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
                if (na == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_OPTIMAL;
                else if (na + ns == vol->v_disks_count)
                        s = G_RAID_VOLUME_S_SUBOPTIMAL;
                else if (na > 0)
                        s = G_RAID_VOLUME_S_DEGRADED;
                else
                        s = G_RAID_VOLUME_S_BROKEN;
                g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
        }
        if (s != vol->v_state) {
                g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
                    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
                    G_RAID_EVENT_VOLUME);
                g_raid_change_volume_state(vol, s);
                if (!trs->trso_starting && !trs->trso_stopping)
                        g_raid_write_metadata(sc, vol, NULL, NULL);
        }
        return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
        /*
         * We don't fail the last disk in the pack, since it still has decent
         * data on it, and losing it entirely would be worse, e.g. when it
         * holds the root file system.
         *
         * XXX should this be controlled via a tunable?  It makes sense for
         * the volume that has / on it.  I can't think of a case where we'd
         * want the volume to go away on this kind of event.
         */
        if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
            g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
                return;
        g_raid_fail_disk(sc, sd, disk);
}

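/*
 * One rebuild cycle (summary of the code below and its iodone handler):
 * lock a slab-sized range of the volume, read it from an ACTIVE subdisk,
 * then in g_raid_tr_iodone_raid1() turn the same bio into a write to the
 * failed subdisk, unlock the range and advance sd_rebuild_pos.
 */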
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *good_sd;
        struct bio *bp;

        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
                return;
        sd = trs->trso_failed_sd;
        good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
        if (good_sd == NULL) {
                g_raid_tr_raid1_rebuild_abort(tr);
                return;
        }
        bp = &trs->trso_bio;
        memset(bp, 0, sizeof(*bp));
        bp->bio_offset = sd->sd_rebuild_pos;
        bp->bio_length = MIN(g_raid1_rebuild_slab,
            sd->sd_size - sd->sd_rebuild_pos);
        bp->bio_data = trs->trso_buffer;
        bp->bio_cmd = BIO_READ;
        bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
        bp->bio_caller1 = good_sd;
        trs->trso_flags |= TR_RAID1_F_DOING_SOME;
        trs->trso_flags |= TR_RAID1_F_LOCKED;
        g_raid_lock_range(sd->sd_volume,        /* Lock callback starts I/O */
           bp->bio_offset, bp->bio_length, NULL, bp);
}

static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;

        vol = trs->trso_base.tro_volume;
        sd = trs->trso_failed_sd;
        g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
        free(trs->trso_buffer, M_TR_RAID1);
        trs->trso_buffer = NULL;
        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
        trs->trso_type = TR_RAID1_NONE;
        trs->trso_recover_slabs = 0;
        trs->trso_failed_sd = NULL;
        g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;

        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
            "Subdisk %s:%d-%s rebuild completed.",
            sd->sd_volume->v_name, sd->sd_pos,
            sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
        g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
        sd->sd_rebuild_pos = 0;
        g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd;
        struct g_raid_volume *vol;
        off_t len;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        sd = trs->trso_failed_sd;
        if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild is aborting.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags |= TR_RAID1_F_ABORT;
        } else {
                G_RAID_DEBUG1(0, vol->v_softc,
                    "Subdisk %s:%d-%s rebuild aborted.",
                    sd->sd_volume->v_name, sd->sd_pos,
                    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
                trs->trso_flags &= ~TR_RAID1_F_ABORT;
                if (trs->trso_flags & TR_RAID1_F_LOCKED) {
                        trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                        len = MIN(g_raid1_rebuild_slab,
                            sd->sd_size - sd->sd_rebuild_pos);
                        g_raid_unlock_range(tr->tro_volume,
                            sd->sd_rebuild_pos, len);
                }
                g_raid_tr_raid1_rebuild_done(trs);
        }
}

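/*
 * Pick the subdisk to rebuild (summary of the cascade below): prefer one
 * already marked RESYNC or REBUILD; otherwise promote a STALE subdisk to
 * RESYNC, or an UNINITIALIZED/NEW one to REBUILD, writing metadata to
 * record the choice.
 */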
static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_subdisk *sd, *fsd;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_failed_sd) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "Rebuild already in progress, pos %jd\n",
                    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
                return;
        }
        sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
        if (sd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No active disk to rebuild.  night night.");
                return;
        }
        fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
        if (fsd == NULL)
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
        if (fsd == NULL) {
                fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
                if (fsd != NULL) {
                        fsd->sd_rebuild_pos = 0;
                        g_raid_change_subdisk_state(fsd,
                            G_RAID_SUBDISK_S_RESYNC);
                        g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
                } else {
                        fsd = g_raid_get_subdisk(vol,
                            G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (fsd == NULL)
                                fsd = g_raid_get_subdisk(vol,
                                    G_RAID_SUBDISK_S_NEW);
                        if (fsd != NULL) {
                                fsd->sd_rebuild_pos = 0;
                                g_raid_change_subdisk_state(fsd,
                                    G_RAID_SUBDISK_S_REBUILD);
                                g_raid_write_metadata(vol->v_softc,
                                    vol, fsd, NULL);
                        }
                }
        }
        if (fsd == NULL) {
                G_RAID_DEBUG1(1, vol->v_softc,
                    "No failed disk to rebuild.  night night.");
                return;
        }
        trs->trso_failed_sd = fsd;
        G_RAID_DEBUG1(0, vol->v_softc,
            "Subdisk %s:%d-%s rebuild start at %jd.",
            fsd->sd_volume->v_name, fsd->sd_pos,
            fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
            (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
        trs->trso_type = TR_RAID1_REBUILD;
        trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
        trs->trso_meta_update = g_raid1_rebuild_meta_update;
        g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;
        int na, nr;

        /*
         * If we're stopping, don't do anything.  If we don't have at least
         * one good disk and one bad disk, we don't do anything.  And if
         * there's a failed subdisk already stored in the trs, then a rebuild
         * is in progress and we punt.  If we make it past all these checks,
         * we need to rebuild.
         */
        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (trs->trso_stopping)
                return;
        na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
        switch (trs->trso_type) {
        case TR_RAID1_NONE:
                if (na == 0)
                        return;
                if (nr == 0) {
                        nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
                            g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
                        if (nr == 0)
                                return;
                }
                g_raid_tr_raid1_rebuild_start(tr);
                break;
        case TR_RAID1_REBUILD:
                if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
                        g_raid_tr_raid1_rebuild_abort(tr);
                break;
        case TR_RAID1_RESYNC:
                break;
        }
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

        g_raid_tr_update_state_raid1(tr->tro_volume, sd);
        return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;
        struct g_raid_volume *vol;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        trs->trso_starting = 0;
        trs->trso_stopping = 1;
        g_raid_tr_update_state_raid1(vol, NULL);
        return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)          (((x) >= 0) ? (x) : (-(x)))
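
/*
 * Sketch of the priority computed in the loop below (lower is better):
 *
 *   prio = load                            (current subdisk load)
 *        + min(sd_recovery, 255) << 22     (penalize disks doing recovery)
 *        + (S_ACTIVE - sd_state) << 16     (penalize non-ACTIVE states)
 *        - head-position bonus             (prefer heads already in place)
 */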
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
        struct g_raid_subdisk *sd, *best;
        int i, prio, bestprio;

        best = NULL;
        bestprio = INT_MAX;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
                    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
                      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
                     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
                        continue;
                if ((mask & (1 << i)) != 0)
                        continue;
                prio = G_RAID_SUBDISK_LOAD(sd);
                prio += min(sd->sd_recovery, 255) << 22;
                prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
                /* If disk head is precisely in position - highly prefer it. */
                if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
                        prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
                else
                /* If disk head is close to position - prefer it. */
                if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
                    G_RAID_SUBDISK_TRACK_SIZE)
                        prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
                if (prio < bestprio) {
                        best = sd;
                        bestprio = prio;
                }
        }
        return (best);
}

static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_subdisk *sd;
        struct bio *cbp;

        sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
        KASSERT(sd != NULL, ("No active disks in volume %s.",
                tr->tro_volume->v_name));

        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
                g_raid_iodone(bp, ENOMEM);
                return;
        }

        g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        struct bio_queue_head queue;
        struct bio *cbp;
        int i;

        vol = tr->tro_volume;

        /*
         * Allocate all bios before sending any request, so we can return
         * ENOMEM in a nice and clean way.
         */
        bioq_init(&queue);
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable; the rest will be written as part of the
                         * rebuild process.
                         */
                        if (bp->bio_offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * Resyncing subdisks still take writes, on the
                         * theory that the resync'd disk is already very
                         * close and keeping up with new writes keeps it
                         * that way.
                         */
                        break;
                default:
                        continue;
                }
                cbp = g_clone_bio(bp);
                if (cbp == NULL)
                        goto failure;
                cbp->bio_caller1 = sd;
                bioq_insert_tail(&queue, cbp);
        }
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
                sd = cbp->bio_caller1;
                cbp->bio_caller1 = NULL;
                g_raid_subdisk_iostart(sd, cbp);
        }
        return;
failure:
        while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
        if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
        g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
        struct g_raid_volume *vol;
        struct g_raid_tr_raid1_object *trs;

        vol = tr->tro_volume;
        trs = (struct g_raid_tr_raid1_object *)tr;
        if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
            vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
                g_raid_iodone(bp, EIO);
                return;
        }
        /*
         * If we're rebuilding, squeeze in rebuild activity every so often,
         * even when the disk is busy.  Be sure to only count real I/O
         * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
         * by this module.
         */
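        /*
         * With the defaults above, this works out to roughly one rebuild
         * slab squeezed in per g_raid1_rebuild_fair_io (20) regular I/Os
         * while the volume is busy.
         */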
        if (trs->trso_failed_sd != NULL &&
            !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
                /* Cut the current or upcoming rebuild round short. */
                trs->trso_recover_slabs = 0;
                if (--trs->trso_fair_io <= 0) {
                        trs->trso_fair_io = g_raid1_rebuild_fair_io;
                        g_raid_tr_raid1_rebuild_some(tr);
                }
        }
        switch (bp->bio_cmd) {
        case BIO_READ:
                g_raid_tr_iostart_raid1_read(tr, bp);
                break;
        case BIO_WRITE:
        case BIO_DELETE:
                g_raid_tr_iostart_raid1_write(tr, bp);
                break;
        case BIO_FLUSH:
                g_raid_tr_flush_common(tr, bp);
                break;
        default:
                KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
                    bp->bio_cmd, vol->v_name));
                break;
        }
}

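/*
 * Completion handler for both regular volume I/O and this module's own
 * rebuild/resync traffic (marked G_RAID_BIO_FLAG_SYNC): rebuild reads are
 * turned into writes to the failed subdisk, while failed regular reads are
 * retried on another mirror and, once recovered, written back to remap the
 * bad sector.
 */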
static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
        struct bio *cbp;
        struct g_raid_subdisk *nsd;
        struct g_raid_volume *vol;
        struct bio *pbp;
        struct g_raid_tr_raid1_object *trs;
        uintptr_t *mask;
        int error, do_write;

        trs = (struct g_raid_tr_raid1_object *)tr;
        vol = tr->tro_volume;
        if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
                /*
                 * This operation is part of a rebuild or resync operation.
                 * See what work just got done, then schedule the next bit of
                 * work, if any.  Rebuild/resync is done a little bit at a
                 * time.  Either when a timeout happens, or after we get a
                 * bunch of I/Os to the disk (to make sure an active system
                 * will complete in a sane amount of time).
                 *
                 * We are set up to do differing amounts of work for each of
                 * these cases.  So long as the slab count is smallish (less
                 * than 50 or so, I'd guess, but that's just a WAG), we
                 * shouldn't have any bio starvation issues.  For active
                 * disks, we do 5MB of data, for inactive ones, we do 50MB.
                 */
                if (trs->trso_type == TR_RAID1_REBUILD) {
                        if (bp->bio_cmd == BIO_READ) {

                                /* Immediately abort rebuild, if requested. */
                                if (trs->trso_flags & TR_RAID1_F_ABORT) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                /* On read error, skip and cross fingers. */
                                if (bp->bio_error != 0) {
                                        G_RAID_LOGREQ(0, bp,
                                            "Read error during rebuild (%d), "
                                            "possible data loss!",
                                            bp->bio_error);
                                        goto rebuild_round_done;
                                }

                                /*
                                 * The read operation finished, queue the
                                 * write and get out.
                                 */
                                G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
                                    bp->bio_error);
                                bp->bio_cmd = BIO_WRITE;
                                bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
                                G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
                                g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
                        } else {
                                /*
                                 * The write operation just finished.  Do
                                 * another.  We keep cloning the master bio
                                 * since it has the right buffers allocated to
                                 * it.
                                 */
                                G_RAID_LOGREQ(4, bp,
                                    "rebuild write done. Error %d",
                                    bp->bio_error);
                                nsd = trs->trso_failed_sd;
                                if (bp->bio_error != 0 ||
                                    trs->trso_flags & TR_RAID1_F_ABORT) {
                                        if ((trs->trso_flags &
                                            TR_RAID1_F_ABORT) == 0) {
                                                g_raid_tr_raid1_fail_disk(sd->sd_softc,
                                                    nsd, nsd->sd_disk);
                                        }
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }
rebuild_round_done:
                                nsd = trs->trso_failed_sd;
                                trs->trso_flags &= ~TR_RAID1_F_LOCKED;
                                g_raid_unlock_range(sd->sd_volume,
                                    bp->bio_offset, bp->bio_length);
                                nsd->sd_rebuild_pos += bp->bio_length;
                                if (nsd->sd_rebuild_pos >= nsd->sd_size) {
                                        g_raid_tr_raid1_rebuild_finish(tr);
                                        return;
                                }

                                /* Abort rebuild if we are stopping. */
                                if (trs->trso_stopping) {
                                        trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                        g_raid_tr_raid1_rebuild_abort(tr);
                                        return;
                                }

                                if (--trs->trso_meta_update <= 0) {
                                        g_raid_write_metadata(vol->v_softc,
                                            vol, nsd, nsd->sd_disk);
                                        trs->trso_meta_update =
                                            g_raid1_rebuild_meta_update;
                                }
                                trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
                                if (--trs->trso_recover_slabs <= 0)
                                        return;
                                g_raid_tr_raid1_rebuild_some(tr);
                        }
                } else if (trs->trso_type == TR_RAID1_RESYNC) {
                        /*
                         * Read the good sd and the bad sd in parallel.
                         * When both are done, compare the buffers and
                         * write the good data to the bad disk if they
                         * differ.  Then do the next bit of work.
                         */
                        panic("Somehow, we think we're doing a resync");
                }
                return;
        }
        pbp = bp->bio_parent;
        pbp->bio_inbed++;
        if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
                /*
                 * Read failed on the first drive.  Retry the read on
                 * another disk drive, if available, before erroring out
                 * the read.
                 */
                sd->sd_disk->d_read_errs++;
                G_RAID_LOGREQ(0, bp,
                    "Read error (%d), %d read errors total",
                    bp->bio_error, sd->sd_disk->d_read_errs);

                /*
                 * If there are too many read errors, we move to degraded.
                 * XXX Do we want to FAIL the drive (eg, make the user redo
                 * everything to get it back in sync), or just degrade the
                 * drive, which kicks off a resync?
                 */
                do_write = 1;
                if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        if (pbp->bio_children == 1)
                                do_write = 0;
                }

                /*
                 * Find the other disk, and try to do the I/O to it.
                 */
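                /*
                 * bio_driver2 of the parent bio doubles as a bitmask of
                 * subdisk positions already tried; select_read_disk()
                 * skips any subdisk whose bit is set in 'mask'.
                 */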
                mask = (uintptr_t *)(&pbp->bio_driver2);
                if (pbp->bio_children == 1) {
                        /* Save original subdisk. */
                        pbp->bio_driver1 = do_write ? sd : NULL;
                        *mask = 0;
                }
                *mask |= 1 << sd->sd_pos;
                nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
                if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
                        g_destroy_bio(bp);
                        G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
                            nsd->sd_pos);
                        if (pbp->bio_children == 2 && do_write) {
                                sd->sd_recovery++;
                                cbp->bio_caller1 = nsd;
                                pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
                                /* Lock callback starts I/O */
                                g_raid_lock_range(sd->sd_volume,
                                    cbp->bio_offset, cbp->bio_length, pbp, cbp);
                        } else {
                                g_raid_subdisk_iostart(nsd, cbp);
                        }
                        return;
                }
                /*
                 * We can't retry.  Return the original error by falling
                 * through.  This will happen when there's only one good disk.
                 * We don't need to fail the raid, since its actual state is
                 * based on the state of the subdisks.
                 */
                G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
        }
        if (bp->bio_cmd == BIO_READ &&
            bp->bio_error == 0 &&
            pbp->bio_children > 1 &&
            pbp->bio_driver1 != NULL) {
                /*
                 * If it was a read, and bio_children is >1, then we just
                 * recovered the data from the second drive.  We should try to
                 * write that data to the first drive if sector remapping is
                 * enabled.  A write should put the data in a new place on the
                 * disk, remapping the bad sector.  Do we need to do that by
                 * queueing a request to the main worker thread?  It doesn't
                 * affect the return code of this current read, and can be
                 * done at our leisure.  However, to make the code simpler, it
                 * is done synchronously.
                 */
                G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
                cbp = g_clone_bio(pbp);
                if (cbp != NULL) {
                        g_destroy_bio(bp);
                        cbp->bio_cmd = BIO_WRITE;
                        cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
                        G_RAID_LOGREQ(2, cbp,
                            "Attempting bad sector remap on failing drive.");
                        g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
                        return;
                }
        }
        if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
                /*
                 * We're done with a recovery, mark the range as unlocked.
                 * For any write errors, we aggressively fail the disk since
                 * there was both a READ and a WRITE error at this location.
                 * Both types of errors generally indicate the drive is on
                 * the verge of total failure anyway.  Better to stop trusting
                 * it now.  However, we need to reset error to 0 in that case
                 * because we're not failing the original I/O which succeeded.
                 */
                if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
                        G_RAID_LOGREQ(0, bp, "Remap write failed: "
                            "failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                        bp->bio_error = 0;
                }
                if (pbp->bio_driver1 != NULL) {
                        ((struct g_raid_subdisk *)pbp->bio_driver1)
                            ->sd_recovery--;
                }
                G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
                g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
                    bp->bio_length);
        }
        if (pbp->bio_cmd != BIO_READ) {
                if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
                        pbp->bio_error = bp->bio_error;
                if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
                        G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
                        g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
                }
                error = pbp->bio_error;
        } else
                error = bp->bio_error;
        g_destroy_bio(bp);
        if (pbp->bio_children == pbp->bio_inbed) {
                pbp->bio_completed = pbp->bio_length;
                g_raid_iodone(pbp, error);
        }
}

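/*
 * Crash dumps are written to every subdisk that would accept a regular
 * write; the dump is considered successful if at least one mirror took
 * the data.
 */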
static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
        struct g_raid_volume *vol;
        struct g_raid_subdisk *sd;
        int error, i, ok;

        vol = tr->tro_volume;
        error = 0;
        ok = 0;
        for (i = 0; i < vol->v_disks_count; i++) {
                sd = &vol->v_subdisks[i];
                switch (sd->sd_state) {
                case G_RAID_SUBDISK_S_ACTIVE:
                        break;
                case G_RAID_SUBDISK_S_REBUILD:
                        /*
                         * When rebuilding, only part of this subdisk is
                         * writable; the rest will be written as part of the
                         * rebuild process.
                         */
                        if (offset >= sd->sd_rebuild_pos)
                                continue;
                        break;
                case G_RAID_SUBDISK_S_STALE:
                case G_RAID_SUBDISK_S_RESYNC:
                        /*
                         * Resyncing subdisks still take writes, on the
                         * theory that the resync'd disk is already very
                         * close and keeping up with new writes keeps it
                         * that way.
                         */
                        break;
                default:
                        continue;
                }
                error = g_raid_subdisk_kerneldump(sd,
                    virtual, physical, offset, length);
                if (error == 0)
                        ok++;
        }
        return (ok > 0 ? 0 : error);
}

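/*
 * Range-lock callback: bio_caller1 carries the subdisk chosen when the lock
 * was requested.  Now that the range is locked, start the deferred I/O
 * (a rebuild read or a read-recovery retry).
 */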
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
        struct bio *bp;
        struct g_raid_subdisk *sd;

        bp = (struct bio *)argp;
        sd = (struct g_raid_subdisk *)bp->bio_caller1;
        g_raid_subdisk_iostart(sd, bp);

        return (0);
}

static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;
        trs->trso_fair_io = g_raid1_rebuild_fair_io;
        trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
        if (trs->trso_type == TR_RAID1_REBUILD)
                g_raid_tr_raid1_rebuild_some(tr);
        return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
        struct g_raid_tr_raid1_object *trs;

        trs = (struct g_raid_tr_raid1_object *)tr;

        if (trs->trso_buffer != NULL) {
                free(trs->trso_buffer, M_TR_RAID1);
                trs->trso_buffer = NULL;
        }
        return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");
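
/*
 * Usage sketch (not from this file; syntax per graid(8) as best recalled,
 * consult the manual page for the authoritative invocation): with a kernel
 * built with "options GEOM_RAID", or the geom_raid module loaded, a RAID1
 * volume can be created with something like:
 *
 *   # graid label Intel data RAID1 ada0 ada1
 */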