/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Restored include guard (matches the closing "#endif / !_G_MIRROR_H_" below). */
#ifndef	_G_MIRROR_H_
#define	_G_MIRROR_H_
#include <sys/endian.h>
#include <sys/md5.h>	/* MD5_CTX, MD5Init/MD5Update/MD5Final used below. */
/* GEOM class name and on-disk magic string for gmirror metadata. */
#define	G_MIRROR_CLASS_NAME	"MIRROR"

#define	G_MIRROR_MAGIC		"GEOM::MIRROR"
/*
 * Metadata version history:
 * 0 - Initial version number.
 * 1 - Added 'prefer' balance algorithm.
 * 2 - Added md_genid field to metadata.
 * 3 - Added md_provsize field to metadata.
 * 4 - Added 'no failure synchronization' flag.
 */
#define	G_MIRROR_VERSION	4
/*
 * Read-balance algorithms.  MIN/MAX bound the valid range for
 * validation and for iterating over balance_name()/balance_id() tables.
 */
#define	G_MIRROR_BALANCE_NONE		0
#define	G_MIRROR_BALANCE_ROUND_ROBIN	1
#define	G_MIRROR_BALANCE_LOAD		2
#define	G_MIRROR_BALANCE_SPLIT		3
#define	G_MIRROR_BALANCE_PREFER		4
#define	G_MIRROR_BALANCE_MIN		G_MIRROR_BALANCE_NONE
#define	G_MIRROR_BALANCE_MAX		G_MIRROR_BALANCE_PREFER
/* Per-disk flags. */
#define	G_MIRROR_DISK_FLAG_DIRTY		0x0000000000000001ULL
#define	G_MIRROR_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
#define	G_MIRROR_DISK_FLAG_FORCE_SYNC		0x0000000000000004ULL
#define	G_MIRROR_DISK_FLAG_INACTIVE		0x0000000000000008ULL
#define	G_MIRROR_DISK_FLAG_HARDCODED		0x0000000000000010ULL
#define	G_MIRROR_DISK_FLAG_BROKEN		0x0000000000000020ULL
#define	G_MIRROR_DISK_FLAG_CANDELETE		0x0000000000000040ULL

/*
 * Per-disk flags which are recorded in on-disk metadata.
 * HARDCODED and BROKEN are runtime-only and deliberately excluded.
 */
#define	G_MIRROR_DISK_FLAG_MASK	(G_MIRROR_DISK_FLAG_DIRTY |	\
				 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
				 G_MIRROR_DISK_FLAG_FORCE_SYNC | \
				 G_MIRROR_DISK_FLAG_INACTIVE | \
				 G_MIRROR_DISK_FLAG_CANDELETE)
/* Whole-mirror flags. */
#define	G_MIRROR_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_MIRROR_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL

/* Mirror flags which are recorded in on-disk metadata. */
#define	G_MIRROR_DEVICE_FLAG_MASK	(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
					 G_MIRROR_DEVICE_FLAG_NOFAILSYNC)

/* Runtime-only mirror flags; kept in the high bits, never written to disk. */
#define	G_MIRROR_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_DRAIN	0x0200000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_CLOSEWAIT	0x0400000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_TASTING	0x0800000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_WIPE	0x1000000000000000ULL
/* Debug verbosity level, settable via the kern.geom.mirror.debug sysctl. */
extern int g_mirror_debug;

/* Plain debug message at verbosity 'lvl'. */
#define	G_MIRROR_DEBUG(lvl, ...) \
    _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), NULL, __VA_ARGS__)
/* Debug message annotated with the bio 'bp' it refers to. */
#define	G_MIRROR_LOGREQ(lvl, bp, ...) \
    _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), (bp), __VA_ARGS__)
/* bio classification flags (stored in bio_cflags of sync/regular requests). */
#define	G_MIRROR_BIO_FLAG_REGULAR	0x01
#define	G_MIRROR_BIO_FLAG_SYNC		0x02
98 * Informations needed for synchronization.
100 struct g_mirror_disk_sync {
101 struct g_consumer *ds_consumer; /* Consumer connected to our mirror. */
102 off_t ds_offset; /* Offset of next request to send. */
103 off_t ds_offset_done; /* Offset of already synchronized
105 time_t ds_update_ts; /* Time of last metadata update. */
106 u_int ds_syncid; /* Disk's synchronization ID. */
107 u_int ds_inflight; /* Number of in-flight sync requests. */
108 struct bio **ds_bios; /* BIOs for synchronization I/O. */
112 * Informations needed for synchronization.
114 struct g_mirror_device_sync {
115 struct g_geom *ds_geom; /* Synchronization geom. */
116 u_int ds_ndisks; /* Number of disks in SYNCHRONIZING
/* Disk states (d_state). */
#define	G_MIRROR_DISK_STATE_NONE		0
#define	G_MIRROR_DISK_STATE_NEW			1
#define	G_MIRROR_DISK_STATE_ACTIVE		2
#define	G_MIRROR_DISK_STATE_STALE		3
#define	G_MIRROR_DISK_STATE_SYNCHRONIZING	4
#define	G_MIRROR_DISK_STATE_DISCONNECTED	5
#define	G_MIRROR_DISK_STATE_DESTROY		6
127 struct g_mirror_disk {
128 uint32_t d_id; /* Disk ID. */
129 struct g_consumer *d_consumer; /* Consumer. */
130 struct g_mirror_softc *d_softc; /* Back-pointer to softc. */
131 int d_state; /* Disk state. */
132 u_int d_priority; /* Disk priority. */
133 u_int load; /* Averaged queue length */
134 off_t d_last_offset; /* Last read offset */
135 uint64_t d_flags; /* Additional flags. */
136 u_int d_genid; /* Disk's generation ID. */
137 struct g_mirror_disk_sync d_sync;/* Sync information. */
138 LIST_ENTRY(g_mirror_disk) d_next;
139 u_int d_init_ndisks; /* Initial number of mirror components */
140 uint32_t d_init_slice; /* Initial slice size */
141 uint8_t d_init_balance;/* Initial balance */
142 uint64_t d_init_mediasize;/* Initial mediasize */
144 #define d_name d_consumer->provider->name
146 #define G_MIRROR_EVENT_DONTWAIT 0x1
147 #define G_MIRROR_EVENT_WAIT 0x2
148 #define G_MIRROR_EVENT_DEVICE 0x4
149 #define G_MIRROR_EVENT_DONE 0x8
150 struct g_mirror_event {
151 struct g_mirror_disk *e_disk;
155 TAILQ_ENTRY(g_mirror_event) e_next;
/* Mirror device states (sc_state). */
#define	G_MIRROR_DEVICE_STATE_STARTING	0
#define	G_MIRROR_DEVICE_STATE_RUNNING	1

/* Device creation modes (sc_type). */
#define	G_MIRROR_TYPE_MANUAL	0
#define	G_MIRROR_TYPE_AUTOMATIC	1

/* Bump syncid on first write. */
#define	G_MIRROR_BUMP_SYNCID		0x1
/* Bump genid immediately. */
#define	G_MIRROR_BUMP_GENID		0x2
/* Bump syncid immediately. */
#define	G_MIRROR_BUMP_SYNCID_NOW	0x4
170 struct g_mirror_softc {
171 u_int sc_type; /* Device type (manual/automatic). */
172 u_int sc_state; /* Device state. */
173 uint32_t sc_slice; /* Slice size. */
174 uint8_t sc_balance; /* Balance algorithm. */
175 uint64_t sc_mediasize; /* Device size. */
176 uint32_t sc_sectorsize; /* Sector size. */
177 uint64_t sc_flags; /* Additional flags. */
179 struct g_geom *sc_geom;
180 struct g_provider *sc_provider;
181 int sc_provider_open;
183 uint32_t sc_id; /* Mirror unique ID. */
186 struct bio_queue sc_queue;
187 struct mtx sc_queue_mtx;
188 struct proc *sc_worker;
189 struct bio_queue sc_inflight; /* In-flight regular write requests. */
190 struct bio_queue sc_regular_delayed; /* Delayed I/O requests due to
191 collision with sync requests. */
192 struct bio_queue sc_sync_delayed; /* Delayed sync requests due to
193 collision with regular requests. */
195 LIST_HEAD(, g_mirror_disk) sc_disks;
196 u_int sc_ndisks; /* Number of disks. */
197 struct g_mirror_disk *sc_hint;
199 u_int sc_genid; /* Generation ID. */
200 u_int sc_syncid; /* Synchronization ID. */
202 struct g_mirror_device_sync sc_sync;
203 int sc_idle; /* DIRTY flags removed. */
204 time_t sc_last_write;
206 u_int sc_refcnt; /* Number of softc references */
208 TAILQ_HEAD(, g_mirror_event) sc_events;
209 struct mtx sc_events_mtx;
211 struct callout sc_callout;
213 struct root_hold_token *sc_rootmount;
215 struct mtx sc_done_mtx;
217 #define sc_name sc_geom->name
219 struct g_mirror_metadata;
221 u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
222 struct g_geom * g_mirror_create(struct g_class *mp,
223 const struct g_mirror_metadata *md, u_int type);
224 #define G_MIRROR_DESTROY_SOFT 0
225 #define G_MIRROR_DESTROY_DELAYED 1
226 #define G_MIRROR_DESTROY_HARD 2
227 int g_mirror_destroy(struct g_mirror_softc *sc, int how);
228 int g_mirror_event_send(void *arg, int state, int flags);
229 struct g_mirror_metadata;
230 int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
231 struct g_mirror_metadata *md);
232 int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
233 void g_mirror_fill_metadata(struct g_mirror_softc *sc,
234 struct g_mirror_disk *disk, struct g_mirror_metadata *md);
235 void g_mirror_update_metadata(struct g_mirror_disk *disk);
237 g_ctl_req_t g_mirror_config;
240 struct g_mirror_metadata {
241 char md_magic[16]; /* Magic value. */
242 uint32_t md_version; /* Version number. */
243 char md_name[16]; /* Mirror name. */
244 uint32_t md_mid; /* Mirror unique ID. */
245 uint32_t md_did; /* Disk unique ID. */
246 uint8_t md_all; /* Number of disks in mirror. */
247 uint32_t md_genid; /* Generation ID. */
248 uint32_t md_syncid; /* Synchronization ID. */
249 uint8_t md_priority; /* Disk priority. */
250 uint32_t md_slice; /* Slice size. */
251 uint8_t md_balance; /* Balance type. */
252 uint64_t md_mediasize; /* Size of the smallest
254 uint32_t md_sectorsize; /* Sector size. */
255 uint64_t md_sync_offset; /* Synchronized offset. */
256 uint64_t md_mflags; /* Additional mirror flags. */
257 uint64_t md_dflags; /* Additional disk flags. */
258 char md_provider[16]; /* Hardcoded provider. */
259 uint64_t md_provsize; /* Provider's size. */
260 u_char md_hash[16]; /* MD5 hash. */
263 mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
267 bcopy(md->md_magic, data, 16);
268 le32enc(data + 16, md->md_version);
269 bcopy(md->md_name, data + 20, 16);
270 le32enc(data + 36, md->md_mid);
271 le32enc(data + 40, md->md_did);
272 *(data + 44) = md->md_all;
273 le32enc(data + 45, md->md_genid);
274 le32enc(data + 49, md->md_syncid);
275 *(data + 53) = md->md_priority;
276 le32enc(data + 54, md->md_slice);
277 *(data + 58) = md->md_balance;
278 le64enc(data + 59, md->md_mediasize);
279 le32enc(data + 67, md->md_sectorsize);
280 le64enc(data + 71, md->md_sync_offset);
281 le64enc(data + 79, md->md_mflags);
282 le64enc(data + 87, md->md_dflags);
283 bcopy(md->md_provider, data + 95, 16);
284 le64enc(data + 111, md->md_provsize);
286 MD5Update(&ctx, data, 119);
287 MD5Final(md->md_hash, &ctx);
288 bcopy(md->md_hash, data + 119, 16);
291 mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
295 bcopy(data + 20, md->md_name, 16);
296 md->md_mid = le32dec(data + 36);
297 md->md_did = le32dec(data + 40);
298 md->md_all = *(data + 44);
299 md->md_syncid = le32dec(data + 45);
300 md->md_priority = *(data + 49);
301 md->md_slice = le32dec(data + 50);
302 md->md_balance = *(data + 54);
303 md->md_mediasize = le64dec(data + 55);
304 md->md_sectorsize = le32dec(data + 63);
305 md->md_sync_offset = le64dec(data + 67);
306 md->md_mflags = le64dec(data + 75);
307 md->md_dflags = le64dec(data + 83);
308 bcopy(data + 91, md->md_provider, 16);
309 bcopy(data + 107, md->md_hash, 16);
311 MD5Update(&ctx, data, 107);
312 MD5Final(md->md_hash, &ctx);
313 if (bcmp(md->md_hash, data + 107, 16) != 0)
323 mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
327 bcopy(data + 20, md->md_name, 16);
328 md->md_mid = le32dec(data + 36);
329 md->md_did = le32dec(data + 40);
330 md->md_all = *(data + 44);
331 md->md_genid = le32dec(data + 45);
332 md->md_syncid = le32dec(data + 49);
333 md->md_priority = *(data + 53);
334 md->md_slice = le32dec(data + 54);
335 md->md_balance = *(data + 58);
336 md->md_mediasize = le64dec(data + 59);
337 md->md_sectorsize = le32dec(data + 67);
338 md->md_sync_offset = le64dec(data + 71);
339 md->md_mflags = le64dec(data + 79);
340 md->md_dflags = le64dec(data + 87);
341 bcopy(data + 95, md->md_provider, 16);
342 bcopy(data + 111, md->md_hash, 16);
344 MD5Update(&ctx, data, 111);
345 MD5Final(md->md_hash, &ctx);
346 if (bcmp(md->md_hash, data + 111, 16) != 0)
355 mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
359 bcopy(data + 20, md->md_name, 16);
360 md->md_mid = le32dec(data + 36);
361 md->md_did = le32dec(data + 40);
362 md->md_all = *(data + 44);
363 md->md_genid = le32dec(data + 45);
364 md->md_syncid = le32dec(data + 49);
365 md->md_priority = *(data + 53);
366 md->md_slice = le32dec(data + 54);
367 md->md_balance = *(data + 58);
368 md->md_mediasize = le64dec(data + 59);
369 md->md_sectorsize = le32dec(data + 67);
370 md->md_sync_offset = le64dec(data + 71);
371 md->md_mflags = le64dec(data + 79);
372 md->md_dflags = le64dec(data + 87);
373 bcopy(data + 95, md->md_provider, 16);
374 md->md_provsize = le64dec(data + 111);
375 bcopy(data + 119, md->md_hash, 16);
377 MD5Update(&ctx, data, 119);
378 MD5Final(md->md_hash, &ctx);
379 if (bcmp(md->md_hash, data + 119, 16) != 0)
384 mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
388 bcopy(data, md->md_magic, 16);
389 md->md_version = le32dec(data + 16);
390 switch (md->md_version) {
393 error = mirror_metadata_decode_v0v1(data, md);
396 error = mirror_metadata_decode_v2(data, md);
400 error = mirror_metadata_decode_v3v4(data, md);
409 static __inline const char *
410 balance_name(u_int balance)
412 static const char *algorithms[] = {
413 [G_MIRROR_BALANCE_NONE] = "none",
414 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
415 [G_MIRROR_BALANCE_LOAD] = "load",
416 [G_MIRROR_BALANCE_SPLIT] = "split",
417 [G_MIRROR_BALANCE_PREFER] = "prefer",
418 [G_MIRROR_BALANCE_MAX + 1] = "unknown"
421 if (balance > G_MIRROR_BALANCE_MAX)
422 balance = G_MIRROR_BALANCE_MAX + 1;
424 return (algorithms[balance]);
428 balance_id(const char *name)
430 static const char *algorithms[] = {
431 [G_MIRROR_BALANCE_NONE] = "none",
432 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
433 [G_MIRROR_BALANCE_LOAD] = "load",
434 [G_MIRROR_BALANCE_SPLIT] = "split",
435 [G_MIRROR_BALANCE_PREFER] = "prefer"
439 for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
440 if (strcmp(name, algorithms[n]) == 0)
447 mirror_metadata_dump(const struct g_mirror_metadata *md)
449 static const char hex[] = "0123456789abcdef";
450 char hash[16 * 2 + 1];
453 printf(" magic: %s\n", md->md_magic);
454 printf(" version: %u\n", (u_int)md->md_version);
455 printf(" name: %s\n", md->md_name);
456 printf(" mid: %u\n", (u_int)md->md_mid);
457 printf(" did: %u\n", (u_int)md->md_did);
458 printf(" all: %u\n", (u_int)md->md_all);
459 printf(" genid: %u\n", (u_int)md->md_genid);
460 printf(" syncid: %u\n", (u_int)md->md_syncid);
461 printf(" priority: %u\n", (u_int)md->md_priority);
462 printf(" slice: %u\n", (u_int)md->md_slice);
463 printf(" balance: %s\n", balance_name((u_int)md->md_balance));
464 printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
465 printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
466 printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
468 if (md->md_mflags == 0)
471 if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
472 printf(" NOFAILSYNC");
473 if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
474 printf(" NOAUTOSYNC");
478 if (md->md_dflags == 0)
481 if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
483 if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
484 printf(" SYNCHRONIZING");
485 if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
486 printf(" FORCE_SYNC");
487 if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
491 printf("hcprovider: %s\n", md->md_provider);
492 printf(" provsize: %ju\n", (uintmax_t)md->md_provsize);
493 bzero(hash, sizeof(hash));
494 for (i = 0; i < 16; i++) {
495 hash[i * 2] = hex[md->md_hash[i] >> 4];
496 hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
498 printf(" MD5 hash: %s\n", hash);
500 #endif /* !_G_MIRROR_H_ */