/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>
#include <geom/geom_dbg.h>

#include <geom/journal/g_journal.h>

FEATURE(geom_journal, "GEOM journaling support");

/*
 * On-disk journal format:
 *
 * JH - Journal header
 * RH - Record header
 *
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 * % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
 * %%%%%% ****** +------+ +------+     ****** +------+     %%%%%%
 */

CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);

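/*
 * Example (illustrative): with 512-byte sectors both headers fit in a single
 * sector (the CTASSERTs above guarantee that), so a record carrying two
 * 16 kB data blocks occupies 512 B (RH) + 16384 B + 16384 B on disk, with the
 * next record header starting right after the last data block.
 */
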
static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");

static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);

const struct g_journal_desc *g_journal_filesystems[] = {
	&g_journal_ufs,
	NULL
};

SYSCTL_DECL(_kern_geom);

int g_journal_debug = 0;
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;

static SYSCTL_NODE(_kern_geom, OID_AUTO, journal,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_JOURNAL configuration");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RWTUN,
    &g_journal_debug, 0, "Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
    &g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
    &g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
    &g_journal_parallel_flushes, 0,
    "Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
    &g_journal_accept_immediately, 0,
    "Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
    &g_journal_parallel_copies, 0,
    "Number of copy I/O requests to send in parallel");

static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int entries;
	int error;

	entries = g_journal_record_entries;
	error = sysctl_handle_int(oidp, &entries, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	g_journal_record_entries = entries;
	return (0);
}

SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    g_journal_record_entries_sysctl, "I",
    "Maximum number of entries in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
    &g_journal_do_optimize, 0, "Try to combine bios on flush and copy");

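/*
 * Example usage (illustrative): the knobs above live under the
 * kern.geom.journal sysctl tree, e.g. "sysctl kern.geom.journal.switch_time=5"
 * at runtime, or "kern.geom.journal.debug=1" in loader.conf(5) for the
 * CTLFLAG_RWTUN/RDTUN tunables.
 */
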
static u_long g_journal_cache_used = 0;
static u_long g_journal_cache_limit = 64 * 1024 * 1024;
static u_int g_journal_cache_divisor = 2;
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_long g_journal_cache_low = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_JOURNAL cache");
SYSCTL_ULONG(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
    &g_journal_cache_used, 0, "Number of allocated bytes");

static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_long limit;
	int error;

	limit = g_journal_cache_limit;
	error = sysctl_handle_long(oidp, &limit, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	g_journal_cache_limit = limit;
	g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
	return (0);
}

SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, NULL, 0,
    g_journal_cache_limit_sysctl, "I",
    "Maximum number of allocated bytes");
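/*
 * Worked example (illustrative): with the default limit of 64 MB and
 * g_journal_cache_switch at 90, the handler above sets g_journal_cache_low to
 * (67108864 / 100) * 90 = 60397920 bytes, i.e. roughly 57.6 MB.
 */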
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
    &g_journal_cache_divisor, 0,
    "(kmem_size / kern.geom.journal.cache.divisor) == cache size");

static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
	u_int cswitch;
	int error;

	cswitch = g_journal_cache_switch;
	error = sysctl_handle_int(oidp, &cswitch, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cswitch > 100)
		return (EINVAL);
	g_journal_cache_switch = cswitch;
	g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
	return (0);
}

SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    g_journal_cache_switch_sysctl, "I",
    "Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
    &g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
    &g_journal_cache_alloc_failures, 0, "Memory allocation failures");

static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;

static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
    &g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
    &g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
    &g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
    &g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
    &g_journal_stats_journal_full, 0,
    "Number of times journal was almost full");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
    &g_journal_stats_low_mem, 0, "Number of times low_mem hook was called");

static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;

struct g_class g_journal_class = {
	.name = G_JOURNAL_CLASS_NAME,
	.version = G_VERSION,
	.taste = g_journal_taste,
	.ctlreq = g_journal_config,
	.dumpconf = g_journal_dumpconf,
	.init = g_journal_init,
	.fini = g_journal_fini
};

static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_start_switcher(struct g_class *mp);
static void g_journal_stop_switcher(void);
static void g_journal_switch_wait(struct g_journal_softc *sc);

#define	GJ_SWITCHER_WORKING	0
#define	GJ_SWITCHER_DIE		1
#define	GJ_SWITCHER_DIED	2
static struct proc *g_journal_switcher_proc = NULL;
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;

#ifdef GJ_MEMDEBUG
struct meminfo {
	size_t		mi_size;
	struct stack	mi_stack;
};
#endif

/*
 * We use our own malloc/realloc/free functions, so we can collect statistics
 * and force a journal switch when we're running out of cache.
 */
static void *
gj_malloc(size_t size, int flags)
{
	void *p;
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	mtx_lock(&g_journal_cache_mtx);
	if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
	    g_journal_cache_used + size > g_journal_cache_low) {
		GJ_DEBUG(1, "No cache, waking up the switcher.");
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
	}
	if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
	    g_journal_cache_used + size > g_journal_cache_limit) {
		mtx_unlock(&g_journal_cache_mtx);
		g_journal_cache_alloc_failures++;
		return (NULL);
	}
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
#ifndef GJ_MEMDEBUG
	p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
	mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
	p = (u_char *)mi + sizeof(*mi);
	mi->mi_size = size;
	stack_save(&mi->mi_stack);
#endif
	return (p);
}

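/*
 * Behaviour sketch (illustrative): once g_journal_cache_used crosses
 * g_journal_cache_low, gj_malloc() wakes the switcher so memory can be
 * reclaimed by switching journals; an M_NOWAIT allocation that would push
 * usage past g_journal_cache_limit fails with NULL instead of sleeping.
 */
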
static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
	struct meminfo *mi;
#endif

	KASSERT(p != NULL, ("p=NULL"));
	KASSERT(size > 0, ("size=0"));
	mtx_lock(&g_journal_cache_mtx);
	KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
	g_journal_cache_used -= size;
	mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
	mi = p = (void *)((u_char *)p - sizeof(*mi));
	if (mi->mi_size != size) {
		printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
		    mi->mi_size);
		printf("GJOURNAL: Alloc backtrace:\n");
		stack_print(&mi->mi_stack);
		printf("GJOURNAL: Free backtrace:\n");
		kdb_backtrace();
	}
#endif
	free(p, M_JOURNAL);
}

static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
	void *np;

#ifndef GJ_MEMDEBUG
	mtx_lock(&g_journal_cache_mtx);
	g_journal_cache_used -= oldsize;
	g_journal_cache_used += size;
	mtx_unlock(&g_journal_cache_mtx);
	np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
	np = gj_malloc(size, M_WAITOK);
	bcopy(p, np, MIN(oldsize, size));
	gj_free(p, oldsize);
#endif
	return (np);
}

static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
	off_t length, used;

	if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
	    (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
	     sc->sc_journal_offset < sc->sc_active.jj_offset)) {
		panic("Journal overflow "
		    "(id = %u joffset=%jd active=%jd inactive=%jd)",
		    (unsigned)sc->sc_id,
		    (intmax_t)sc->sc_journal_offset,
		    (intmax_t)sc->sc_active.jj_offset,
		    (intmax_t)sc->sc_inactive.jj_offset);
	}
	if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
		length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
		used = sc->sc_journal_offset - sc->sc_active.jj_offset;
	} else {
		length = sc->sc_jend - sc->sc_active.jj_offset;
		length += sc->sc_inactive.jj_offset - sc->sc_jstart;
		if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
			used = sc->sc_journal_offset - sc->sc_active.jj_offset;
		else {
			used = sc->sc_jend - sc->sc_active.jj_offset;
			used += sc->sc_journal_offset - sc->sc_jstart;
		}
	}

	/* Already woken up? */
	if (g_journal_switcher_wokenup)
		return;
	/*
	 * If the active journal takes more than g_journal_force_switch percent
	 * of free journal space, we force a journal switch.
	 */
	KASSERT(length > 0,
	    ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
	    (intmax_t)length, (intmax_t)used,
	    (intmax_t)sc->sc_active.jj_offset,
	    (intmax_t)sc->sc_inactive.jj_offset,
	    (intmax_t)sc->sc_journal_offset));
	if ((used * 100) / length > g_journal_force_switch) {
		g_journal_stats_journal_full++;
		GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
		    sc->sc_name, (used * 100) / length);
		mtx_lock(&g_journal_cache_mtx);
		g_journal_switcher_wokenup = 1;
		wakeup(&g_journal_switcher_state);
		mtx_unlock(&g_journal_cache_mtx);
	}
}

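/*
 * Worked example (illustrative): for an active journal area of length 100 MB
 * of which 75 MB is used, (used * 100) / length = 75, which exceeds the
 * default g_journal_force_switch of 70, so a switch is forced.
 */
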
static void
g_journal_orphan(struct g_consumer *cp)
{
	struct g_journal_softc *sc;
	char name[256];
	int error;

	g_topology_assert();
	sc = cp->geom->softc;
	strlcpy(name, cp->provider->name, sizeof(name));
	GJ_DEBUG(0, "Lost provider %s.", name);
	if (sc == NULL)
		return;
	error = g_journal_destroy(sc);
	if (error == 0)
		GJ_DEBUG(0, "Journal %s destroyed.", name);
	else {
		GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
		    "Destroy it manually after last close.", sc->sc_name,
		    error);
	}
}

static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_journal_softc *sc;
	int dcw;

	g_topology_assert();
	GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
	    acr, acw, ace);

	dcw = pp->acw + acw;

	sc = pp->geom->softc;
	if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		else
			return (ENXIO);
	}
	if (pp->acw == 0 && dcw > 0) {
		GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
		sc->sc_flags &= ~GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} /* else if (pp->acw > 0 && dcw == 0 && JEMPTY(sc)) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		sc->sc_flags |= GJF_DEVICE_CLEAN;
		g_topology_unlock();
		g_journal_metadata_update(sc);
		g_topology_lock();
	} */
	return (dcw);
}

static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

	bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
	data += sizeof(GJ_HEADER_MAGIC);
	le32enc(data, hdr->jh_journal_id);
	data += 4;
	le32enc(data, hdr->jh_journal_next_id);
}

static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

	bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
	data += sizeof(hdr->jh_magic);
	if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
		return (EINVAL);
	hdr->jh_journal_id = le32dec(data);
	data += 4;
	hdr->jh_journal_next_id = le32dec(data);
	return (0);
}

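/*
 * Example (illustrative): the on-disk journal header produced above is the
 * magic string (jh_magic) followed immediately by jh_journal_id and
 * jh_journal_next_id, each stored as a 32-bit little-endian value.
 */
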
static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
	struct bintime bt;
	int error;

	if (sc->sc_bio_flush == 0)
		return;
	GJ_TIMER_START(1, &bt);
	if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
		error = g_io_flush(sc->sc_jconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_jconsumer->provider->name, error);
	}
	if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
		/*
		 * TODO: This could be called in parallel with the
		 *       previous call.
		 */
		error = g_io_flush(sc->sc_dconsumer);
		GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
		    sc->sc_dconsumer->provider->name, error);
	}
	GJ_TIMER_STOP(1, &bt, "Cache flush time");
}

static int
g_journal_write_header(struct g_journal_softc *sc)
{
	struct g_journal_header hdr;
	struct g_consumer *cp;
	u_char *buf;
	int error;

	cp = sc->sc_jconsumer;
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
	hdr.jh_journal_id = sc->sc_journal_id;
	hdr.jh_journal_next_id = sc->sc_journal_next_id;
	g_journal_header_encode(&hdr, buf);
	error = g_write_data(cp, sc->sc_journal_offset, buf,
	    cp->provider->sectorsize);
	/* if (error == 0) */
	sc->sc_journal_offset += cp->provider->sectorsize;

	gj_free(buf, cp->provider->sectorsize);
	return (error);
}

/*
 * Every journal record has a header and data following it.
 * Functions below are used to encode the header to little endian before
 * storing it on disk and to decode it to system endianness after reading.
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	data += sizeof(hdr->jrh_journal_id);
	le16enc(data, hdr->jrh_nentries);
	data += sizeof(hdr->jrh_nentries);
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += sizeof(hdr->jrh_sum);
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += sizeof(ent->je_joffset);
		le64enc(data, ent->je_offset);
		data += sizeof(ent->je_offset);
		le64enc(data, ent->je_length);
		data += sizeof(ent->je_length);
	}
}

static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	data += sizeof(hdr->jrh_journal_id);
	hdr->jrh_nentries = le16dec(data);
	data += sizeof(hdr->jrh_nentries);
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += sizeof(hdr->jrh_sum);
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += sizeof(ent->je_joffset);
		ent->je_offset = le64dec(data);
		data += sizeof(ent->je_offset);
		ent->je_length = le64dec(data);
		data += sizeof(ent->je_length);
	}
	return (0);
}

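/*
 * Example (illustrative): the on-disk record header mirrors the codecs above:
 * the magic string, a 32-bit LE journal ID, a 16-bit LE entry count, the MD5
 * sum (jrh_sum), then jrh_nentries triples of 64-bit LE je_joffset, je_offset
 * and je_length.
 */
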
/*
 * The function below reads metadata from a provider (via the given consumer),
 * decodes it to system endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/* Is checksum correct? */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}

/*
 * Two functions below are responsible for updating metadata.
 * Only metadata on the data provider is updated (we need to update
 * information about the active journal there).
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except report it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}

static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data are on disk.
	 * We write information like "journal is consistent" here, so we need
	 * to be sure it is. Without BIO_FLUSH here, we can end up in a
	 * situation where metadata is stored on disk, but not all data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}

/*
 * This is where I/O requests come from the GEOM.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}

static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}

static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}

#define	g_journal_insert_bio(head, bp, flags)				\
	g_journal_insert((head), (bp)->bio_offset,			\
	    (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset,	\
	    (bp)->bio_data, flags)
/*
 * The function below does a lot more than just inserting a bio into the queue.
 * It keeps the queue sorted by offset and ensures that there is no duplicated
 * data (it combines bios where ranges overlap).
 *
 * The function returns the number of bios inserted (as a bio can be split).
 */
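/*
 * Example (illustrative): inserting a new bio covering [8192, 16384) into a
 * queue holding a single bio for [0, 32768) matches case 5 below: the
 * existing bio is split around the new one, two bios are added to the queue
 * and the function returns 2.
 */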
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |     new bio     |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		pbp = cbp;
	}

	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}

/*
 * The function combines neighbour bios trying to squeeze as much data as
 * possible into one bio.
 *
 * The function returns the number of bios combined (as a negative value).
 */
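/*
 * Example (illustrative): two queued 16 kB writes at offsets 0 and 16384 are
 * merged into a single 32 kB bio and the function returns -1.
 */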
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which have to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbour bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that the bio queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length <
			    cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with a too big bio. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		cbp = pbp;
		g_journal_stats_combined_ios++;
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}

/*
 * TODO: Update comment.
 * These are functions responsible for copying one portion of data from journal
 * to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks on the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got finished write request, logs potential errors.
 */

/*
 * When there is no data in cache, this function is used to read it.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *cbp;

	/*
	 * We were short on memory, so data was freed.
	 * In that case we need to read it back from journal.
	 */
	cbp = g_alloc_bio();
	cbp->bio_cflags = bp->bio_cflags;
	cbp->bio_parent = bp;
	cbp->bio_offset = bp->bio_joffset;
	cbp->bio_length = bp->bio_length;
	cbp->bio_data = gj_malloc(bp->bio_length, M_WAITOK);
	cbp->bio_cmd = BIO_READ;
	cbp->bio_done = g_journal_std_done;
	GJ_LOGREQ(4, cbp, "READ FIRST");
	g_io_request(cbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}

static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}

static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Remember in metadata that we're starting to copy journaled data
	 * to the data provider.
	 * In case of power failure, we will copy this data again on boot.
	 */
	if (!sc->sc_journal_copying) {
		sc->sc_journal_copying = 1;
		GJ_DEBUG(1, "Starting copy of journal.");
		g_journal_metadata_update(sc);
	}
	g_journal_copy_send(sc);
}

/*
 * Data block has been read from the journal provider.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver the WRITE request either.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}

/*
 * Data block has been written to the data provider.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}

static void g_journal_flush_done(struct bio *bp);

/*
 * Flush one record onto the active journal provider.
 */
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store 'journal id', so we know to which journal this record belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/* Could be less than g_journal_record_entries if called due to a timeout. */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	GJQ_LAST(sc->sc_flush_queue, pbp);

	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;

	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		g_reset_bio(bp);
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;

		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add request to the active sc_journal_queue queue.
		 * This is our cache. After journal switch we don't have to
		 * read the data from the inactive journal, because we keep
		 * it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}

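/*
 * Worked example (illustrative, assuming a record of 64 entries and 512-byte
 * sectors): one flush writes a single header sector followed by the 64 data
 * payloads, and sc_journal_offset advances by 512 bytes plus the sum of the
 * entries' lengths.
 */
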
/*
 * Flush request finished.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}

static void g_journal_release_delayed(struct g_journal_softc *sc);

static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush request to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request sent");
		g_io_request(bp, cp);
	}
}

static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver
	 * the response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto the active journal provider.
		 */
		g_journal_flush(sc);
	}
}

static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	for (;;) {
		/* The flush queue is full, exit. */
		if (sc->sc_flush_count >= g_journal_accept_immediately)
			return;
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			return;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}

/*
 * Add an I/O request to the current queue. If we have enough requests for one
 * journal record, we flush them onto the active journal provider.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}

static void g_journal_read_done(struct bio *bp);

/*
 * Try to find requested data in cache.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in cache, deliver happy
			 * message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}

/*
 * This function is used for collecting data on read.
 * The complexity is because parts of the data can be stored in four different
 * places:
 * - in memory - the data not yet sent to the active journal provider
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
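/*
 * Example (illustrative): a read of [0, 64k) may be satisfied piecewise, e.g.
 * [0, 16k) from the current queue, [16k, 32k) from the active journal and
 * [32k, 64k) from the data provider; the recursive calls below stitch the
 * pieces together before the request is delivered.
 */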
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	for (i = 1; i <= 5; i++) {
		switch (i) {
		case 1: /* Not-yet-sent data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2: /* Skip flush queue as they are also in active queue */
			continue;
		case 3: /* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4: /* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5: /* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in journal.
		 * It's time to ask the data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}

/*
 * Function responsible for handling finished READ requests.
 * Actually, g_std_done() could be used here, the only difference is that we
 * use gj_free() instead of g_free().
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}

/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * Switch is done, start copying data from the (now) inactive
		 * journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}

	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}

static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}

static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++)
		desc->jd_dirty(sc->sc_dconsumer);
}

/*
 * Function reads record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for bio
 * and data on every call.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	g_reset_bio(bp);
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}

/*
 * Function is called when we start the journal device and we detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the journal
 * and place them in the inactive queue, so we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was made not to synchronize the whole journal here, as
 * it can take too much time. Reading headers only and delaying the
 * synchronization process until after the journal provider is started should
 * be the best choice.
 */
static void
g_journal_sync(struct g_journal_softc *sc)
{
	struct g_journal_record_header rhdr;
	struct g_journal_entry *ent;
	struct g_journal_header jhdr;
	struct g_consumer *cp;
	struct bio *bp, *fbp, *tbp;
	off_t joffset, offset;
	u_char *buf, sum[16];
	uint32_t id;
	MD5_CTX ctx;
	int error, found, i;

	found = 0;
	fbp = NULL;
	cp = sc->sc_jconsumer;
	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);

	/*
	 * Read and decode first journal header.
	 */
	error = g_journal_sync_read(cp, bp, offset, buf);
	if (error != 0) {
		GJ_DEBUG(0, "Error while reading journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	error = g_journal_header_decode(buf, &jhdr);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot decode journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	id = sc->sc_journal_id;
	if (jhdr.jh_journal_id != sc->sc_journal_id) {
		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
		goto end;
	}
	offset += cp->provider->sectorsize;
	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

	for (;;) {
		/*
		 * If the biggest record won't fit, look for a record header or
		 * journal header from the beginning.
		 */
		GJ_VALIDATE_OFFSET(offset, sc);
		error = g_journal_sync_read(cp, bp, offset, buf);
		if (error != 0) {
			/*
			 * Not good. An error while reading a header means that
			 * we cannot read subsequent headers and consequently
			 * cannot find the termination.
			 */
			GJ_DEBUG(0,
			    "Error while reading record header from %s.",
			    cp->provider->name);
			break;
		}

		error = g_journal_record_header_decode(buf, &rhdr);
		if (error != 0) {
			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
			    (intmax_t)offset, error);
			/*
			 * This is not a record header.
			 * If we are lucky, this is the next journal header.
			 */
			error = g_journal_header_decode(buf, &jhdr);
			if (error != 0) {
				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
				    (intmax_t)offset, error);
				/*
				 * Nope, this is not a journal header either,
				 * which basically means that the journal is
				 * not terminated properly.
				 */
				break;
			}
			/*
			 * Ok. This is header of _some_ journal. Now we need to
			 * verify if this is header of the _next_ journal.
			 */
			if (jhdr.jh_journal_id != id) {
				GJ_DEBUG(1, "Journal ID mismatch at %jd "
				    "(0x%08x != 0x%08x).", (intmax_t)offset,
				    (u_int)jhdr.jh_journal_id, (u_int)id);
				break;
			}

			/* Found termination. */
			found++;
			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
			    (intmax_t)offset, (u_int)id);
			sc->sc_active.jj_offset = offset;
			sc->sc_journal_offset =
			    offset + cp->provider->sectorsize;
			sc->sc_journal_id = id;
			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;

			while ((tbp = fbp) != NULL) {
				fbp = tbp->bio_next;
				GJ_LOGREQ(3, tbp, "Adding request.");
				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
				    tbp, M_WAITOK);
			}

			/* Skip journal's header. */
			offset += cp->provider->sectorsize;
			continue;
		}

		/* Skip record's header. */
		offset += cp->provider->sectorsize;

		/*
		 * Add information about every record entry to the inactive
		 * queue.
		 */
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Init(&ctx);
		for (i = 0; i < rhdr.jrh_nentries; i++) {
			ent = &rhdr.jrh_entries[i];
			GJ_DEBUG(3, "Insert entry: %jd %jd.",
			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
			g_journal_insert(&fbp, ent->je_offset,
			    ent->je_offset + ent->je_length, ent->je_joffset,
			    NULL, M_WAITOK);
			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
				u_char *buf2;

				/*
				 * TODO: Should use faster function (like
				 *       g_journal_sync_read()).
				 */
				buf2 = g_read_data(cp, offset, ent->je_length,
				    NULL);
				if (buf2 == NULL)
					GJ_DEBUG(0, "Cannot read data at %jd.",
					    (intmax_t)offset);
				else {
					MD5Update(&ctx, buf2, ent->je_length);
					g_free(buf2);
				}
			}
			/* Skip entry's data. */
			offset += ent->je_length;
		}
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
			MD5Final(sum, &ctx);
			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
				    (intmax_t)offset);
			}
		}
	}
end:
	gj_free(bp->bio_data, cp->provider->sectorsize);
	g_destroy_bio(bp);

	/* Remove bios from unterminated journal. */
	while ((tbp = fbp) != NULL) {
		fbp = tbp->bio_next;
		g_destroy_bio(tbp);
	}

	if (found < 1 && joffset > 0) {
		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
		    sc->sc_name);
		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
			sc->sc_inactive.jj_queue = tbp->bio_next;
			g_destroy_bio(tbp);
		}
		g_journal_initialize(sc);
		g_journal_mark_as_dirty(sc);
	} else {
		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
		g_journal_copy_start(sc);
	}
}

/*
 * Wait for requests.
 * If we have requests in the current queue, flush them after 3 seconds from
 * the last flush. In this way we don't wait forever (or for a journal switch)
 * with storing not-full records in the journal.
 */
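/*
 * Timeout arithmetic (illustrative): if last_write was 1 second ago, then
 * timeout = (last_write + 3 - time_second) * hz = 2 * hz, so the worker
 * sleeps at most 2 more seconds before flushing a partial record.
 */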
static void
g_journal_wait(struct g_journal_softc *sc, time_t last_write)
{
	int error, timeout;

	GJ_DEBUG(3, "%s: enter", __func__);
	if (sc->sc_current_count == 0) {
		if (g_journal_debug < 2)
			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
		else {
			/*
			 * If we have debug turned on, show number of elements
			 * in various queues.
			 */
			for (;;) {
				error = msleep(sc, &sc->sc_mtx, PRIBIO,
				    "gj:work", hz * 3);
				if (error == 0) {
					mtx_unlock(&sc->sc_mtx);
					break;
				}
				GJ_DEBUG(3, "Report: current count=%d",
				    sc->sc_current_count);
				GJ_DEBUG(3, "Report: flush count=%d",
				    sc->sc_flush_count);
				GJ_DEBUG(3, "Report: flush in progress=%d",
				    sc->sc_flush_in_progress);
				GJ_DEBUG(3, "Report: copy in progress=%d",
				    sc->sc_copy_in_progress);
				GJ_DEBUG(3, "Report: delayed=%d",
				    sc->sc_delayed_count);
			}
		}
		GJ_DEBUG(3, "%s: exit 1", __func__);
		return;
	}

	/*
	 * Flush even not-full records every 3 seconds.
	 */
	timeout = (last_write + 3 - time_second) * hz;
	if (timeout <= 0) {
		mtx_unlock(&sc->sc_mtx);
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		GJ_DEBUG(3, "%s: exit 2", __func__);
		return;
	}
	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
	if (error == EWOULDBLOCK)
		g_journal_flush_send(sc);
	GJ_DEBUG(3, "%s: exit 3", __func__);
}

static void
g_journal_worker(void *arg)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	struct bio *bp;
	time_t last_write;
	int type;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sc = arg;
	type = 0;	/* gcc */

	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
		g_journal_initialize(sc);
	} else {
		g_journal_sync(sc);
	}
	/*
	 * Check if we can use BIO_FLUSH.
	 */
	sc->sc_bio_flush = 0;
	if (g_io_flush(sc->sc_jconsumer) == 0) {
		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
		    sc->sc_jconsumer->provider->name);
	} else {
		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
		    sc->sc_jconsumer->provider->name);
	}
	if (sc->sc_jconsumer != sc->sc_dconsumer) {
		if (g_io_flush(sc->sc_dconsumer) == 0) {
			sc->sc_bio_flush |= GJ_FLUSH_DATA;
			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
			    sc->sc_dconsumer->provider->name);
		} else {
			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
			    sc->sc_dconsumer->provider->name);
		}
	}

	gp = sc->sc_geom;
	g_topology_lock();
	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	/*
	 * There could be a problem when the data provider and journal provider
	 * have different sectorsize, but such a scenario is prevented on
	 * journal creation.
	 */
	pp->sectorsize = sc->sc_sectorsize;
	g_error_provider(pp, 0);
	g_topology_unlock();
	last_write = time_second;

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	for (;;) {
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_mtx);
		bp = bioq_first(&sc->sc_back_queue);
		if (bp != NULL)
			type = (bp->bio_cflags & GJ_BIO_MASK);
		if (bp == NULL) {
			bp = bioq_first(&sc->sc_regular_queue);
			if (bp != NULL)
				type = GJ_BIO_REGULAR;
		}
		if (bp == NULL) {
try_switch:
			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
				if (sc->sc_current_count > 0) {
					mtx_unlock(&sc->sc_mtx);
					g_journal_flush(sc);
					g_journal_flush_send(sc);
					goto sleep;
				}
				if (sc->sc_flush_in_progress > 0)
					goto sleep;
				if (sc->sc_copy_in_progress > 0)
					goto sleep;
			}
			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
				mtx_unlock(&sc->sc_mtx);
				g_journal_switch(sc);
				wakeup(&sc->sc_journal_copying);
				continue;
			}
			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
				GJ_DEBUG(1, "Shutting down worker "
				    "thread for %s.", gp->name);
				sc->sc_worker = NULL;
				wakeup(&sc->sc_worker);
				mtx_unlock(&sc->sc_mtx);
				kproc_exit(0);
			}
sleep:
			g_journal_wait(sc, last_write);
			continue;
		}
		/*
		 * If we're in the process of switching, we need to delay all
		 * new write requests until it's done.
		 */
		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
			goto try_switch;
		}
		if (type == GJ_BIO_REGULAR)
			bioq_remove(&sc->sc_regular_queue, bp);
		else
			bioq_remove(&sc->sc_back_queue, bp);
		mtx_unlock(&sc->sc_mtx);
		switch (type) {
		case GJ_BIO_REGULAR:
			/* Regular request. */
			switch (bp->bio_cmd) {
			case BIO_READ:
				g_journal_read(sc, bp, bp->bio_offset,
				    bp->bio_offset + bp->bio_length);
				break;
			case BIO_WRITE:
				last_write = time_second;
				g_journal_add_request(sc, bp);
				g_journal_flush_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_COPY:
			switch (bp->bio_cmd) {
			case BIO_READ:
				if (g_journal_copy_read_done(bp))
					g_journal_copy_send(sc);
				break;
			case BIO_WRITE:
				g_journal_copy_write_done(bp);
				g_journal_copy_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_JOURNAL:
			g_journal_flush_done(bp);
			g_journal_flush_send(sc);
			break;
		case GJ_BIO_READ:
		default:
			panic("Invalid bio (%d).", type);
		}
	}
}

static void
g_journal_destroy_event(void *arg, int flags __unused)
{
	struct g_journal_softc *sc;

	g_topology_assert();
	sc = arg;
	g_journal_destroy(sc);
}

static void
g_journal_timeout(void *arg)
{
	struct g_journal_softc *sc;

	sc = arg;
	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
	    sc->sc_name);
	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
}

static struct g_geom *
g_journal_create(struct g_class *mp, struct g_provider *pp,
    const struct g_journal_metadata *md)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	sc = NULL;	/* gcc */

	g_topology_assert();
	/*
	 * There are two possibilities:
	 * 1. Data and both journals are on the same provider.
	 * 2. Data and journals are all on separate providers.
	 */
	/* Look for a journal device with the same ID. */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_id == md->md_id)
			break;
	}
	if (gp == NULL)
		sc = NULL;
	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
		return (NULL);
	}
	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
		return (NULL);
	}
	if (md->md_type & GJ_TYPE_DATA) {
		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
		    pp->name);
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
		    pp->name);
	}

	if (sc == NULL) {
		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
		sc->sc_id = md->md_id;
		sc->sc_type = 0;
		sc->sc_flags = 0;
		sc->sc_worker = NULL;

		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
		gp->start = g_journal_start;
		gp->orphan = g_journal_orphan;
		gp->access = g_journal_access;
		gp->softc = sc;
		gp->flags |= G_GEOM_VOLATILE_BIO;
		sc->sc_geom = gp;

		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);

		bioq_init(&sc->sc_back_queue);
		bioq_init(&sc->sc_regular_queue);
		bioq_init(&sc->sc_delayed_queue);
		sc->sc_delayed_count = 0;
		sc->sc_current_queue = NULL;
		sc->sc_current_count = 0;
		sc->sc_flush_queue = NULL;
		sc->sc_flush_count = 0;
		sc->sc_flush_in_progress = 0;
		sc->sc_copy_queue = NULL;
		sc->sc_copy_in_progress = 0;
		sc->sc_inactive.jj_queue = NULL;
		sc->sc_active.jj_queue = NULL;

		sc->sc_rootmount = root_mount_hold("GJOURNAL");
		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);

		callout_init(&sc->sc_callout, 1);
		if (md->md_type != GJ_TYPE_COMPLETE) {
			/*
			 * Journal and data are on separate providers.
			 * At this point we have only one of them.
			 * We set up a timeout in case the other part does not
			 * appear, so we won't wait forever.
			 */
			callout_reset(&sc->sc_callout, 5 * hz,
			    g_journal_timeout, sc);
		}
	}

	/* Remember type of the data provider. */
	if (md->md_type & GJ_TYPE_DATA)
		sc->sc_orig_type = md->md_type;
	sc->sc_type |= md->md_type;
	cp = NULL;

	if (md->md_type & GJ_TYPE_DATA) {
		if (md->md_flags & GJ_FLAG_CLEAN)
			sc->sc_flags |= GJF_DEVICE_CLEAN;
		if (md->md_flags & GJ_FLAG_CHECKSUM)
			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
		    pp->name, error));
		error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
			    error);
			g_journal_destroy(sc);
			return (NULL);
		}
		sc->sc_dconsumer = cp;
		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
		sc->sc_sectorsize = pp->sectorsize;
		sc->sc_jstart = md->md_jstart;
		sc->sc_jend = md->md_jend;
		if (md->md_provider[0] != '\0')
			sc->sc_flags |= GJF_DEVICE_HARDCODED;
		sc->sc_journal_offset = md->md_joffset;
		sc->sc_journal_id = md->md_jid;
		sc->sc_journal_previous_id = md->md_jid;
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
			    pp->name, error));
			error = g_access(cp, 1, 1, 1);
			if (error != 0) {
				GJ_DEBUG(0, "Cannot access %s (error=%d).",
				    pp->name, error);
				g_journal_destroy(sc);
				return (NULL);
			}
		} else {
			/*
			 * Journal is on the same provider as data, which means
			 * that the data provider ends where the journal starts.
			 */
			sc->sc_mediasize = md->md_jstart;
		}
		sc->sc_jconsumer = cp;
	}

	/* Start the switcher kproc if needed. */
	if (g_journal_switcher_proc == NULL)
		g_journal_start_switcher(mp);

	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
		/* Journal is not complete yet. */
		return (gp);
	} else {
		/* Journal is complete, cancel the timeout. */
		callout_drain(&sc->sc_callout);
	}

	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
	    "g_journal %s", sc->sc_name);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
		    sc->sc_name);
		g_journal_destroy(sc);
		return (NULL);
	}

	return (gp);
}
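/*
 * Detach and destroy a consumer from the GEOM event queue; see the comment
 * in g_journal_destroy() for why this must not be done inline.
 */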
static void
g_journal_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	g_detach(cp);
	g_destroy_consumer(cp);
}
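/*
 * Stop journaling on the device: flush and switch the journals, terminate
 * the worker thread, mark the metadata clean, and wither the geom.  Fails
 * with EBUSY while the provider is still open.
 */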
static int
g_journal_destroy(struct g_journal_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
		g_error_provider(pp, ENXIO);

		g_journal_flush(sc);
		g_journal_flush_send(sc);
		g_journal_switch(sc);
	}

	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);

	g_topology_unlock();

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	callout_drain(&sc->sc_callout);
	mtx_lock(&sc->sc_mtx);
	wakeup(sc);
	while (sc->sc_worker != NULL)
		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
	mtx_unlock(&sc->sc_mtx);

	if (pp != NULL) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		g_journal_metadata_update(sc);
		g_topology_lock();
		g_wither_provider(pp, ENXIO);
	} else
		g_topology_lock();
	mtx_destroy(&sc->sc_mtx);

	if (sc->sc_current_count != 0) {
		GJ_DEBUG(0, "Warning! Number of current requests %d.",
		    sc->sc_current_count);
	}

	gp->softc = NULL;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (cp->acr + cp->acw + cp->ace > 0)
			g_access(cp, -1, -1, -1);
		/*
		 * We keep all consumers open for writing, so if we detached
		 * and destroyed a consumer here, its provider would be
		 * offered for tasting again and the journal would be
		 * restarted.  Destroying the consumers from an event
		 * prevents that.
		 */
		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
	}
	g_wither_geom(gp, ENXIO);
	free(sc, M_JOURNAL);
	return (0);
}
static void
g_journal_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
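/*
 * Taste method: read gjournal metadata from the provider, validate it, and
 * create or complete a journal device for it.
 */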
static struct g_geom *
g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	GJ_DEBUG(2, "Tasting %s.", pp->name);
	if (pp->geom->class == mp)
		return (NULL);

	gp = g_new_geomf(mp, "journal:taste");
	/* This orphan function should never be called. */
	gp->orphan = g_journal_taste_orphan;
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0) {
		error = g_journal_metadata_read(cp, &md);
		g_detach(cp);
	}
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_journal_debug >= 2)
		journal_metadata_dump(&md);

	gp = g_journal_create(mp, pp, &md);
	return (gp);
}
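/*
 * Find a complete, not-being-destroyed journal device by name.  The name
 * may be either the geom name or the provider name, with or without the
 * /dev/ prefix.
 */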
static struct g_journal_softc *
g_journal_find_device(struct g_class *mp, const char *name)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	if (strncmp(name, _PATH_DEV, 5) == 0)
		name += 5;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
		if (pp != NULL && strcmp(pp->name, name) == 0)
			return (sc);
	}
	return (NULL);
}
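/*
 * Handle the "destroy"/"stop" control verbs: stop journaling on every
 * device named in the request.
 */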
static void
g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_journal_softc *sc;
	const char *name;
	char param[16];
	int *nargs;
	int error, i;

	g_topology_assert();

	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}

	for (i = 0; i < *nargs; i++) {
		snprintf(param, sizeof(param), "arg%d", i);
		name = gctl_get_asciiparam(req, param);
		if (name == NULL) {
			gctl_error(req, "No 'arg%d' argument.", i);
			return;
		}
		sc = g_journal_find_device(mp, name);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", name);
			continue;
		}
		error = g_journal_destroy(sc);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
			continue;
		}
	}
}
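/*
 * Handle the "sync" control verb: wake up the switcher thread and wait
 * until the requested journal switch is done.
 */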
static void
g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
{

	g_topology_assert();
	g_topology_unlock();
	g_journal_sync_requested++;
	wakeup(&g_journal_switcher_state);
	while (g_journal_sync_requested > 0)
		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
	g_topology_lock();
}
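/*
 * Control requests typically come from the gjournal(8) userland utility,
 * e.g. "gjournal sync" or "gjournal stop da0.journal".
 */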
static void
g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();

	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_JOURNAL_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}

	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0) {
		g_journal_ctl_destroy(req, mp);
		return;
	} else if (strcmp(verb, "sync") == 0) {
		g_journal_ctl_sync(req, mp);
		return;
	}

	gctl_error(req, "Unknown verb.");
}
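/*
 * Dump per-geom state as XML for the kern.geom.confxml sysctl, which the
 * geom(8) utilities read.
 */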
static void
g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_journal_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		int first = 1;

		sbuf_printf(sb, "%s<Role>", indent);
		if (cp == sc->sc_dconsumer) {
			sbuf_cat(sb, "Data");
			first = 0;
		}
		if (cp == sc->sc_jconsumer) {
			if (!first)
				sbuf_cat(sb, ",");
			sbuf_cat(sb, "Journal");
		}
		sbuf_cat(sb, "</Role>\n");
		if (cp == sc->sc_jconsumer) {
			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
			    (intmax_t)sc->sc_jstart);
			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
			    (intmax_t)sc->sc_jend);
		}
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
	}
}
static eventhandler_tag g_journal_event_shutdown = NULL;
static eventhandler_tag g_journal_event_lowmem = NULL;
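/*
 * Destroy all journal geoms at system shutdown, after file systems have
 * been synced; skipped entirely if the kernel has panicked.
 */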
static void
g_journal_shutdown(void *arg, int howto __unused)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	if (KERNEL_PANICKED())
		return;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
		g_journal_destroy(gp->softc);
	}
	g_topology_unlock();
}
/*
 * Free cached requests from the inactive queue in case of low memory.
 * We free GJ_FREE_AT_ONCE elements at once.
 */
#define	GJ_FREE_AT_ONCE	4
static void
g_journal_lowmem(void *arg, int howto __unused)
{
	struct g_journal_softc *sc;
	struct g_class *mp;
	struct g_geom *gp;
	struct bio *bp;
	u_int nfree = GJ_FREE_AT_ONCE;

	g_journal_stats_low_mem++;
	mp = arg;
	g_topology_lock();
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
			continue;
		mtx_lock(&sc->sc_mtx);
		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
		    nfree--, bp = bp->bio_next) {
			/*
			 * It is safe to free bio_data here, because:
			 * 1. If bio_data is NULL it will be read from the
			 *    inactive journal.
			 * 2. If bp is sent down, it is first removed from the
			 *    inactive queue, so it is impossible to free the
			 *    data from under an in-flight bio.
			 * Freeing elements from the active queue, on the
			 * other hand, is not safe.
			 */
			if (bp->bio_data != NULL) {
				GJ_DEBUG(2, "Freeing data from %s.",
				    sc->sc_name);
				gj_free(bp->bio_data, bp->bio_length);
				bp->bio_data = NULL;
			}
		}
		mtx_unlock(&sc->sc_mtx);
		if (nfree == 0)
			break;
	}
	g_topology_unlock();
}
static void g_journal_switcher(void *arg);

static void
g_journal_init(struct g_class *mp)
{

	/* Pick a conservative value if the configured value is unusable. */
	if (g_journal_cache_divisor <= 0 ||
	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
		g_journal_cache_divisor = 5;
	}
	if (g_journal_cache_limit > 0) {
		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
		g_journal_cache_low =
		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
	}
	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_shutdown == NULL)
		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_lowmem == NULL)
		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
}
static void
g_journal_fini(struct g_class *mp)
{

	if (g_journal_event_shutdown != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
		    g_journal_event_shutdown);
	}
	if (g_journal_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
	if (g_journal_switcher_proc != NULL)
		g_journal_stop_switcher();
}
DECLARE_GEOM_CLASS(g_journal_class, g_journal);
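/*
 * Map a file system type name (e.g. "ufs") to its journalling callbacks in
 * the g_journal_filesystems[] table.
 */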
static const struct g_journal_desc *
g_journal_find_desc(const char *fstype)
{
	const struct g_journal_desc *desc;
	int i;

	for (desc = g_journal_filesystems[i = 0]; desc != NULL;
	    desc = g_journal_filesystems[++i]) {
		if (strcmp(desc->jd_fstype, fstype) == 0)
			break;
	}
	return (desc);
}
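/*
 * Ask the worker thread to perform a journal switch and sleep until it
 * completes.  Called with sc_mtx held.
 */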
static void
g_journal_switch_wait(struct g_journal_softc *sc)
{
	struct bintime bt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (g_journal_debug >= 2) {
		if (sc->sc_flush_in_progress > 0) {
			GJ_DEBUG(2, "%d requests flushing.",
			    sc->sc_flush_in_progress);
		}
		if (sc->sc_copy_in_progress > 0) {
			GJ_DEBUG(2, "%d requests copying.",
			    sc->sc_copy_in_progress);
		}
		if (sc->sc_flush_count > 0) {
			GJ_DEBUG(2, "%d requests to flush.",
			    sc->sc_flush_count);
		}
		if (sc->sc_delayed_count > 0) {
			GJ_DEBUG(2, "%d requests delayed.",
			    sc->sc_delayed_count);
		}
	}
	g_journal_stats_switches++;
	if (sc->sc_copy_in_progress > 0)
		g_journal_stats_wait_for_copy++;
	GJ_TIMER_START(1, &bt);
	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
	sc->sc_flags |= GJF_DEVICE_SWITCH;
	wakeup(sc);
	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
		    "gj:switch", 0);
	}
	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
}
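/*
 * For every journaled, mounted file system: sync it, suspend writes, let
 * the file system mark itself clean, then switch journals.  Journal devices
 * that are configured but not mounted are switched afterwards.
 */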
static void
g_journal_do_switch(struct g_class *classp)
{
	struct g_journal_softc *sc;
	const struct g_journal_desc *desc;
	struct g_geom *gp;
	struct mount *mp;
	struct bintime bt;
	char *mountpoint;
	int error, save;

	g_topology_lock();
	LIST_FOREACH(gp, &classp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		mtx_lock(&sc->sc_mtx);
		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
		mtx_unlock(&sc->sc_mtx);
	}
	g_topology_unlock();

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_gjprovider == NULL)
			continue;
		if (mp->mnt_flag & MNT_RDONLY)
			continue;
		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
		if (desc == NULL)
			continue;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			continue;
		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

		g_topology_lock();
		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
		g_topology_unlock();

		if (sc == NULL) {
			GJ_DEBUG(0, "Cannot find journal geom for %s.",
			    mp->mnt_gjprovider);
			goto next;
		} else if (JEMPTY(sc)) {
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
			mtx_unlock(&sc->sc_mtx);
			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
			goto next;
		}

		mountpoint = mp->mnt_stat.f_mntonname;

		error = vn_start_write(NULL, &mp, V_WAIT);
		if (error != 0) {
			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
			    mountpoint, error);
			goto next;
		}

		save = curthread_pflags_set(TDP_SYNCIO);

		GJ_TIMER_START(1, &bt);
		vfs_periodic(mp, MNT_NOWAIT);
		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

		GJ_TIMER_START(1, &bt);
		error = VFS_SYNC(mp, MNT_NOWAIT);
		if (error == 0)
			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
		else {
			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
			    mountpoint, error);
		}

		curthread_pflags_restore(save);

		vn_finished_write(mp);

		if (error != 0)
			goto next;

		/*
		 * Send BIO_FLUSH before freezing the file system, so it can be
		 * faster after the freeze.
		 */
		GJ_TIMER_START(1, &bt);
		g_journal_flush_cache(sc);
		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

		GJ_TIMER_START(1, &bt);
		error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
			    mountpoint, error);
			goto next;
		}

		error = desc->jd_clean(mp);
		if (error != 0) {
			vfs_write_resume(mp, 0);
			goto next;
		}

		mtx_lock(&sc->sc_mtx);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);

		vfs_write_resume(mp, 0);
next:
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);

	sc = NULL;
	for (;;) {
		g_topology_lock();
		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
			sc = gp->softc;
			if (sc == NULL)
				continue;
			mtx_lock(&sc->sc_mtx);
			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
				break;
			}
			mtx_unlock(&sc->sc_mtx);
			sc = NULL;
		}
		g_topology_unlock();
		if (sc == NULL)
			break;
		mtx_assert(&sc->sc_mtx, MA_OWNED);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);
	}
}
static void
g_journal_start_switcher(struct g_class *mp)
{
	int error;

	g_topology_assert();
	MPASS(g_journal_switcher_proc == NULL);
	g_journal_switcher_state = GJ_SWITCHER_WORKING;
	error = kproc_create(g_journal_switcher, mp, &g_journal_switcher_proc,
	    0, 0, "g_journal switcher");
	KASSERT(error == 0, ("Cannot create switcher thread."));
}
static void
g_journal_stop_switcher(void)
{

	g_topology_assert();
	MPASS(g_journal_switcher_proc != NULL);
	g_journal_switcher_state = GJ_SWITCHER_DIE;
	wakeup(&g_journal_switcher_state);
	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
	GJ_DEBUG(1, "Switcher died.");
	g_journal_switcher_proc = NULL;
}
/*
 * TODO: Kill switcher thread on last geom destruction?
 */
static void
g_journal_switcher(void *arg)
{
	struct g_class *mp;
	struct bintime bt;
	int error;

	mp = arg;
	curthread->td_pflags |= TDP_NORUNNINGBUF;
	for (;;) {
		g_journal_switcher_wokenup = 0;
		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
		    g_journal_switch_time * hz);
		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
			g_journal_switcher_state = GJ_SWITCHER_DIED;
			GJ_DEBUG(1, "Switcher exiting.");
			wakeup(&g_journal_switcher_state);
			kproc_exit(0);
		}
		if (error == 0 && g_journal_sync_requested == 0) {
			GJ_DEBUG(1, "Out of cache, force switch (used=%jd "
			    "limit=%jd).", (intmax_t)g_journal_cache_used,
			    (intmax_t)g_journal_cache_limit);
		}
		GJ_TIMER_START(1, &bt);
		g_journal_do_switch(mp);
		GJ_TIMER_STOP(1, &bt, "Entire switch time");
		if (g_journal_sync_requested > 0) {
			g_journal_sync_requested = 0;
			wakeup(&g_journal_sync_requested);
		}
	}
}